// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_roots bl list spinlock protects:
 *   - the s_roots list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_roots lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * arbitrary, since it's serialized on rename_lock
 */
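
/*
 * Illustrative sketch of the documented ordering (not code used
 * elsewhere in this file): for a positive dentry, i_lock is taken
 * before d_lock, as dentry_unlink_inode()'s callers arrange:
 *
 *	spin_lock(&dentry->d_inode->i_lock);
 *	spin_lock(&dentry->d_lock);
 *	...
 *	spin_unlock(&dentry->d_lock);
 *	spin_unlock(&dentry->d_inode->i_lock);
 *
 * Taking them in the opposite order is only safe via trylock; on
 * failure the locks must be dropped and retaken in the proper order
 * (see dentry_kill() below).
 */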
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
const struct qstr dotdot_name = QSTR_INIT("..", 2);
EXPORT_SYMBOL(dotdot_name);

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> d_hash_shift);
}
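
/*
 * Worked example (the bucket count is an assumption, not a constant,
 * and this relies on the hash-table setup later in the file storing
 * d_hash_shift as 32 minus the table order, as mainline does): with
 * 2^12 buckets, d_hash_shift ends up as 32 - 12 = 20, so the *top*
 * 12 bits of the 32-bit name hash pick the bucket:
 *
 *	bucket = dentry_hashtable + (0xdeadbeef >> 20);	// index 0xdea
 *
 * Indexing by the high bits keeps most of the mixed-in hash
 * information instead of throwing the top bits away.
 */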

#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}

struct dentry_stat_t {
	long nr_dentry;
	long nr_unused;
	long age_limit;		/* age in seconds */
	long want_pages;	/* pages requested by system */
	long nr_negative;	/* # of unused negative dentries */
	long dummy;		/* Reserved for future use */
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
/* Statistics gathering. */
static struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We expect to get better
 * code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_negative(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_negative, i);
	return sum < 0 ? 0 : sum;
}

static int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
			  size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	dentry_stat.nr_negative = get_nr_dentry_negative();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_dcache_sysctls[] = {
	{
		.procname	= "dentry-state",
		.data		= &dentry_stat,
		.maxlen		= 6*sizeof(long),
		.mode		= 0444,
		.proc_handler	= proc_nr_dentry,
	},
	{ }
};

static int __init init_fs_dcache_sysctls(void)
{
	register_sysctl_init("fs", fs_dcache_sysctls);
	return 0;
}
fs_initcall(init_fs_dcache_sysctls);
#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
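
/*
 * Worked example of the tail masking above (assuming a little-endian
 * 64-bit build, where bytemask_from_count(n) is ~(~0ul << 8*n)): with
 * tcount == 3 bytes left over, mask == 0x0000000000ffffff, so only the
 * low three bytes of the final words participate in the comparison;
 * whatever the word loads pulled in beyond the name is ignored.
 */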

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}

struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};
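
/*
 * Illustrative layout note (a sketch, not a definition used anywhere):
 * a name too long for d_iname is allocated as one block, refcount
 * header followed by the NUL-terminated name bytes:
 *
 *	[ u.count / u.head ][ 'v' 'e' 'r' 'y' 'l' 'o' 'n' 'g' ... '\0' ]
 *
 * dentry->d_name.name points at the name[] part, which is why
 * external_name() below can recover the header with container_of().
 */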

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}

void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	name->name = dentry->d_name;
	if (unlikely(dname_external(dentry))) {
		atomic_inc(&external_name(dentry)->u.count);
	} else {
		memcpy(name->inline_name, dentry->d_iname,
		       dentry->d_name.len + 1);
		name->name.name = name->inline_name;
	}
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(take_dentry_name_snapshot);

void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name.name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name.name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			kfree_rcu(p, u.head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
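
/*
 * Usage sketch for the snapshot pair above (an illustrative caller,
 * not code from this file): pin a stable copy of the name across an
 * operation that may race with rename:
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	do_something_with(snap.name.name);	// hypothetical helper
 *	release_dentry_name_snapshot(&snap);
 *
 * Short names are copied into snap.inline_name; long (external) names
 * are pinned by bumping the shared refcount instead of copying.
 */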

static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	smp_store_release(&dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_inc(nr_dentry_negative);
}

static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (dentry->d_flags & DCACHE_NORCU)
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * The per-cpu "nr_dentry_negative" counters are only updated
 * when deleted from or added to the per-superblock LRU list, not
 * from/to the shrink list. That is to avoid an unneeded dec/inc
 * pair when moving from LRU to shrink list in select_collect().
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
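
/*
 * Quick state summary implied by the rules above (d_flags list bits
 * vs. which list d_lru is actually on):
 *
 *	(neither bit)				d_lru unused
 *	DCACHE_LRU_LIST				on the per-sb LRU list
 *	DCACHE_LRU_LIST|DCACHE_SHRINK_LIST	on a private shrink list
 *
 * DCACHE_SHRINK_LIST alone never occurs; D_FLAG_VERIFY() below
 * asserts exactly these combinations on every transition.
 */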
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_inc(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}

static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;
	/*
	 * Hashed dentries are normally on the dentry hashtable,
	 * with the exception of those newly allocated by
	 * d_obtain_root, which are always IS_ROOT:
	 */
	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}

void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock
 *
 * ___d_drop doesn't mark dentry as "unhashed"
 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children. While we'd been
	 * a normal list member, it didn't matter - ->d_child.next would've
	 * been updated. However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_child.next. And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_child.next
	 * points to something that won't be moving around. I.e. skip the
	 * cursors.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}

static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	cond_resched();
}

static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}

static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}

	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
		return false;

	/* retain; LRU fodder */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}

void d_mark_dontcache(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&de->d_lock);
		de->d_flags |= DCACHE_DONTCACHE;
		spin_unlock(&de->d_lock);
	}
	inode->i_state |= I_DONTCACHE;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative that became positive */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	if (unlikely(dentry->d_lockref.count != 1)) {
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* we are keeping it, after all */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 *
	 * Nevertheless, there are two cases in which we should
	 * kill the dentry anyway.
	 * 1. free disconnected dentries as soon as their refcount
	 *    reaches zero.
	 * 2. free dentries if they should not be cached.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST |
			DCACHE_DISCONNECTED | DCACHE_DONTCACHE;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return false;
}


/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	while (dentry) {
		might_sleep();

		rcu_read_lock();
		if (likely(fast_dput(dentry))) {
			rcu_read_unlock();
			return;
		}

		/* Slow case: now with the dentry lock held */
		rcu_read_unlock();

		if (likely(retain_dentry(dentry))) {
			spin_unlock(&dentry->d_lock);
			return;
		}

		dentry = dentry_kill(dentry);
	}
}
EXPORT_SYMBOL(dput);

static void __dput_to_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		/* let the owner of the list it's on deal with it */
		--dentry->d_lockref.count;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!--dentry->d_lockref.count)
			d_shrink_add(dentry, list);
	}
}

void dput_to_list(struct dentry *dentry, struct list_head *list)
{
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	if (!retain_dentry(dentry))
		__dput_to_list(dentry, list);
	spin_unlock(&dentry->d_lock);
}

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;
	unsigned seq;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	seq = raw_seqcount_begin(&dentry->d_seq);
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (!read_seqcount_retry(&dentry->d_seq, seq))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);

static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Caller MUST be holding rcu_read_lock() and be guaranteed
 * that inode won't get freed until rcu_read_unlock().
 */
struct dentry *d_find_alias_rcu(struct inode *inode)
{
	struct hlist_head *l = &inode->i_dentry;
	struct dentry *de = NULL;

	spin_lock(&inode->i_lock);
	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
	// used without having I_FREEING set, which means no aliases left
	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
		if (S_ISDIR(inode->i_mode)) {
			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
		} else {
			hlist_for_each_entry(de, l, d_u.d_alias)
				if (!d_unhashed(de))
					break;
		}
	}
	spin_unlock(&inode->i_lock);
	return de;
}

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Lock a dentry from shrink list.
 * Called under rcu_read_lock() and dentry->d_lock; the former
 * guarantees that nothing we access will be freed under us.
 * Note that dentry is *not* protected from concurrent dentry_kill(),
 * d_delete(), etc.
 *
 * Return false if dentry has been disrupted or grabbed, leaving
 * the caller to kick it off-list. Otherwise, return true and have
 * that dentry's inode and parent both locked.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* changed inode means that somebody had grabbed it */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}

void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!shrink_lock_dentry(dentry)) {
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		if (parent != dentry)
			__dput_to_list(parent, list);
		__dentry_kill(dentry);
	}
}

static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);


	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
		 * operating only with stack-provided lists after they are
		 * properly isolated from the main list. It is thus always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}


/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	do {
		LIST_HEAD(dispose);

		list_lru_walk(&sb->s_dentry_lru,
			      dentry_lru_isolate_shrink, &dispose, 1024);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE: continue walk
| 1329 | * @D_WALK_QUIT: quit walk |
| 1330 | * @D_WALK_NORETRY: quit when retry is needed |
| 1331 | * @D_WALK_SKIP: skip this dentry and its children |
| 1332 | */ |
| 1333 | enum d_walk_ret { |
| 1334 | D_WALK_CONTINUE, |
| 1335 | D_WALK_QUIT, |
| 1336 | D_WALK_NORETRY, |
| 1337 | D_WALK_SKIP, |
| 1338 | }; |
| 1339 | |
| 1340 | /** |
| 1341 | * d_walk - walk the dentry tree |
| 1342 | * @parent: start of walk |
| 1343 | * @data: data passed to @enter() and @finish() |
| 1344 | * @enter: callback when first entering the dentry |
| 1345 | * |
| 1346 | * The @enter() callbacks are called with d_lock held. |
| 1347 | */ |
| 1348 | static void d_walk(struct dentry *parent, void *data, |
| 1349 | enum d_walk_ret (*enter)(void *, struct dentry *)) |
| 1350 | { |
| 1351 | struct dentry *this_parent; |
| 1352 | struct list_head *next; |
| 1353 | unsigned seq = 0; |
| 1354 | enum d_walk_ret ret; |
| 1355 | bool retry = true; |
| 1356 | |
| 1357 | again: |
| 1358 | read_seqbegin_or_lock(&rename_lock, &seq); |
| 1359 | this_parent = parent; |
| 1360 | spin_lock(&this_parent->d_lock); |
| 1361 | |
| 1362 | ret = enter(data, this_parent); |
| 1363 | switch (ret) { |
| 1364 | case D_WALK_CONTINUE: |
| 1365 | break; |
| 1366 | case D_WALK_QUIT: |
| 1367 | case D_WALK_SKIP: |
| 1368 | goto out_unlock; |
| 1369 | case D_WALK_NORETRY: |
| 1370 | retry = false; |
| 1371 | break; |
| 1372 | } |
| 1373 | repeat: |
| 1374 | next = this_parent->d_subdirs.next; |
| 1375 | resume: |
| 1376 | while (next != &this_parent->d_subdirs) { |
| 1377 | struct list_head *tmp = next; |
| 1378 | struct dentry *dentry = list_entry(tmp, struct dentry, d_child); |
| 1379 | next = tmp->next; |
| 1380 | |
| 1381 | if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR)) |
| 1382 | continue; |
| 1383 | |
| 1384 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); |
| 1385 | |
| 1386 | ret = enter(data, dentry); |
| 1387 | switch (ret) { |
| 1388 | case D_WALK_CONTINUE: |
| 1389 | break; |
| 1390 | case D_WALK_QUIT: |
| 1391 | spin_unlock(&dentry->d_lock); |
| 1392 | goto out_unlock; |
| 1393 | case D_WALK_NORETRY: |
| 1394 | retry = false; |
| 1395 | break; |
| 1396 | case D_WALK_SKIP: |
| 1397 | spin_unlock(&dentry->d_lock); |
| 1398 | continue; |
| 1399 | } |
| 1400 | |
| 1401 | if (!list_empty(&dentry->d_subdirs)) { |
| 1402 | spin_unlock(&this_parent->d_lock); |
| 1403 | spin_release(&dentry->d_lock.dep_map, _RET_IP_); |
| 1404 | this_parent = dentry; |
| 1405 | spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); |
| 1406 | goto repeat; |
| 1407 | } |
| 1408 | spin_unlock(&dentry->d_lock); |
| 1409 | } |
| 1410 | /* |
| 1411 | * All done at this level ... ascend and resume the search. |
| 1412 | */ |
| 1413 | rcu_read_lock(); |
| 1414 | ascend: |
| 1415 | if (this_parent != parent) { |
| 1416 | struct dentry *child = this_parent; |
| 1417 | this_parent = child->d_parent; |
| 1418 | |
| 1419 | spin_unlock(&child->d_lock); |
| 1420 | spin_lock(&this_parent->d_lock); |
| 1421 | |
| 1422 | /* might go back up the wrong parent if we have had a rename. */ |
| 1423 | if (need_seqretry(&rename_lock, seq)) |
| 1424 | goto rename_retry; |
| 1425 | /* go into the first sibling still alive */ |
| 1426 | do { |
| 1427 | next = child->d_child.next; |
| 1428 | if (next == &this_parent->d_subdirs) |
| 1429 | goto ascend; |
| 1430 | child = list_entry(next, struct dentry, d_child); |
| 1431 | } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); |
| 1432 | rcu_read_unlock(); |
| 1433 | goto resume; |
| 1434 | } |
| 1435 | if (need_seqretry(&rename_lock, seq)) |
| 1436 | goto rename_retry; |
| 1437 | rcu_read_unlock(); |
| 1438 | |
| 1439 | out_unlock: |
| 1440 | spin_unlock(&this_parent->d_lock); |
| 1441 | done_seqretry(&rename_lock, seq); |
| 1442 | return; |
| 1443 | |
| 1444 | rename_retry: |
| 1445 | spin_unlock(&this_parent->d_lock); |
| 1446 | rcu_read_unlock(); |
| 1447 | BUG_ON(seq & 1); |
| 1448 | if (!retry) |
| 1449 | return; |
| 1450 | seq = 1; |
| 1451 | goto again; |
| 1452 | } |
| 1453 | |
| 1454 | struct check_mount { |
| 1455 | struct vfsmount *mnt; |
| 1456 | unsigned int mounted; |
| 1457 | }; |
| 1458 | |
| 1459 | static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry) |
| 1460 | { |
| 1461 | struct check_mount *info = data; |
| 1462 | struct path path = { .mnt = info->mnt, .dentry = dentry }; |
| 1463 | |
| 1464 | if (likely(!d_mountpoint(dentry))) |
| 1465 | return D_WALK_CONTINUE; |
| 1466 | if (__path_is_mountpoint(&path)) { |
| 1467 | info->mounted = 1; |
| 1468 | return D_WALK_QUIT; |
| 1469 | } |
| 1470 | return D_WALK_CONTINUE; |
| 1471 | } |
| 1472 | |
| 1473 | /** |
| 1474 | * path_has_submounts - check for mounts over a dentry in the |
| 1475 | * current namespace. |
| 1476 | * @parent: path to check. |
| 1477 | * |
| 1478 | * Return true if the parent or its subdirectories contain |
| 1479 | * a mount point in the current namespace. |
| 1480 | */ |
| 1481 | int path_has_submounts(const struct path *parent) |
| 1482 | { |
| 1483 | struct check_mount data = { .mnt = parent->mnt, .mounted = 0 }; |
| 1484 | |
| 1485 | read_seqlock_excl(&mount_lock); |
| 1486 | d_walk(parent->dentry, &data, path_check_mount); |
| 1487 | read_sequnlock_excl(&mount_lock); |
| 1488 | |
| 1489 | return data.mounted; |
| 1490 | } |
| 1491 | EXPORT_SYMBOL(path_has_submounts); |
| 1492 | |
| 1493 | /* |
| 1494 | * Called by mount code to set a mountpoint and check if the mountpoint is |
| 1495 | * reachable (e.g. NFS can unhash a directory dentry and then the complete |
| 1496 | * subtree can become unreachable). |
| 1497 | * |
| 1498 | * Only one of d_invalidate() and d_set_mounted() must succeed. For |
| 1499 | * this reason take rename_lock and d_lock on dentry and ancestors. |
| 1500 | */ |
| 1501 | int d_set_mounted(struct dentry *dentry) |
| 1502 | { |
| 1503 | struct dentry *p; |
| 1504 | int ret = -ENOENT; |
| 1505 | write_seqlock(&rename_lock); |
| 1506 | for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { |
| 1507 | /* Need exclusion wrt. d_invalidate() */ |
| 1508 | spin_lock(&p->d_lock); |
| 1509 | if (unlikely(d_unhashed(p))) { |
| 1510 | spin_unlock(&p->d_lock); |
| 1511 | goto out; |
| 1512 | } |
| 1513 | spin_unlock(&p->d_lock); |
| 1514 | } |
| 1515 | spin_lock(&dentry->d_lock); |
| 1516 | if (!d_unlinked(dentry)) { |
| 1517 | ret = -EBUSY; |
| 1518 | if (!d_mountpoint(dentry)) { |
| 1519 | dentry->d_flags |= DCACHE_MOUNTED; |
| 1520 | ret = 0; |
| 1521 | } |
| 1522 | } |
| 1523 | spin_unlock(&dentry->d_lock); |
| 1524 | out: |
| 1525 | write_sequnlock(&rename_lock); |
| 1526 | return ret; |
| 1527 | } |
| 1528 | |
| 1529 | /* |
| 1530 | * Search the dentry child list of the specified parent, |
| 1531 | * and move any unused dentries to the end of the unused |
| 1532 | * list for prune_dcache(). We descend to the next level |
| 1533 | * whenever the d_subdirs list is non-empty and continue |
| 1534 | * searching. |
| 1535 | * |
| 1536 | * It returns zero iff there are no unused children, |
| 1537 | * otherwise it returns the number of children moved to |
| 1538 | * the end of the unused list. This may not be the total |
| 1539 | * number of unused children, because select_parent can |
| 1540 | * drop the lock and return early due to latency |
| 1541 | * constraints. |
| 1542 | */ |
| 1543 | |
| 1544 | struct select_data { |
| 1545 | struct dentry *start; |
| 1546 | union { |
| 1547 | long found; |
| 1548 | struct dentry *victim; |
| 1549 | }; |
| 1550 | struct list_head dispose; |
| 1551 | }; |
| 1552 | |
| 1553 | static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) |
| 1554 | { |
| 1555 | struct select_data *data = _data; |
| 1556 | enum d_walk_ret ret = D_WALK_CONTINUE; |
| 1557 | |
| 1558 | if (data->start == dentry) |
| 1559 | goto out; |
| 1560 | |
| 1561 | if (dentry->d_flags & DCACHE_SHRINK_LIST) { |
| 1562 | data->found++; |
| 1563 | } else { |
| 1564 | if (dentry->d_flags & DCACHE_LRU_LIST) |
| 1565 | d_lru_del(dentry); |
| 1566 | if (!dentry->d_lockref.count) { |
| 1567 | d_shrink_add(dentry, &data->dispose); |
| 1568 | data->found++; |
| 1569 | } |
| 1570 | } |
| 1571 | /* |
| 1572 | * We can return to the caller if we have found some (this |
| 1573 | * ensures forward progress). We'll be coming back to find |
| 1574 | * the rest. |
| 1575 | */ |
| 1576 | if (!list_empty(&data->dispose)) |
| 1577 | ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY; |
| 1578 | out: |
| 1579 | return ret; |
| 1580 | } |
| 1581 | |
| 1582 | static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry) |
| 1583 | { |
| 1584 | struct select_data *data = _data; |
| 1585 | enum d_walk_ret ret = D_WALK_CONTINUE; |
| 1586 | |
| 1587 | if (data->start == dentry) |
| 1588 | goto out; |
| 1589 | |
| 1590 | if (dentry->d_flags & DCACHE_SHRINK_LIST) { |
| 1591 | if (!dentry->d_lockref.count) { |
| 1592 | rcu_read_lock(); |
| 1593 | data->victim = dentry; |
| 1594 | return D_WALK_QUIT; |
| 1595 | } |
| 1596 | } else { |
| 1597 | if (dentry->d_flags & DCACHE_LRU_LIST) |
| 1598 | d_lru_del(dentry); |
| 1599 | if (!dentry->d_lockref.count) |
| 1600 | d_shrink_add(dentry, &data->dispose); |
| 1601 | } |
| 1602 | /* |
| 1603 | * We can return to the caller if we have found some (this |
| 1604 | * ensures forward progress). We'll be coming back to find |
| 1605 | * the rest. |
| 1606 | */ |
| 1607 | if (!list_empty(&data->dispose)) |
| 1608 | ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY; |
| 1609 | out: |
| 1610 | return ret; |
| 1611 | } |
| 1612 | |
| 1613 | /** |
| 1614 | * shrink_dcache_parent - prune dcache |
| 1615 | * @parent: parent of entries to prune |
| 1616 | * |
| 1617 | * Prune the dcache to remove unused children of the parent dentry. |
| 1618 | */ |
| 1619 | void shrink_dcache_parent(struct dentry *parent) |
| 1620 | { |
| 1621 | for (;;) { |
| 1622 | struct select_data data = {.start = parent}; |
| 1623 | |
| 1624 | INIT_LIST_HEAD(&data.dispose); |
| 1625 | d_walk(parent, &data, select_collect); |
| 1626 | |
| 1627 | if (!list_empty(&data.dispose)) { |
| 1628 | shrink_dentry_list(&data.dispose); |
| 1629 | continue; |
| 1630 | } |
| 1631 | |
| 1632 | cond_resched(); |
| 1633 | if (!data.found) |
| 1634 | break; |
| 1635 | data.victim = NULL; |
| 1636 | d_walk(parent, &data, select_collect2); |
| 1637 | if (data.victim) { |
| 1638 | struct dentry *parent; |
| 1639 | spin_lock(&data.victim->d_lock); |
| 1640 | if (!shrink_lock_dentry(data.victim)) { |
| 1641 | spin_unlock(&data.victim->d_lock); |
| 1642 | rcu_read_unlock(); |
| 1643 | } else { |
| 1644 | rcu_read_unlock(); |
| 1645 | parent = data.victim->d_parent; |
| 1646 | if (parent != data.victim) |
| 1647 | __dput_to_list(parent, &data.dispose); |
| 1648 | __dentry_kill(data.victim); |
| 1649 | } |
| 1650 | } |
| 1651 | if (!list_empty(&data.dispose)) |
| 1652 | shrink_dentry_list(&data.dispose); |
| 1653 | } |
| 1654 | } |
| 1655 | EXPORT_SYMBOL(shrink_dcache_parent); |
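
/*
* Usage sketch (illustrative only, not lifted from any particular
* filesystem): a filesystem that knows a cached directory has gone
* stale can prune the unused part of the subtree before dropping the
* dentry itself:
*
*	shrink_dcache_parent(dentry);	// evict unused descendants
*	d_drop(dentry);			// then unhash the directory
*
* This is essentially what do_one_tree() below does at umount time.
*/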
| 1656 | |
| 1657 | static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) |
| 1658 | { |
/* it has busy descendants; complain about those instead */
| 1660 | if (!list_empty(&dentry->d_subdirs)) |
| 1661 | return D_WALK_CONTINUE; |
| 1662 | |
| 1663 | /* root with refcount 1 is fine */ |
| 1664 | if (dentry == _data && dentry->d_lockref.count == 1) |
| 1665 | return D_WALK_CONTINUE; |
| 1666 | |
| 1667 | printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} " |
| 1668 | " still in use (%d) [unmount of %s %s]\n", |
| 1669 | dentry, |
| 1670 | dentry->d_inode ? |
| 1671 | dentry->d_inode->i_ino : 0UL, |
| 1672 | dentry, |
| 1673 | dentry->d_lockref.count, |
| 1674 | dentry->d_sb->s_type->name, |
| 1675 | dentry->d_sb->s_id); |
| 1676 | WARN_ON(1); |
| 1677 | return D_WALK_CONTINUE; |
| 1678 | } |
| 1679 | |
| 1680 | static void do_one_tree(struct dentry *dentry) |
| 1681 | { |
| 1682 | shrink_dcache_parent(dentry); |
| 1683 | d_walk(dentry, dentry, umount_check); |
| 1684 | d_drop(dentry); |
| 1685 | dput(dentry); |
| 1686 | } |
| 1687 | |
| 1688 | /* |
| 1689 | * destroy the dentries attached to a superblock on unmounting |
| 1690 | */ |
| 1691 | void shrink_dcache_for_umount(struct super_block *sb) |
| 1692 | { |
| 1693 | struct dentry *dentry; |
| 1694 | |
| 1695 | WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked"); |
| 1696 | |
| 1697 | dentry = sb->s_root; |
| 1698 | sb->s_root = NULL; |
| 1699 | do_one_tree(dentry); |
| 1700 | |
| 1701 | while (!hlist_bl_empty(&sb->s_roots)) { |
| 1702 | dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash)); |
| 1703 | do_one_tree(dentry); |
| 1704 | } |
| 1705 | } |
| 1706 | |
| 1707 | static enum d_walk_ret find_submount(void *_data, struct dentry *dentry) |
| 1708 | { |
| 1709 | struct dentry **victim = _data; |
| 1710 | if (d_mountpoint(dentry)) { |
| 1711 | __dget_dlock(dentry); |
| 1712 | *victim = dentry; |
| 1713 | return D_WALK_QUIT; |
| 1714 | } |
| 1715 | return D_WALK_CONTINUE; |
| 1716 | } |
| 1717 | |
| 1718 | /** |
| 1719 | * d_invalidate - detach submounts, prune dcache, and drop |
| 1720 | * @dentry: dentry to invalidate (aka detach, prune and drop) |
| 1721 | */ |
| 1722 | void d_invalidate(struct dentry *dentry) |
| 1723 | { |
| 1724 | bool had_submounts = false; |
| 1725 | spin_lock(&dentry->d_lock); |
| 1726 | if (d_unhashed(dentry)) { |
| 1727 | spin_unlock(&dentry->d_lock); |
| 1728 | return; |
| 1729 | } |
| 1730 | __d_drop(dentry); |
| 1731 | spin_unlock(&dentry->d_lock); |
| 1732 | |
| 1733 | /* Negative dentries can be dropped without further checks */ |
| 1734 | if (!dentry->d_inode) |
| 1735 | return; |
| 1736 | |
| 1737 | shrink_dcache_parent(dentry); |
| 1738 | for (;;) { |
| 1739 | struct dentry *victim = NULL; |
| 1740 | d_walk(dentry, &victim, find_submount); |
| 1741 | if (!victim) { |
| 1742 | if (had_submounts) |
| 1743 | shrink_dcache_parent(dentry); |
| 1744 | return; |
| 1745 | } |
| 1746 | had_submounts = true; |
| 1747 | detach_mounts(victim); |
| 1748 | dput(victim); |
| 1749 | } |
| 1750 | } |
| 1751 | EXPORT_SYMBOL(d_invalidate); |
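
/*
* Usage sketch (illustrative only): a filesystem that learns out of
* band - say, via a server callback - that a cached name is stale can
* simply call:
*
*	d_invalidate(dentry);
*
* The VFS itself does the same during path walking when a
* ->d_revalidate() method returns 0 for a dentry.
*/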
| 1752 | |
| 1753 | /** |
| 1754 | * __d_alloc - allocate a dcache entry |
| 1755 | * @sb: filesystem it will belong to |
| 1756 | * @name: qstr of the name |
| 1757 | * |
* Allocates a dentry. It returns %NULL if there is insufficient memory
* available. On success the dentry is returned. The name passed in is
* copied, so the caller's copy may be reused after this call.
| 1761 | */ |
| 1762 | |
| 1763 | static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) |
| 1764 | { |
| 1765 | struct dentry *dentry; |
| 1766 | char *dname; |
| 1767 | int err; |
| 1768 | |
| 1769 | dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru, |
| 1770 | GFP_KERNEL); |
| 1771 | if (!dentry) |
| 1772 | return NULL; |
| 1773 | |
| 1774 | /* |
| 1775 | * We guarantee that the inline name is always NUL-terminated. |
| 1776 | * This way the memcpy() done by the name switching in rename |
| 1777 | * will still always have a NUL at the end, even if we might |
| 1778 | * be overwriting an internal NUL character |
| 1779 | */ |
| 1780 | dentry->d_iname[DNAME_INLINE_LEN-1] = 0; |
| 1781 | if (unlikely(!name)) { |
| 1782 | name = &slash_name; |
| 1783 | dname = dentry->d_iname; |
| 1784 | } else if (name->len > DNAME_INLINE_LEN-1) { |
| 1785 | size_t size = offsetof(struct external_name, name[1]); |
| 1786 | struct external_name *p = kmalloc(size + name->len, |
| 1787 | GFP_KERNEL_ACCOUNT | |
| 1788 | __GFP_RECLAIMABLE); |
| 1789 | if (!p) { |
| 1790 | kmem_cache_free(dentry_cache, dentry); |
| 1791 | return NULL; |
| 1792 | } |
| 1793 | atomic_set(&p->u.count, 1); |
| 1794 | dname = p->name; |
| 1795 | } else { |
| 1796 | dname = dentry->d_iname; |
| 1797 | } |
| 1798 | |
| 1799 | dentry->d_name.len = name->len; |
| 1800 | dentry->d_name.hash = name->hash; |
| 1801 | memcpy(dname, name->name, name->len); |
| 1802 | dname[name->len] = 0; |
| 1803 | |
| 1804 | /* Make sure we always see the terminating NUL character */ |
| 1805 | smp_store_release(&dentry->d_name.name, dname); /* ^^^ */ |
| 1806 | |
| 1807 | dentry->d_lockref.count = 1; |
| 1808 | dentry->d_flags = 0; |
| 1809 | spin_lock_init(&dentry->d_lock); |
| 1810 | seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); |
| 1811 | dentry->d_inode = NULL; |
| 1812 | dentry->d_parent = dentry; |
| 1813 | dentry->d_sb = sb; |
| 1814 | dentry->d_op = NULL; |
| 1815 | dentry->d_fsdata = NULL; |
| 1816 | INIT_HLIST_BL_NODE(&dentry->d_hash); |
| 1817 | INIT_LIST_HEAD(&dentry->d_lru); |
| 1818 | INIT_LIST_HEAD(&dentry->d_subdirs); |
| 1819 | INIT_HLIST_NODE(&dentry->d_u.d_alias); |
| 1820 | INIT_LIST_HEAD(&dentry->d_child); |
| 1821 | d_set_d_op(dentry, dentry->d_sb->s_d_op); |
| 1822 | |
| 1823 | if (dentry->d_op && dentry->d_op->d_init) { |
| 1824 | err = dentry->d_op->d_init(dentry); |
| 1825 | if (err) { |
| 1826 | if (dname_external(dentry)) |
| 1827 | kfree(external_name(dentry)); |
| 1828 | kmem_cache_free(dentry_cache, dentry); |
| 1829 | return NULL; |
| 1830 | } |
| 1831 | } |
| 1832 | |
| 1833 | this_cpu_inc(nr_dentry); |
| 1834 | |
| 1835 | return dentry; |
| 1836 | } |
| 1837 | |
| 1838 | /** |
| 1839 | * d_alloc - allocate a dcache entry |
| 1840 | * @parent: parent of entry to allocate |
| 1841 | * @name: qstr of the name |
| 1842 | * |
* Allocates a dentry. It returns %NULL if there is insufficient memory
* available. On success the dentry is returned. The name passed in is
* copied, so the caller's copy may be reused after this call.
| 1846 | */ |
| 1847 | struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) |
| 1848 | { |
| 1849 | struct dentry *dentry = __d_alloc(parent->d_sb, name); |
| 1850 | if (!dentry) |
| 1851 | return NULL; |
| 1852 | spin_lock(&parent->d_lock); |
| 1853 | /* |
| 1854 | * don't need child lock because it is not subject |
| 1855 | * to concurrency here |
| 1856 | */ |
| 1857 | __dget_dlock(parent); |
| 1858 | dentry->d_parent = parent; |
| 1859 | list_add(&dentry->d_child, &parent->d_subdirs); |
| 1860 | spin_unlock(&parent->d_lock); |
| 1861 | |
| 1862 | return dentry; |
| 1863 | } |
| 1864 | EXPORT_SYMBOL(d_alloc); |
| 1865 | |
| 1866 | struct dentry *d_alloc_anon(struct super_block *sb) |
| 1867 | { |
| 1868 | return __d_alloc(sb, NULL); |
| 1869 | } |
| 1870 | EXPORT_SYMBOL(d_alloc_anon); |
| 1871 | |
| 1872 | struct dentry *d_alloc_cursor(struct dentry * parent) |
| 1873 | { |
| 1874 | struct dentry *dentry = d_alloc_anon(parent->d_sb); |
| 1875 | if (dentry) { |
| 1876 | dentry->d_flags |= DCACHE_DENTRY_CURSOR; |
| 1877 | dentry->d_parent = dget(parent); |
| 1878 | } |
| 1879 | return dentry; |
| 1880 | } |
| 1881 | |
| 1882 | /** |
| 1883 | * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) |
| 1884 | * @sb: the superblock |
| 1885 | * @name: qstr of the name |
| 1886 | * |
| 1887 | * For a filesystem that just pins its dentries in memory and never |
| 1888 | * performs lookups at all, return an unhashed IS_ROOT dentry. |
* This is used for pipes, sockets et al. - the stuff that should
| 1890 | * never be anyone's children or parents. Unlike all other |
| 1891 | * dentries, these will not have RCU delay between dropping the |
| 1892 | * last reference and freeing them. |
| 1893 | * |
| 1894 | * The only user is alloc_file_pseudo() and that's what should |
| 1895 | * be considered a public interface. Don't use directly. |
| 1896 | */ |
| 1897 | struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) |
| 1898 | { |
| 1899 | struct dentry *dentry = __d_alloc(sb, name); |
| 1900 | if (likely(dentry)) |
| 1901 | dentry->d_flags |= DCACHE_NORCU; |
| 1902 | return dentry; |
| 1903 | } |
| 1904 | |
| 1905 | struct dentry *d_alloc_name(struct dentry *parent, const char *name) |
| 1906 | { |
| 1907 | struct qstr q; |
| 1908 | |
| 1909 | q.name = name; |
| 1910 | q.hash_len = hashlen_string(parent, name); |
| 1911 | return d_alloc(parent, &q); |
| 1912 | } |
| 1913 | EXPORT_SYMBOL(d_alloc_name); |
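
/*
* Usage sketch (illustrative only; the names are hypothetical and
* error handling is abbreviated): creating a named, hashed child under
* a parent we hold, as a pseudo filesystem populating its tree might:
*
*	struct dentry *child = d_alloc_name(parent, "status");
*	if (!child)
*		return -ENOMEM;
*	d_add(child, inode);	// hashes child, consumes the inode ref
*/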
| 1914 | |
| 1915 | void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) |
| 1916 | { |
| 1917 | WARN_ON_ONCE(dentry->d_op); |
| 1918 | WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | |
| 1919 | DCACHE_OP_COMPARE | |
| 1920 | DCACHE_OP_REVALIDATE | |
| 1921 | DCACHE_OP_WEAK_REVALIDATE | |
| 1922 | DCACHE_OP_DELETE | |
| 1923 | DCACHE_OP_REAL)); |
| 1924 | dentry->d_op = op; |
| 1925 | if (!op) |
| 1926 | return; |
| 1927 | if (op->d_hash) |
| 1928 | dentry->d_flags |= DCACHE_OP_HASH; |
| 1929 | if (op->d_compare) |
| 1930 | dentry->d_flags |= DCACHE_OP_COMPARE; |
| 1931 | if (op->d_revalidate) |
| 1932 | dentry->d_flags |= DCACHE_OP_REVALIDATE; |
| 1933 | if (op->d_weak_revalidate) |
| 1934 | dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; |
| 1935 | if (op->d_delete) |
| 1936 | dentry->d_flags |= DCACHE_OP_DELETE; |
| 1937 | if (op->d_prune) |
| 1938 | dentry->d_flags |= DCACHE_OP_PRUNE; |
| 1939 | if (op->d_real) |
| 1940 | dentry->d_flags |= DCACHE_OP_REAL; |
| 1942 | } |
| 1943 | EXPORT_SYMBOL(d_set_d_op); |
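
/*
* A minimal sketch of how d_set_d_op() is usually reached (all names
* below are hypothetical): a filesystem sets sb->s_d_op once at mount
* time, and __d_alloc() then applies it to every dentry created on
* that superblock:
*
*	static const struct dentry_operations myfs_dentry_ops = {
*		.d_revalidate	= myfs_d_revalidate,
*		.d_delete	= myfs_d_delete,
*	};
*
*	// in myfs_fill_super():
*	sb->s_d_op = &myfs_dentry_ops;
*/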
| 1944 | |
| 1945 | |
/**
* d_set_fallthru - Mark a dentry as falling through to a lower layer
* @dentry: The dentry to mark
*
* Mark a dentry as falling through to the lower layer of a layered
* (union-style) filesystem. This flag may be recorded on the medium.
*/
| 1953 | void d_set_fallthru(struct dentry *dentry) |
| 1954 | { |
| 1955 | spin_lock(&dentry->d_lock); |
| 1956 | dentry->d_flags |= DCACHE_FALLTHRU; |
| 1957 | spin_unlock(&dentry->d_lock); |
| 1958 | } |
| 1959 | EXPORT_SYMBOL(d_set_fallthru); |
| 1960 | |
| 1961 | static unsigned d_flags_for_inode(struct inode *inode) |
| 1962 | { |
| 1963 | unsigned add_flags = DCACHE_REGULAR_TYPE; |
| 1964 | |
| 1965 | if (!inode) |
| 1966 | return DCACHE_MISS_TYPE; |
| 1967 | |
| 1968 | if (S_ISDIR(inode->i_mode)) { |
| 1969 | add_flags = DCACHE_DIRECTORY_TYPE; |
| 1970 | if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { |
| 1971 | if (unlikely(!inode->i_op->lookup)) |
| 1972 | add_flags = DCACHE_AUTODIR_TYPE; |
| 1973 | else |
| 1974 | inode->i_opflags |= IOP_LOOKUP; |
| 1975 | } |
| 1976 | goto type_determined; |
| 1977 | } |
| 1978 | |
| 1979 | if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { |
| 1980 | if (unlikely(inode->i_op->get_link)) { |
| 1981 | add_flags = DCACHE_SYMLINK_TYPE; |
| 1982 | goto type_determined; |
| 1983 | } |
| 1984 | inode->i_opflags |= IOP_NOFOLLOW; |
| 1985 | } |
| 1986 | |
| 1987 | if (unlikely(!S_ISREG(inode->i_mode))) |
| 1988 | add_flags = DCACHE_SPECIAL_TYPE; |
| 1989 | |
| 1990 | type_determined: |
| 1991 | if (unlikely(IS_AUTOMOUNT(inode))) |
| 1992 | add_flags |= DCACHE_NEED_AUTOMOUNT; |
| 1993 | return add_flags; |
| 1994 | } |
| 1995 | |
| 1996 | static void __d_instantiate(struct dentry *dentry, struct inode *inode) |
| 1997 | { |
| 1998 | unsigned add_flags = d_flags_for_inode(inode); |
| 1999 | WARN_ON(d_in_lookup(dentry)); |
| 2000 | |
| 2001 | spin_lock(&dentry->d_lock); |
| 2002 | /* |
| 2003 | * Decrement negative dentry count if it was in the LRU list. |
| 2004 | */ |
| 2005 | if (dentry->d_flags & DCACHE_LRU_LIST) |
| 2006 | this_cpu_dec(nr_dentry_negative); |
| 2007 | hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); |
| 2008 | raw_write_seqcount_begin(&dentry->d_seq); |
| 2009 | __d_set_inode_and_type(dentry, inode, add_flags); |
| 2010 | raw_write_seqcount_end(&dentry->d_seq); |
| 2011 | fsnotify_update_flags(dentry); |
| 2012 | spin_unlock(&dentry->d_lock); |
| 2013 | } |
| 2014 | |
| 2015 | /** |
| 2016 | * d_instantiate - fill in inode information for a dentry |
| 2017 | * @entry: dentry to complete |
| 2018 | * @inode: inode to attach to this dentry |
| 2019 | * |
| 2020 | * Fill in inode information in the entry. |
| 2021 | * |
| 2022 | * This turns negative dentries into productive full members |
| 2023 | * of society. |
| 2024 | * |
| 2025 | * NOTE! This assumes that the inode count has been incremented |
| 2026 | * (or otherwise set) by the caller to indicate that it is now |
| 2027 | * in use by the dcache. |
| 2028 | */ |
| 2029 | |
| 2030 | void d_instantiate(struct dentry *entry, struct inode * inode) |
| 2031 | { |
| 2032 | BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); |
| 2033 | if (inode) { |
| 2034 | security_d_instantiate(entry, inode); |
| 2035 | spin_lock(&inode->i_lock); |
| 2036 | __d_instantiate(entry, inode); |
| 2037 | spin_unlock(&inode->i_lock); |
| 2038 | } |
| 2039 | } |
| 2040 | EXPORT_SYMBOL(d_instantiate); |
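
/*
* Usage sketch (illustrative only): the tail of a typical ->create()
* method, once the filesystem has set up a fresh inode.
* myfs_new_inode() is a hypothetical helper:
*
*	inode = myfs_new_inode(dir, mode);
*	if (IS_ERR(inode))
*		return PTR_ERR(inode);
*	d_instantiate(dentry, inode);	// dentry goes positive
*	return 0;
*
* The inode reference is handed over to the dcache, per the NOTE above.
*/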
| 2041 | |
| 2042 | /* |
| 2043 | * This should be equivalent to d_instantiate() + unlock_new_inode(), |
| 2044 | * with lockdep-related part of unlock_new_inode() done before |
| 2045 | * anything else. Use that instead of open-coding d_instantiate()/ |
| 2046 | * unlock_new_inode() combinations. |
| 2047 | */ |
| 2048 | void d_instantiate_new(struct dentry *entry, struct inode *inode) |
| 2049 | { |
| 2050 | BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); |
| 2051 | BUG_ON(!inode); |
| 2052 | lockdep_annotate_inode_mutex_key(inode); |
| 2053 | security_d_instantiate(entry, inode); |
| 2054 | spin_lock(&inode->i_lock); |
| 2055 | __d_instantiate(entry, inode); |
| 2056 | WARN_ON(!(inode->i_state & I_NEW)); |
| 2057 | inode->i_state &= ~I_NEW & ~I_CREATING; |
| 2058 | smp_mb(); |
| 2059 | wake_up_bit(&inode->i_state, __I_NEW); |
| 2060 | spin_unlock(&inode->i_lock); |
| 2061 | } |
| 2062 | EXPORT_SYMBOL(d_instantiate_new); |
| 2063 | |
| 2064 | struct dentry *d_make_root(struct inode *root_inode) |
| 2065 | { |
| 2066 | struct dentry *res = NULL; |
| 2067 | |
| 2068 | if (root_inode) { |
| 2069 | res = d_alloc_anon(root_inode->i_sb); |
| 2070 | if (res) |
| 2071 | d_instantiate(res, root_inode); |
| 2072 | else |
| 2073 | iput(root_inode); |
| 2074 | } |
| 2075 | return res; |
| 2076 | } |
| 2077 | EXPORT_SYMBOL(d_make_root); |
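
/*
* Usage sketch (illustrative only) of a ->fill_super() tail;
* myfs_get_root_inode() is a hypothetical helper. Note that
* d_make_root() consumes the inode reference even on failure, so the
* error path needs no iput():
*
*	inode = myfs_get_root_inode(sb);
*	sb->s_root = d_make_root(inode);
*	if (!sb->s_root)
*		return -ENOMEM;
*	return 0;
*/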
| 2078 | |
| 2079 | static struct dentry *__d_instantiate_anon(struct dentry *dentry, |
| 2080 | struct inode *inode, |
| 2081 | bool disconnected) |
| 2082 | { |
| 2083 | struct dentry *res; |
| 2084 | unsigned add_flags; |
| 2085 | |
| 2086 | security_d_instantiate(dentry, inode); |
| 2087 | spin_lock(&inode->i_lock); |
| 2088 | res = __d_find_any_alias(inode); |
| 2089 | if (res) { |
| 2090 | spin_unlock(&inode->i_lock); |
| 2091 | dput(dentry); |
| 2092 | goto out_iput; |
| 2093 | } |
| 2094 | |
| 2095 | /* attach a disconnected dentry */ |
| 2096 | add_flags = d_flags_for_inode(inode); |
| 2097 | |
| 2098 | if (disconnected) |
| 2099 | add_flags |= DCACHE_DISCONNECTED; |
| 2100 | |
| 2101 | spin_lock(&dentry->d_lock); |
| 2102 | __d_set_inode_and_type(dentry, inode, add_flags); |
| 2103 | hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); |
| 2104 | if (!disconnected) { |
| 2105 | hlist_bl_lock(&dentry->d_sb->s_roots); |
| 2106 | hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots); |
| 2107 | hlist_bl_unlock(&dentry->d_sb->s_roots); |
| 2108 | } |
| 2109 | spin_unlock(&dentry->d_lock); |
| 2110 | spin_unlock(&inode->i_lock); |
| 2111 | |
| 2112 | return dentry; |
| 2113 | |
| 2114 | out_iput: |
| 2115 | iput(inode); |
| 2116 | return res; |
| 2117 | } |
| 2118 | |
| 2119 | struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode) |
| 2120 | { |
| 2121 | return __d_instantiate_anon(dentry, inode, true); |
| 2122 | } |
| 2123 | EXPORT_SYMBOL(d_instantiate_anon); |
| 2124 | |
| 2125 | static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected) |
| 2126 | { |
| 2127 | struct dentry *tmp; |
| 2128 | struct dentry *res; |
| 2129 | |
| 2130 | if (!inode) |
| 2131 | return ERR_PTR(-ESTALE); |
| 2132 | if (IS_ERR(inode)) |
| 2133 | return ERR_CAST(inode); |
| 2134 | |
| 2135 | res = d_find_any_alias(inode); |
| 2136 | if (res) |
| 2137 | goto out_iput; |
| 2138 | |
| 2139 | tmp = d_alloc_anon(inode->i_sb); |
| 2140 | if (!tmp) { |
| 2141 | res = ERR_PTR(-ENOMEM); |
| 2142 | goto out_iput; |
| 2143 | } |
| 2144 | |
| 2145 | return __d_instantiate_anon(tmp, inode, disconnected); |
| 2146 | |
| 2147 | out_iput: |
| 2148 | iput(inode); |
| 2149 | return res; |
| 2150 | } |
| 2151 | |
| 2152 | /** |
| 2153 | * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode |
| 2154 | * @inode: inode to allocate the dentry for |
| 2155 | * |
| 2156 | * Obtain a dentry for an inode resulting from NFS filehandle conversion or |
| 2157 | * similar open by handle operations. The returned dentry may be anonymous, |
| 2158 | * or may have a full name (if the inode was already in the cache). |
| 2159 | * |
| 2160 | * When called on a directory inode, we must ensure that the inode only ever |
| 2161 | * has one dentry. If a dentry is found, that is returned instead of |
| 2162 | * allocating a new one. |
| 2163 | * |
| 2164 | * On successful return, the reference to the inode has been transferred |
| 2165 | * to the dentry. In case of an error the reference on the inode is released. |
| 2166 | * To make it easier to use in export operations a %NULL or IS_ERR inode may |
| 2167 | * be passed in and the error will be propagated to the return value, |
| 2168 | * with a %NULL @inode replaced by ERR_PTR(-ESTALE). |
| 2169 | */ |
| 2170 | struct dentry *d_obtain_alias(struct inode *inode) |
| 2171 | { |
| 2172 | return __d_obtain_alias(inode, true); |
| 2173 | } |
| 2174 | EXPORT_SYMBOL(d_obtain_alias); |
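
/*
* Usage sketch (illustrative only): the usual shape of an
* export_operations ->fh_to_dentry() method, relying on both error
* conventions described above. myfs_iget() is a hypothetical helper
* that may return an inode, %NULL or an ERR_PTR():
*
*	static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
*			struct fid *fid, int fh_len, int fh_type)
*	{
*		return d_obtain_alias(myfs_iget(sb, fid->i32.ino));
*	}
*/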
| 2175 | |
| 2176 | /** |
| 2177 | * d_obtain_root - find or allocate a dentry for a given inode |
| 2178 | * @inode: inode to allocate the dentry for |
| 2179 | * |
| 2180 | * Obtain an IS_ROOT dentry for the root of a filesystem. |
| 2181 | * |
| 2182 | * We must ensure that directory inodes only ever have one dentry. If a |
| 2183 | * dentry is found, that is returned instead of allocating a new one. |
| 2184 | * |
| 2185 | * On successful return, the reference to the inode has been transferred |
| 2186 | * to the dentry. In case of an error the reference on the inode is |
* released. A %NULL or IS_ERR inode may be passed in and the error
* will be propagated to the return value, with a %NULL @inode
* replaced by ERR_PTR(-ESTALE).
| 2190 | */ |
| 2191 | struct dentry *d_obtain_root(struct inode *inode) |
| 2192 | { |
| 2193 | return __d_obtain_alias(inode, false); |
| 2194 | } |
| 2195 | EXPORT_SYMBOL(d_obtain_root); |
| 2196 | |
| 2197 | /** |
| 2198 | * d_add_ci - lookup or allocate new dentry with case-exact name |
| 2199 | * @inode: the inode case-insensitive lookup has found |
| 2200 | * @dentry: the negative dentry that was passed to the parent's lookup func |
| 2201 | * @name: the case-exact name to be associated with the returned dentry |
| 2202 | * |
* This is to avoid filling the dcache with case-insensitive names for
* the same inode: only the actual correct case is stored in the dcache
* for case-insensitive filesystems.
*
* If a case-insensitive lookup matches and a dentry with the case-exact
* name already exists in the dcache, use it and return it.
*
* If no entry exists with the exact case name, allocate a new dentry
* with the exact case, and return the spliced entry.
| 2212 | */ |
| 2213 | struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, |
| 2214 | struct qstr *name) |
| 2215 | { |
| 2216 | struct dentry *found, *res; |
| 2217 | |
| 2218 | /* |
| 2219 | * First check if a dentry matching the name already exists, |
| 2220 | * if not go ahead and create it now. |
| 2221 | */ |
| 2222 | found = d_hash_and_lookup(dentry->d_parent, name); |
| 2223 | if (found) { |
| 2224 | iput(inode); |
| 2225 | return found; |
| 2226 | } |
| 2227 | if (d_in_lookup(dentry)) { |
| 2228 | found = d_alloc_parallel(dentry->d_parent, name, |
| 2229 | dentry->d_wait); |
| 2230 | if (IS_ERR(found) || !d_in_lookup(found)) { |
| 2231 | iput(inode); |
| 2232 | return found; |
| 2233 | } |
| 2234 | } else { |
| 2235 | found = d_alloc(dentry->d_parent, name); |
| 2236 | if (!found) { |
| 2237 | iput(inode); |
| 2238 | return ERR_PTR(-ENOMEM); |
| 2239 | } |
| 2240 | } |
| 2241 | res = d_splice_alias(inode, found); |
| 2242 | if (res) { |
| 2243 | d_lookup_done(found); |
| 2244 | dput(found); |
| 2245 | return res; |
| 2246 | } |
| 2247 | return found; |
| 2248 | } |
| 2249 | EXPORT_SYMBOL(d_add_ci); |
| 2250 | |
| 2251 | /** |
| 2252 | * d_same_name - compare dentry name with case-exact name |
* @dentry: the dentry whose name to compare
* @parent: parent dentry, used to select the comparison function
* @name: the case-exact name to compare against
*
* Return: true if the names are the same, false otherwise
| 2258 | */ |
| 2259 | bool d_same_name(const struct dentry *dentry, const struct dentry *parent, |
| 2260 | const struct qstr *name) |
| 2261 | { |
| 2262 | if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) { |
| 2263 | if (dentry->d_name.len != name->len) |
| 2264 | return false; |
| 2265 | return dentry_cmp(dentry, name->name, name->len) == 0; |
| 2266 | } |
| 2267 | return parent->d_op->d_compare(dentry, |
| 2268 | dentry->d_name.len, dentry->d_name.name, |
| 2269 | name) == 0; |
| 2270 | } |
| 2271 | EXPORT_SYMBOL_GPL(d_same_name); |
| 2272 | |
| 2273 | /* |
| 2274 | * This is __d_lookup_rcu() when the parent dentry has |
| 2275 | * DCACHE_OP_COMPARE, which makes things much nastier. |
| 2276 | */ |
| 2277 | static noinline struct dentry *__d_lookup_rcu_op_compare( |
| 2278 | const struct dentry *parent, |
| 2279 | const struct qstr *name, |
| 2280 | unsigned *seqp) |
| 2281 | { |
| 2282 | u64 hashlen = name->hash_len; |
| 2283 | struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen)); |
| 2284 | struct hlist_bl_node *node; |
| 2285 | struct dentry *dentry; |
| 2286 | |
| 2287 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { |
| 2288 | int tlen; |
| 2289 | const char *tname; |
| 2290 | unsigned seq; |
| 2291 | |
| 2292 | seqretry: |
| 2293 | seq = raw_seqcount_begin(&dentry->d_seq); |
| 2294 | if (dentry->d_parent != parent) |
| 2295 | continue; |
| 2296 | if (d_unhashed(dentry)) |
| 2297 | continue; |
| 2298 | if (dentry->d_name.hash != hashlen_hash(hashlen)) |
| 2299 | continue; |
| 2300 | tlen = dentry->d_name.len; |
| 2301 | tname = dentry->d_name.name; |
| 2302 | /* we want a consistent (name,len) pair */ |
| 2303 | if (read_seqcount_retry(&dentry->d_seq, seq)) { |
| 2304 | cpu_relax(); |
| 2305 | goto seqretry; |
| 2306 | } |
| 2307 | if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0) |
| 2308 | continue; |
| 2309 | *seqp = seq; |
| 2310 | return dentry; |
| 2311 | } |
| 2312 | return NULL; |
| 2313 | } |
| 2314 | |
| 2315 | /** |
| 2316 | * __d_lookup_rcu - search for a dentry (racy, store-free) |
| 2317 | * @parent: parent dentry |
| 2318 | * @name: qstr of name we wish to find |
| 2319 | * @seqp: returns d_seq value at the point where the dentry was found |
| 2320 | * Returns: dentry, or NULL |
| 2321 | * |
| 2322 | * __d_lookup_rcu is the dcache lookup function for rcu-walk name |
| 2323 | * resolution (store-free path walking) design described in |
| 2324 | * Documentation/filesystems/path-lookup.txt. |
| 2325 | * |
| 2326 | * This is not to be used outside core vfs. |
| 2327 | * |
* __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
* held, and rcu_read_lock held. The returned dentry must not be used
* without taking d_lock and checking that the d_seq sequence count still
* matches the @seq returned here.
| 2332 | * |
| 2333 | * A refcount may be taken on the found dentry with the d_rcu_to_refcount |
| 2334 | * function. |
| 2335 | * |
| 2336 | * Alternatively, __d_lookup_rcu may be called again to look up the child of |
| 2337 | * the returned dentry, so long as its parent's seqlock is checked after the |
| 2338 | * child is looked up. Thus, an interlocking stepping of sequence lock checks |
| 2339 | * is formed, giving integrity down the path walk. |
| 2340 | * |
| 2341 | * NOTE! The caller *has* to check the resulting dentry against the sequence |
| 2342 | * number we've returned before using any of the resulting dentry state! |
| 2343 | */ |
| 2344 | struct dentry *__d_lookup_rcu(const struct dentry *parent, |
| 2345 | const struct qstr *name, |
| 2346 | unsigned *seqp) |
| 2347 | { |
| 2348 | u64 hashlen = name->hash_len; |
| 2349 | const unsigned char *str = name->name; |
| 2350 | struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen)); |
| 2351 | struct hlist_bl_node *node; |
| 2352 | struct dentry *dentry; |
| 2353 | |
| 2354 | /* |
* Note: There is significant duplication with __d_lookup() below, which
* is required to prevent single threaded performance regressions,
* especially on architectures where smp_rmb (in seqcounts) is costly.
| 2358 | * Keep the two functions in sync. |
| 2359 | */ |
| 2360 | |
| 2361 | if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) |
| 2362 | return __d_lookup_rcu_op_compare(parent, name, seqp); |
| 2363 | |
| 2364 | /* |
| 2365 | * The hash list is protected using RCU. |
| 2366 | * |
| 2367 | * Carefully use d_seq when comparing a candidate dentry, to avoid |
| 2368 | * races with d_move(). |
| 2369 | * |
| 2370 | * It is possible that concurrent renames can mess up our list |
| 2371 | * walk here and result in missing our dentry, resulting in the |
| 2372 | * false-negative result. d_lookup() protects against concurrent |
| 2373 | * renames using rename_lock seqlock. |
| 2374 | * |
| 2375 | * See Documentation/filesystems/path-lookup.txt for more details. |
| 2376 | */ |
| 2377 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { |
| 2378 | unsigned seq; |
| 2379 | |
| 2380 | /* |
| 2381 | * The dentry sequence count protects us from concurrent |
| 2382 | * renames, and thus protects parent and name fields. |
| 2383 | * |
| 2384 | * The caller must perform a seqcount check in order |
| 2385 | * to do anything useful with the returned dentry. |
| 2386 | * |
| 2387 | * NOTE! We do a "raw" seqcount_begin here. That means that |
| 2388 | * we don't wait for the sequence count to stabilize if it |
| 2389 | * is in the middle of a sequence change. If we do the slow |
| 2390 | * dentry compare, we will do seqretries until it is stable, |
| 2391 | * and if we end up with a successful lookup, we actually |
| 2392 | * want to exit RCU lookup anyway. |
| 2393 | * |
| 2394 | * Note that raw_seqcount_begin still *does* smp_rmb(), so |
| 2395 | * we are still guaranteed NUL-termination of ->d_name.name. |
| 2396 | */ |
| 2397 | seq = raw_seqcount_begin(&dentry->d_seq); |
| 2398 | if (dentry->d_parent != parent) |
| 2399 | continue; |
| 2400 | if (d_unhashed(dentry)) |
| 2401 | continue; |
| 2402 | if (dentry->d_name.hash_len != hashlen) |
| 2403 | continue; |
| 2404 | if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0) |
| 2405 | continue; |
| 2406 | *seqp = seq; |
| 2407 | return dentry; |
| 2408 | } |
| 2409 | return NULL; |
| 2410 | } |
| 2411 | |
| 2412 | /** |
| 2413 | * d_lookup - search for a dentry |
| 2414 | * @parent: parent dentry |
| 2415 | * @name: qstr of name we wish to find |
| 2416 | * Returns: dentry, or NULL |
| 2417 | * |
| 2418 | * d_lookup searches the children of the parent dentry for the name in |
| 2419 | * question. If the dentry is found its reference count is incremented and the |
| 2420 | * dentry is returned. The caller must use dput to free the entry when it has |
| 2421 | * finished using it. %NULL is returned if the dentry does not exist. |
| 2422 | */ |
| 2423 | struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name) |
| 2424 | { |
| 2425 | struct dentry *dentry; |
| 2426 | unsigned seq; |
| 2427 | |
| 2428 | do { |
| 2429 | seq = read_seqbegin(&rename_lock); |
| 2430 | dentry = __d_lookup(parent, name); |
| 2431 | if (dentry) |
| 2432 | break; |
| 2433 | } while (read_seqretry(&rename_lock, seq)); |
| 2434 | return dentry; |
| 2435 | } |
| 2436 | EXPORT_SYMBOL(d_lookup); |
| 2437 | |
| 2438 | /** |
| 2439 | * __d_lookup - search for a dentry (racy) |
| 2440 | * @parent: parent dentry |
| 2441 | * @name: qstr of name we wish to find |
| 2442 | * Returns: dentry, or NULL |
| 2443 | * |
| 2444 | * __d_lookup is like d_lookup, however it may (rarely) return a |
| 2445 | * false-negative result due to unrelated rename activity. |
| 2446 | * |
| 2447 | * __d_lookup is slightly faster by avoiding rename_lock read seqlock, |
* however it must be used carefully, e.g. with a following d_lookup in
| 2449 | * the case of failure. |
| 2450 | * |
| 2451 | * __d_lookup callers must be commented. |
| 2452 | */ |
| 2453 | struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) |
| 2454 | { |
| 2455 | unsigned int hash = name->hash; |
| 2456 | struct hlist_bl_head *b = d_hash(hash); |
| 2457 | struct hlist_bl_node *node; |
| 2458 | struct dentry *found = NULL; |
| 2459 | struct dentry *dentry; |
| 2460 | |
| 2461 | /* |
| 2462 | * Note: There is significant duplication with __d_lookup_rcu which is |
* required to prevent single threaded performance regressions,
* especially on architectures where smp_rmb (in seqcounts) is costly.
| 2465 | * Keep the two functions in sync. |
| 2466 | */ |
| 2467 | |
| 2468 | /* |
| 2469 | * The hash list is protected using RCU. |
| 2470 | * |
| 2471 | * Take d_lock when comparing a candidate dentry, to avoid races |
| 2472 | * with d_move(). |
| 2473 | * |
| 2474 | * It is possible that concurrent renames can mess up our list |
| 2475 | * walk here and result in missing our dentry, resulting in the |
| 2476 | * false-negative result. d_lookup() protects against concurrent |
| 2477 | * renames using rename_lock seqlock. |
| 2478 | * |
| 2479 | * See Documentation/filesystems/path-lookup.txt for more details. |
| 2480 | */ |
| 2481 | rcu_read_lock(); |
| 2482 | |
| 2483 | hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { |
| 2484 | |
| 2485 | if (dentry->d_name.hash != hash) |
| 2486 | continue; |
| 2487 | |
| 2488 | spin_lock(&dentry->d_lock); |
| 2489 | if (dentry->d_parent != parent) |
| 2490 | goto next; |
| 2491 | if (d_unhashed(dentry)) |
| 2492 | goto next; |
| 2493 | |
| 2494 | if (!d_same_name(dentry, parent, name)) |
| 2495 | goto next; |
| 2496 | |
| 2497 | dentry->d_lockref.count++; |
| 2498 | found = dentry; |
| 2499 | spin_unlock(&dentry->d_lock); |
| 2500 | break; |
| 2501 | next: |
| 2502 | spin_unlock(&dentry->d_lock); |
| 2503 | } |
| 2504 | rcu_read_unlock(); |
| 2505 | |
| 2506 | return found; |
| 2507 | } |
| 2508 | |
| 2509 | /** |
| 2510 | * d_hash_and_lookup - hash the qstr then search for a dentry |
| 2511 | * @dir: Directory to search in |
| 2512 | * @name: qstr of name we wish to find |
| 2513 | * |
* On lookup failure NULL is returned; on a bad name ERR_PTR(-error) is returned
| 2515 | */ |
| 2516 | struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) |
| 2517 | { |
| 2518 | /* |
| 2519 | * Check for a fs-specific hash function. Note that we must |
| 2520 | * calculate the standard hash first, as the d_op->d_hash() |
| 2521 | * routine may choose to leave the hash value unchanged. |
| 2522 | */ |
| 2523 | name->hash = full_name_hash(dir, name->name, name->len); |
| 2524 | if (dir->d_flags & DCACHE_OP_HASH) { |
| 2525 | int err = dir->d_op->d_hash(dir, name); |
| 2526 | if (unlikely(err < 0)) |
| 2527 | return ERR_PTR(err); |
| 2528 | } |
| 2529 | return d_lookup(dir, name); |
| 2530 | } |
| 2531 | EXPORT_SYMBOL(d_hash_and_lookup); |
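
/*
* Usage sketch (illustrative only): looking up a cached child by a
* C-string name from code that does not come in through normal path
* walking:
*
*	struct qstr q = QSTR_INIT(name, strlen(name));
*	struct dentry *de = d_hash_and_lookup(dir, &q);
*
*	if (IS_ERR(de))		// ->d_hash() rejected the name
*		return PTR_ERR(de);
*	if (de) {
*		...		// use it
*		dput(de);	// drop the reference d_lookup() took
*	}
*/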
| 2532 | |
| 2533 | /* |
| 2534 | * When a file is deleted, we have two options: |
| 2535 | * - turn this dentry into a negative dentry |
| 2536 | * - unhash this dentry and free it. |
| 2537 | * |
| 2538 | * Usually, we want to just turn this into |
| 2539 | * a negative dentry, but if anybody else is |
| 2540 | * currently using the dentry or the inode |
| 2541 | * we can't do that and we fall back on removing |
| 2542 | * it from the hash queues and waiting for |
* it to be deleted later when it has no users.
| 2544 | */ |
| 2545 | |
| 2546 | /** |
| 2547 | * d_delete - delete a dentry |
| 2548 | * @dentry: The dentry to delete |
| 2549 | * |
| 2550 | * Turn the dentry into a negative dentry if possible, otherwise |
| 2551 | * remove it from the hash queues so it can be deleted later |
| 2552 | */ |
| 2553 | |
| 2554 | void d_delete(struct dentry * dentry) |
| 2555 | { |
| 2556 | struct inode *inode = dentry->d_inode; |
| 2557 | |
| 2558 | spin_lock(&inode->i_lock); |
| 2559 | spin_lock(&dentry->d_lock); |
| 2560 | /* |
| 2561 | * Are we the only user? |
| 2562 | */ |
| 2563 | if (dentry->d_lockref.count == 1) { |
| 2564 | dentry->d_flags &= ~DCACHE_CANT_MOUNT; |
| 2565 | dentry_unlink_inode(dentry); |
| 2566 | } else { |
| 2567 | __d_drop(dentry); |
| 2568 | spin_unlock(&dentry->d_lock); |
| 2569 | spin_unlock(&inode->i_lock); |
| 2570 | } |
| 2571 | } |
| 2572 | EXPORT_SYMBOL(d_delete); |
| 2573 | |
| 2574 | static void __d_rehash(struct dentry *entry) |
| 2575 | { |
| 2576 | struct hlist_bl_head *b = d_hash(entry->d_name.hash); |
| 2577 | |
| 2578 | hlist_bl_lock(b); |
| 2579 | hlist_bl_add_head_rcu(&entry->d_hash, b); |
| 2580 | hlist_bl_unlock(b); |
| 2581 | } |
| 2582 | |
| 2583 | /** |
| 2584 | * d_rehash - add an entry back to the hash |
| 2585 | * @entry: dentry to add to the hash |
| 2586 | * |
| 2587 | * Adds a dentry to the hash according to its name. |
| 2588 | */ |
| 2589 | |
| 2590 | void d_rehash(struct dentry * entry) |
| 2591 | { |
| 2592 | spin_lock(&entry->d_lock); |
| 2593 | __d_rehash(entry); |
| 2594 | spin_unlock(&entry->d_lock); |
| 2595 | } |
| 2596 | EXPORT_SYMBOL(d_rehash); |
| 2597 | |
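/*
* i_dir_seq acts as a sequence count for in-progress additions to a
* directory: start_dir_add() makes it odd (claiming it via cmpxchg),
* end_dir_add() makes it even again and wakes any waiters.
* d_alloc_parallel() samples it and retries whenever it observes an
* odd value or a change, which is what makes parallel lookups in the
* same directory safe without a per-directory lock.
*/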
| 2598 | static inline unsigned start_dir_add(struct inode *dir) |
| 2599 | { |
| 2600 | preempt_disable_nested(); |
| 2601 | for (;;) { |
| 2602 | unsigned n = dir->i_dir_seq; |
| 2603 | if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) |
| 2604 | return n; |
| 2605 | cpu_relax(); |
| 2606 | } |
| 2607 | } |
| 2608 | |
| 2609 | static inline void end_dir_add(struct inode *dir, unsigned int n, |
| 2610 | wait_queue_head_t *d_wait) |
| 2611 | { |
| 2612 | smp_store_release(&dir->i_dir_seq, n + 2); |
| 2613 | preempt_enable_nested(); |
| 2614 | wake_up_all(d_wait); |
| 2615 | } |
| 2616 | |
| 2617 | static void d_wait_lookup(struct dentry *dentry) |
| 2618 | { |
| 2619 | if (d_in_lookup(dentry)) { |
| 2620 | DECLARE_WAITQUEUE(wait, current); |
| 2621 | add_wait_queue(dentry->d_wait, &wait); |
| 2622 | do { |
| 2623 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 2624 | spin_unlock(&dentry->d_lock); |
| 2625 | schedule(); |
| 2626 | spin_lock(&dentry->d_lock); |
| 2627 | } while (d_in_lookup(dentry)); |
| 2628 | } |
| 2629 | } |
| 2630 | |
| 2631 | struct dentry *d_alloc_parallel(struct dentry *parent, |
| 2632 | const struct qstr *name, |
| 2633 | wait_queue_head_t *wq) |
| 2634 | { |
| 2635 | unsigned int hash = name->hash; |
| 2636 | struct hlist_bl_head *b = in_lookup_hash(parent, hash); |
| 2637 | struct hlist_bl_node *node; |
| 2638 | struct dentry *new = d_alloc(parent, name); |
| 2639 | struct dentry *dentry; |
| 2640 | unsigned seq, r_seq, d_seq; |
| 2641 | |
| 2642 | if (unlikely(!new)) |
| 2643 | return ERR_PTR(-ENOMEM); |
| 2644 | |
| 2645 | retry: |
| 2646 | rcu_read_lock(); |
| 2647 | seq = smp_load_acquire(&parent->d_inode->i_dir_seq); |
| 2648 | r_seq = read_seqbegin(&rename_lock); |
| 2649 | dentry = __d_lookup_rcu(parent, name, &d_seq); |
| 2650 | if (unlikely(dentry)) { |
| 2651 | if (!lockref_get_not_dead(&dentry->d_lockref)) { |
| 2652 | rcu_read_unlock(); |
| 2653 | goto retry; |
| 2654 | } |
| 2655 | if (read_seqcount_retry(&dentry->d_seq, d_seq)) { |
| 2656 | rcu_read_unlock(); |
| 2657 | dput(dentry); |
| 2658 | goto retry; |
| 2659 | } |
| 2660 | rcu_read_unlock(); |
| 2661 | dput(new); |
| 2662 | return dentry; |
| 2663 | } |
| 2664 | if (unlikely(read_seqretry(&rename_lock, r_seq))) { |
| 2665 | rcu_read_unlock(); |
| 2666 | goto retry; |
| 2667 | } |
| 2668 | |
| 2669 | if (unlikely(seq & 1)) { |
| 2670 | rcu_read_unlock(); |
| 2671 | goto retry; |
| 2672 | } |
| 2673 | |
| 2674 | hlist_bl_lock(b); |
| 2675 | if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { |
| 2676 | hlist_bl_unlock(b); |
| 2677 | rcu_read_unlock(); |
| 2678 | goto retry; |
| 2679 | } |
| 2680 | /* |
| 2681 | * No changes for the parent since the beginning of d_lookup(). |
| 2682 | * Since all removals from the chain happen with hlist_bl_lock(), |
| 2683 | * any potential in-lookup matches are going to stay here until |
| 2684 | * we unlock the chain. All fields are stable in everything |
| 2685 | * we encounter. |
| 2686 | */ |
| 2687 | hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) { |
| 2688 | if (dentry->d_name.hash != hash) |
| 2689 | continue; |
| 2690 | if (dentry->d_parent != parent) |
| 2691 | continue; |
| 2692 | if (!d_same_name(dentry, parent, name)) |
| 2693 | continue; |
| 2694 | hlist_bl_unlock(b); |
| 2695 | /* now we can try to grab a reference */ |
| 2696 | if (!lockref_get_not_dead(&dentry->d_lockref)) { |
| 2697 | rcu_read_unlock(); |
| 2698 | goto retry; |
| 2699 | } |
| 2700 | |
| 2701 | rcu_read_unlock(); |
| 2702 | /* |
| 2703 | * somebody is likely to be still doing lookup for it; |
| 2704 | * wait for them to finish |
| 2705 | */ |
| 2706 | spin_lock(&dentry->d_lock); |
| 2707 | d_wait_lookup(dentry); |
| 2708 | /* |
| 2709 | * it's not in-lookup anymore; in principle we should repeat |
| 2710 | * everything from dcache lookup, but it's likely to be what |
| 2711 | * d_lookup() would've found anyway. If it is, just return it; |
| 2712 | * otherwise we really have to repeat the whole thing. |
| 2713 | */ |
| 2714 | if (unlikely(dentry->d_name.hash != hash)) |
| 2715 | goto mismatch; |
| 2716 | if (unlikely(dentry->d_parent != parent)) |
| 2717 | goto mismatch; |
| 2718 | if (unlikely(d_unhashed(dentry))) |
| 2719 | goto mismatch; |
| 2720 | if (unlikely(!d_same_name(dentry, parent, name))) |
| 2721 | goto mismatch; |
| 2722 | /* OK, it *is* a hashed match; return it */ |
| 2723 | spin_unlock(&dentry->d_lock); |
| 2724 | dput(new); |
| 2725 | return dentry; |
| 2726 | } |
| 2727 | rcu_read_unlock(); |
| 2728 | /* we can't take ->d_lock here; it's OK, though. */ |
| 2729 | new->d_flags |= DCACHE_PAR_LOOKUP; |
| 2730 | new->d_wait = wq; |
| 2731 | hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b); |
| 2732 | hlist_bl_unlock(b); |
| 2733 | return new; |
| 2734 | mismatch: |
| 2735 | spin_unlock(&dentry->d_lock); |
| 2736 | dput(dentry); |
| 2737 | goto retry; |
| 2738 | } |
| 2739 | EXPORT_SYMBOL(d_alloc_parallel); |
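
/*
* Usage sketch (illustrative only; error handling abbreviated): the
* shape of a lookup built on d_alloc_parallel(), similar to what
* lookup_open() in fs/namei.c does:
*
*	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
*	struct dentry *d = d_alloc_parallel(parent, name, &wq);
*
*	if (IS_ERR(d))
*		return d;
*	if (!d_in_lookup(d))
*		return d;	// someone else finished the lookup
*	// We own the in-lookup dentry: have the filesystem resolve it.
*	// d_splice_alias()/d_add() - or d_lookup_done() on failure -
*	// takes it out of the in-lookup hash and wakes waiters on wq.
*/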
| 2740 | |
| 2741 | /* |
| 2742 | * - Unhash the dentry |
| 2743 | * - Retrieve and clear the waitqueue head in dentry |
| 2744 | * - Return the waitqueue head |
| 2745 | */ |
| 2746 | static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry) |
| 2747 | { |
| 2748 | wait_queue_head_t *d_wait; |
| 2749 | struct hlist_bl_head *b; |
| 2750 | |
| 2751 | lockdep_assert_held(&dentry->d_lock); |
| 2752 | |
| 2753 | b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash); |
| 2754 | hlist_bl_lock(b); |
| 2755 | dentry->d_flags &= ~DCACHE_PAR_LOOKUP; |
| 2756 | __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); |
| 2757 | d_wait = dentry->d_wait; |
| 2758 | dentry->d_wait = NULL; |
| 2759 | hlist_bl_unlock(b); |
| 2760 | INIT_HLIST_NODE(&dentry->d_u.d_alias); |
| 2761 | INIT_LIST_HEAD(&dentry->d_lru); |
| 2762 | return d_wait; |
| 2763 | } |
| 2764 | |
| 2765 | void __d_lookup_unhash_wake(struct dentry *dentry) |
| 2766 | { |
| 2767 | spin_lock(&dentry->d_lock); |
| 2768 | wake_up_all(__d_lookup_unhash(dentry)); |
| 2769 | spin_unlock(&dentry->d_lock); |
| 2770 | } |
| 2771 | EXPORT_SYMBOL(__d_lookup_unhash_wake); |
| 2772 | |
| 2773 | /* inode->i_lock held if inode is non-NULL */ |
| 2774 | |
| 2775 | static inline void __d_add(struct dentry *dentry, struct inode *inode) |
| 2776 | { |
| 2777 | wait_queue_head_t *d_wait; |
| 2778 | struct inode *dir = NULL; |
| 2779 | unsigned n; |
| 2780 | spin_lock(&dentry->d_lock); |
| 2781 | if (unlikely(d_in_lookup(dentry))) { |
| 2782 | dir = dentry->d_parent->d_inode; |
| 2783 | n = start_dir_add(dir); |
| 2784 | d_wait = __d_lookup_unhash(dentry); |
| 2785 | } |
| 2786 | if (inode) { |
| 2787 | unsigned add_flags = d_flags_for_inode(inode); |
| 2788 | hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); |
| 2789 | raw_write_seqcount_begin(&dentry->d_seq); |
| 2790 | __d_set_inode_and_type(dentry, inode, add_flags); |
| 2791 | raw_write_seqcount_end(&dentry->d_seq); |
| 2792 | fsnotify_update_flags(dentry); |
| 2793 | } |
| 2794 | __d_rehash(dentry); |
| 2795 | if (dir) |
| 2796 | end_dir_add(dir, n, d_wait); |
| 2797 | spin_unlock(&dentry->d_lock); |
| 2798 | if (inode) |
| 2799 | spin_unlock(&inode->i_lock); |
| 2800 | } |
| 2801 | |
| 2802 | /** |
| 2803 | * d_add - add dentry to hash queues |
| 2804 | * @entry: dentry to add |
| 2805 | * @inode: The inode to attach to this dentry |
| 2806 | * |
| 2807 | * This adds the entry to the hash queues and initializes @inode. |
| 2808 | * The entry was actually filled in earlier during d_alloc(). |
| 2809 | */ |
| 2810 | |
| 2811 | void d_add(struct dentry *entry, struct inode *inode) |
| 2812 | { |
| 2813 | if (inode) { |
| 2814 | security_d_instantiate(entry, inode); |
| 2815 | spin_lock(&inode->i_lock); |
| 2816 | } |
| 2817 | __d_add(entry, inode); |
| 2818 | } |
| 2819 | EXPORT_SYMBOL(d_add); |
| 2820 | |
| 2821 | /** |
| 2822 | * d_exact_alias - find and hash an exact unhashed alias |
| 2823 | * @entry: dentry to add |
| 2824 | * @inode: The inode to go with this dentry |
| 2825 | * |
| 2826 | * If an unhashed dentry with the same name/parent and desired |
| 2827 | * inode already exists, hash and return it. Otherwise, return |
| 2828 | * NULL. |
| 2829 | * |
| 2830 | * Parent directory should be locked. |
| 2831 | */ |
| 2832 | struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode) |
| 2833 | { |
| 2834 | struct dentry *alias; |
| 2835 | unsigned int hash = entry->d_name.hash; |
| 2836 | |
| 2837 | spin_lock(&inode->i_lock); |
| 2838 | hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { |
| 2839 | /* |
| 2840 | * Don't need alias->d_lock here, because aliases with |
| 2841 | * d_parent == entry->d_parent are not subject to name or |
* parent changes, because the parent inode i_rwsem is held.
| 2843 | */ |
| 2844 | if (alias->d_name.hash != hash) |
| 2845 | continue; |
| 2846 | if (alias->d_parent != entry->d_parent) |
| 2847 | continue; |
| 2848 | if (!d_same_name(alias, entry->d_parent, &entry->d_name)) |
| 2849 | continue; |
| 2850 | spin_lock(&alias->d_lock); |
| 2851 | if (!d_unhashed(alias)) { |
| 2852 | spin_unlock(&alias->d_lock); |
| 2853 | alias = NULL; |
| 2854 | } else { |
| 2855 | __dget_dlock(alias); |
| 2856 | __d_rehash(alias); |
| 2857 | spin_unlock(&alias->d_lock); |
| 2858 | } |
| 2859 | spin_unlock(&inode->i_lock); |
| 2860 | return alias; |
| 2861 | } |
| 2862 | spin_unlock(&inode->i_lock); |
| 2863 | return NULL; |
| 2864 | } |
| 2865 | EXPORT_SYMBOL(d_exact_alias); |
| 2866 | |
| 2867 | static void swap_names(struct dentry *dentry, struct dentry *target) |
| 2868 | { |
| 2869 | if (unlikely(dname_external(target))) { |
| 2870 | if (unlikely(dname_external(dentry))) { |
| 2871 | /* |
| 2872 | * Both external: swap the pointers |
| 2873 | */ |
| 2874 | swap(target->d_name.name, dentry->d_name.name); |
| 2875 | } else { |
| 2876 | /* |
| 2877 | * dentry:internal, target:external. Steal target's |
| 2878 | * storage and make target internal. |
| 2879 | */ |
| 2880 | memcpy(target->d_iname, dentry->d_name.name, |
| 2881 | dentry->d_name.len + 1); |
| 2882 | dentry->d_name.name = target->d_name.name; |
| 2883 | target->d_name.name = target->d_iname; |
| 2884 | } |
| 2885 | } else { |
| 2886 | if (unlikely(dname_external(dentry))) { |
| 2887 | /* |
| 2888 | * dentry:external, target:internal. Give dentry's |
| 2889 | * storage to target and make dentry internal |
| 2890 | */ |
| 2891 | memcpy(dentry->d_iname, target->d_name.name, |
| 2892 | target->d_name.len + 1); |
| 2893 | target->d_name.name = dentry->d_name.name; |
| 2894 | dentry->d_name.name = dentry->d_iname; |
| 2895 | } else { |
| 2896 | /* |
| 2897 | * Both are internal. |
| 2898 | */ |
| 2899 | unsigned int i; |
| 2900 | BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); |
| 2901 | for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { |
| 2902 | swap(((long *) &dentry->d_iname)[i], |
| 2903 | ((long *) &target->d_iname)[i]); |
| 2904 | } |
| 2905 | } |
| 2906 | } |
| 2907 | swap(dentry->d_name.hash_len, target->d_name.hash_len); |
| 2908 | } |
| 2909 | |
| 2910 | static void copy_name(struct dentry *dentry, struct dentry *target) |
| 2911 | { |
| 2912 | struct external_name *old_name = NULL; |
| 2913 | if (unlikely(dname_external(dentry))) |
| 2914 | old_name = external_name(dentry); |
| 2915 | if (unlikely(dname_external(target))) { |
| 2916 | atomic_inc(&external_name(target)->u.count); |
| 2917 | dentry->d_name = target->d_name; |
| 2918 | } else { |
| 2919 | memcpy(dentry->d_iname, target->d_name.name, |
| 2920 | target->d_name.len + 1); |
| 2921 | dentry->d_name.name = dentry->d_iname; |
| 2922 | dentry->d_name.hash_len = target->d_name.hash_len; |
| 2923 | } |
| 2924 | if (old_name && likely(atomic_dec_and_test(&old_name->u.count))) |
| 2925 | kfree_rcu(old_name, u.head); |
| 2926 | } |
| 2927 | |
| 2928 | /* |
| 2929 | * __d_move - move a dentry |
| 2930 | * @dentry: entry to move |
| 2931 | * @target: new dentry |
| 2932 | * @exchange: exchange the two dentries |
| 2933 | * |
| 2934 | * Update the dcache to reflect the move of a file name. Negative |
| 2935 | * dcache entries should not be moved in this way. Caller must hold |
* rename_lock, the i_rwsem of the source and target directories,
| 2937 | * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). |
| 2938 | */ |
| 2939 | static void __d_move(struct dentry *dentry, struct dentry *target, |
| 2940 | bool exchange) |
| 2941 | { |
| 2942 | struct dentry *old_parent, *p; |
| 2943 | wait_queue_head_t *d_wait; |
| 2944 | struct inode *dir = NULL; |
| 2945 | unsigned n; |
| 2946 | |
| 2947 | WARN_ON(!dentry->d_inode); |
| 2948 | if (WARN_ON(dentry == target)) |
| 2949 | return; |
| 2950 | |
| 2951 | BUG_ON(d_ancestor(target, dentry)); |
| 2952 | old_parent = dentry->d_parent; |
| 2953 | p = d_ancestor(old_parent, target); |
| 2954 | if (IS_ROOT(dentry)) { |
| 2955 | BUG_ON(p); |
| 2956 | spin_lock(&target->d_parent->d_lock); |
| 2957 | } else if (!p) { |
/* target is not a descendant of dentry->d_parent */
| 2959 | spin_lock(&target->d_parent->d_lock); |
| 2960 | spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED); |
| 2961 | } else { |
| 2962 | BUG_ON(p == dentry); |
| 2963 | spin_lock(&old_parent->d_lock); |
| 2964 | if (p != target) |
| 2965 | spin_lock_nested(&target->d_parent->d_lock, |
| 2966 | DENTRY_D_LOCK_NESTED); |
| 2967 | } |
| 2968 | spin_lock_nested(&dentry->d_lock, 2); |
| 2969 | spin_lock_nested(&target->d_lock, 3); |
| 2970 | |
| 2971 | if (unlikely(d_in_lookup(target))) { |
| 2972 | dir = target->d_parent->d_inode; |
| 2973 | n = start_dir_add(dir); |
| 2974 | d_wait = __d_lookup_unhash(target); |
| 2975 | } |
| 2976 | |
| 2977 | write_seqcount_begin(&dentry->d_seq); |
| 2978 | write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); |
| 2979 | |
| 2980 | /* unhash both */ |
| 2981 | if (!d_unhashed(dentry)) |
| 2982 | ___d_drop(dentry); |
| 2983 | if (!d_unhashed(target)) |
| 2984 | ___d_drop(target); |
| 2985 | |
| 2986 | /* ... and switch them in the tree */ |
| 2987 | dentry->d_parent = target->d_parent; |
| 2988 | if (!exchange) { |
| 2989 | copy_name(dentry, target); |
| 2990 | target->d_hash.pprev = NULL; |
| 2991 | dentry->d_parent->d_lockref.count++; |
| 2992 | if (dentry != old_parent) /* wasn't IS_ROOT */ |
| 2993 | WARN_ON(!--old_parent->d_lockref.count); |
| 2994 | } else { |
| 2995 | target->d_parent = old_parent; |
| 2996 | swap_names(dentry, target); |
| 2997 | list_move(&target->d_child, &target->d_parent->d_subdirs); |
| 2998 | __d_rehash(target); |
| 2999 | fsnotify_update_flags(target); |
| 3000 | } |
| 3001 | list_move(&dentry->d_child, &dentry->d_parent->d_subdirs); |
| 3002 | __d_rehash(dentry); |
| 3003 | fsnotify_update_flags(dentry); |
| 3004 | fscrypt_handle_d_move(dentry); |
| 3005 | |
| 3006 | write_seqcount_end(&target->d_seq); |
| 3007 | write_seqcount_end(&dentry->d_seq); |
| 3008 | |
| 3009 | if (dir) |
| 3010 | end_dir_add(dir, n, d_wait); |
| 3011 | |
| 3012 | if (dentry->d_parent != old_parent) |
| 3013 | spin_unlock(&dentry->d_parent->d_lock); |
| 3014 | if (dentry != old_parent) |
| 3015 | spin_unlock(&old_parent->d_lock); |
| 3016 | spin_unlock(&target->d_lock); |
| 3017 | spin_unlock(&dentry->d_lock); |
| 3018 | } |
| 3019 | |
/**
| 3021 | * d_move - move a dentry |
| 3022 | * @dentry: entry to move |
| 3023 | * @target: new dentry |
| 3024 | * |
| 3025 | * Update the dcache to reflect the move of a file name. Negative |
| 3026 | * dcache entries should not be moved in this way. See the locking |
| 3027 | * requirements for __d_move. |
| 3028 | */ |
| 3029 | void d_move(struct dentry *dentry, struct dentry *target) |
| 3030 | { |
| 3031 | write_seqlock(&rename_lock); |
| 3032 | __d_move(dentry, target, false); |
| 3033 | write_sequnlock(&rename_lock); |
| 3034 | } |
| 3035 | EXPORT_SYMBOL(d_move); |
| 3036 | |
/**
| 3038 | * d_exchange - exchange two dentries |
| 3039 | * @dentry1: first dentry |
| 3040 | * @dentry2: second dentry |
| 3041 | */ |
| 3042 | void d_exchange(struct dentry *dentry1, struct dentry *dentry2) |
| 3043 | { |
| 3044 | write_seqlock(&rename_lock); |
| 3045 | |
| 3046 | WARN_ON(!dentry1->d_inode); |
| 3047 | WARN_ON(!dentry2->d_inode); |
| 3048 | WARN_ON(IS_ROOT(dentry1)); |
| 3049 | WARN_ON(IS_ROOT(dentry2)); |
| 3050 | |
| 3051 | __d_move(dentry1, dentry2, true); |
| 3052 | |
| 3053 | write_sequnlock(&rename_lock); |
| 3054 | } |
| 3055 | |
| 3056 | /** |
| 3057 | * d_ancestor - search for an ancestor |
| 3058 | * @p1: ancestor dentry |
| 3059 | * @p2: child dentry |
| 3060 | * |
| 3061 | * Returns the ancestor dentry of p2 which is a child of p1, if p1 is |
| 3062 | * an ancestor of p2, else NULL. |
| 3063 | */ |
| 3064 | struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) |
| 3065 | { |
| 3066 | struct dentry *p; |
| 3067 | |
| 3068 | for (p = p2; !IS_ROOT(p); p = p->d_parent) { |
| 3069 | if (p->d_parent == p1) |
| 3070 | return p; |
| 3071 | } |
| 3072 | return NULL; |
| 3073 | } |
| 3074 | |
| 3075 | /* |
| 3076 | * This helper attempts to cope with remotely renamed directories |
| 3077 | * |
| 3078 | * It assumes that the caller is already holding |
* dentry->d_parent->d_inode->i_rwsem, and rename_lock.
| 3080 | * |
| 3081 | * Note: If ever the locking in lock_rename() changes, then please |
| 3082 | * remember to update this too... |
| 3083 | */ |
| 3084 | static int __d_unalias(struct inode *inode, |
| 3085 | struct dentry *dentry, struct dentry *alias) |
| 3086 | { |
| 3087 | struct mutex *m1 = NULL; |
| 3088 | struct rw_semaphore *m2 = NULL; |
| 3089 | int ret = -ESTALE; |
| 3090 | |
| 3091 | /* If alias and dentry share a parent, then no extra locks required */ |
| 3092 | if (alias->d_parent == dentry->d_parent) |
| 3093 | goto out_unalias; |
| 3094 | |
| 3095 | /* See lock_rename() */ |
| 3096 | if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) |
| 3097 | goto out_err; |
| 3098 | m1 = &dentry->d_sb->s_vfs_rename_mutex; |
| 3099 | if (!inode_trylock_shared(alias->d_parent->d_inode)) |
| 3100 | goto out_err; |
| 3101 | m2 = &alias->d_parent->d_inode->i_rwsem; |
| 3102 | out_unalias: |
| 3103 | __d_move(alias, dentry, false); |
| 3104 | ret = 0; |
| 3105 | out_err: |
| 3106 | if (m2) |
| 3107 | up_read(m2); |
| 3108 | if (m1) |
| 3109 | mutex_unlock(m1); |
| 3110 | return ret; |
| 3111 | } |
| 3112 | |
| 3113 | /** |
| 3114 | * d_splice_alias - splice a disconnected dentry into the tree if one exists |
| 3115 | * @inode: the inode which may have a disconnected dentry |
| 3116 | * @dentry: a negative dentry which we want to point to the inode. |
| 3117 | * |
| 3118 | * If inode is a directory and has an IS_ROOT alias, then d_move that in |
| 3119 | * place of the given dentry and return it, else simply d_add the inode |
| 3120 | * to the dentry and return NULL. |
| 3121 | * |
| 3122 | * If a non-IS_ROOT directory is found, the filesystem is corrupt, and |
| 3123 | * we should error out: directories can't have multiple aliases. |
| 3124 | * |
| 3125 | * This is needed in the lookup routine of any filesystem that is exportable |
| 3126 | * (via knfsd) so that we can build dcache paths to directories effectively. |
| 3127 | * |
| 3128 | * If a dentry was found and moved, then it is returned. Otherwise NULL |
| 3129 | * is returned. This matches the expected return value of ->lookup. |
| 3130 | * |
| 3131 | * Cluster filesystems may call this function with a negative, hashed dentry. |
| 3132 | * In that case, we know that the inode will be a regular file, and also this |
| 3133 | * will only occur during atomic_open. So we need to check for the dentry |
| 3134 | * being already hashed only in the final case. |
| 3135 | */ |
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				struct dentry *old_parent = dget(new->d_parent);
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
				dput(old_parent);
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
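
/*
 * A minimal sketch of the typical ->lookup() pattern served by
 * d_splice_alias() ("myfs_iget" is a hypothetical helper; exportable
 * filesystems such as ext4 follow this shape):
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *					  struct dentry *dentry,
 *					  unsigned int flags)
 *	{
 *		struct inode *inode = myfs_iget(dir->i_sb, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 *
 * d_splice_alias() copes with NULL and ERR_PTR() inodes and consumes the
 * inode reference in all cases, so the caller must not iput() afterwards.
 */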

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns true if new_dentry is a subdirectory of old_dentry (at any depth),
 * false otherwise. Trivially implemented by walking up the dcache d_parent
 * chain. The caller must ensure that new_dentry is pinned before calling
 * is_subdir().
 */
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	bool result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return true;

	do {
		/* for restarting the loop if the seqcount changes under us */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock() to protect against d_parent changing
		 * under us due to a concurrent d_move().
		 */
		rcu_read_lock();
		result = d_ancestor(old_dentry, new_dentry) != NULL;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
EXPORT_SYMBOL(is_subdir);
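
/*
 * Illustrative sketch (hypothetical caller): confine an operation to a
 * subtree, with "victim" pinned by the caller:
 *
 *	if (!is_subdir(victim, root))
 *		return -EXDEV;
 */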

static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill);
}
EXPORT_SYMBOL(d_genocide);

void d_tmpfile(struct file *file, struct inode *inode)
{
	struct dentry *dentry = file->f_path.dentry;

	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
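
/*
 * A minimal sketch of a ->tmpfile() implementation using this helper
 * ("myfs_new_inode" is hypothetical and the leading arguments are elided):
 *
 *	static int myfs_tmpfile(..., struct inode *dir,
 *				struct file *file, umode_t mode)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_tmpfile(file, inode);
 *		return finish_open_simple(file, 0);
 *	}
 */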

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	/*
	 * If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

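	/*
	 * Scale 13: absent "dhash_entries=", size the table at roughly one
	 * bucket per 8KiB of memory (see alloc_large_system_hash()).
	 */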
	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
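	/*
	 * d_hash() indexes the table with "hash >> d_hash_shift", so store
	 * the complement and let the high bits of the 32-bit hash select
	 * the bucket.
	 */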
	d_hash_shift = 32 - d_hash_shift;
}

static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
		d_iname);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
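	/* As in dcache_init_early(): store the complement for d_hash(). */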
	d_hash_shift = 32 - d_hash_shift;
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

void __init vfs_caches_init_early(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(void)
{
	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}