 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds

 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
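 *   dentry1->d_lock
 *     dentry2->d_lock
 *
 * An illustrative sketch (not code from this file) of how the
 * address-ordered rule above is applied to two dentries with no
 * ancestor relationship; DENTRY_D_LOCK_NESTED is the nesting class
 * used for the inner lock elsewhere in this file:
 *
 *	if (dentry1 < dentry2) {
 *		spin_lock(&dentry1->d_lock);
 *		spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
 *	}
 */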
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return dentry_hashtable + hash_32(hash, d_hash_shift);

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.

static long get_nr_dentry(void)
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;

static long get_nr_dentry_unused(void)
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.

#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
	unsigned long a,b,mask;

		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
		if (unlikely(a != b))
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);

	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
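{
	/*
	 * A sketch of the elided generic body, assuming no word-at-a-time
	 * support: compare one byte at a time. The caller guarantees that
	 * tcount is non-zero on entry.
	 */
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif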
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
	const unsigned char *cs;
	/*
	 * Be careful about RCU walk racing with rename:
	 * use ACCESS_ONCE to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
struct external_name {
		struct rcu_head head;
	unsigned char name[];

static inline struct external_name *external_name(struct dentry *dentry)
	return container_of(dentry->d_name.name, struct external_name, name[0]);

static void __d_free(struct rcu_head *head)
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);

static void __d_free_external(struct rcu_head *head)
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);

static inline int dname_external(const struct dentry *dentry)
	return dentry->d_name.name != dentry->d_iname;

static void dentry_free(struct dentry *dentry)
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
		call_rcu(&dentry->d_u.d_rcu, __d_free);

 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).

static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);

 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.

static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
	struct inode *inode = dentry->d_inode;
		dentry->d_inode = NULL;
		hlist_del_init(&dentry->d_u.d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		spin_unlock(&dentry->d_lock);

 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.

static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
	struct inode *inode = dentry->d_inode;
	__d_clear_type(dentry);
	dentry->d_inode = NULL;
	hlist_del_init(&dentry->d_u.d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);

 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.

#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))

static void d_lru_add(struct dentry *dentry)
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));

static void d_lru_del(struct dentry *dentry)
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));

static void d_shrink_del(struct dentry *dentry)
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);

 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.

static void d_lru_isolate(struct dentry *dentry)
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_del_init(&dentry->d_lru);

static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_move_tail(&dentry->d_lru, list);
 * dentry_lru_(add|del)_list must be called with d_lock held.
static void dentry_lru_add(struct dentry *dentry)
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))

 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.

void __d_drop(struct dentry *dentry)
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		dentry_rcuwalk_barrier(dentry);

EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
	spin_lock(&dentry->d_lock);
	spin_unlock(&dentry->d_lock);

EXPORT_SYMBOL(d_drop);
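/*
 * Hedged usage sketch (not part of this file): a network filesystem's
 * ->d_revalidate() that decides an entry has gone stale on the server
 * would typically just drop it, so the next lookup misses the cache:
 *
 *	if (entry_is_stale(dentry))	// hypothetical fs-private helper
 *		d_drop(dentry);
 */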
static void __dentry_kill(struct dentry *dentry)
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))

	/* if it was on the hash then remove it */
	__list_del_entry(&dentry->d_child);
	/*
	 * Inform d_walk() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
		spin_unlock(&parent->d_lock);

	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	BUG_ON((int)dentry->d_lockref.count > 0);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
				spin_unlock(&inode->i_lock);

	__dentry_kill(dentry);

	spin_unlock(&dentry->d_lock);
	return dentry; /* try again with same dentry */

static inline struct dentry *lock_parent(struct dentry *dentry)
	struct dentry *parent = dentry->d_parent;
	if (unlikely((int)dentry->d_lockref.count < 0))
	if (likely(spin_trylock(&parent->d_lock)))
	spin_unlock(&dentry->d_lock);
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.

void dput(struct dentry *dentry)
	if (unlikely(!dentry))

	if (lockref_put_or_lock(&dentry->d_lockref))

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))

	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);

	dentry = dentry_kill(dentry);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
	dentry->d_lockref.count++;

static inline void __dget(struct dentry *dentry)
	lockref_get(&dentry->d_lockref);
struct dentry *dget_parent(struct dentry *dentry)
	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);

	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))

	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);

	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);

EXPORT_SYMBOL(dget_parent);
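/*
 * Hedged usage sketch: dget_parent() returns a referenced parent that
 * stays valid even if the child is concurrently moved, so a caller
 * walking upward must balance it with dput():
 *
 *	struct dentry *parent = dget_parent(dentry);
 *	// ... safely use parent here ...
 *	dput(parent);
 */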
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.

static struct dentry *__d_find_alias(struct inode *inode)
	struct dentry *alias, *discon_alias;

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
				spin_unlock(&alias->d_lock);
		spin_unlock(&alias->d_lock);

		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
		spin_unlock(&alias->d_lock);

struct dentry *d_find_alias(struct inode *inode)
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);

EXPORT_SYMBOL(d_find_alias);
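/*
 * Hedged usage sketch: the caller owns the reference on any alias
 * returned and must drop it when done:
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		// ... inspect or invalidate the alias ...
 *		dput(alias);
 *	}
 */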
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.

void d_prune_aliases(struct inode *inode)
	struct dentry *dentry;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				spin_unlock(&parent->d_lock);
		spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);

EXPORT_SYMBOL(d_prune_aliases);
static void shrink_dentry_list(struct list_head *list)
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if ((int)dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
				spin_unlock(&parent->d_lock);

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
				spin_unlock(&parent->d_lock);

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
				spin_unlock(&parent->d_lock);

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
					spin_unlock(&parent->d_lock);

			inode = dentry->d_inode;	/* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
					spin_unlock(&parent->d_lock);

			__dentry_kill(dentry);
static enum lru_status
dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(dentry);
		spin_unlock(&dentry->d_lock);

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

	/*
	 * The list move itself will be made by the common LRU code. At
	 * this point, we've dropped the dentry->d_lock but keep the
	 * lru lock. This is safe to do, since every list movement is
	 * protected by the lru lock even if both locks are held.
	 *
	 * This is guaranteed by the fact that all LRU management
	 * functions are intermediated by the LRU API calls like
	 * list_lru_add and list_lru_del. List movement in this file
	 * only ever occurs through these functions or through callbacks
	 * like this one, that are called from the LRU API.
	 *
	 * The only exceptions to this are functions like
	 * shrink_dentry_list, and code that first checks for the
	 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
	 * operating only with stack provided lists after they are
	 * properly isolated from the main list. It is thus always a
	 * safe operation.
	 */
	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);
 * prune_dcache_sb - shrink the dcache
 * @nr_to_scan : number of entries to try to free
 * @nid: which node to scan for freeable entities
 *
 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 * done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.

long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
	freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
				   &dispose, &nr_to_scan);
	shrink_dentry_list(&dispose);
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
						 spinlock_t *lru_lock, void *arg)
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

 * shrink_dcache_sb - shrink dcache for a superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.

void shrink_dcache_sb(struct super_block *sb)
		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);

EXPORT_SYMBOL(shrink_dcache_sb);
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.

static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
	struct dentry *this_parent;
	struct list_head *next;
	enum d_walk_ret ret;

	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	case D_WALK_CONTINUE:
	case D_WALK_NORETRY:

	next = this_parent->d_subdirs.next;

	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		case D_WALK_CONTINUE:
			spin_unlock(&dentry->d_lock);
		case D_WALK_NORETRY:
			spin_unlock(&dentry->d_lock);

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
		spin_unlock(&dentry->d_lock);

	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
		next = child->d_child.next;
		while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
			if (next == &this_parent->d_subdirs)
			child = list_entry(next, struct dentry, d_child);

	if (need_seqretry(&rename_lock, seq))

	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);

	spin_unlock(&this_parent->d_lock);
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.

static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
	if (d_mountpoint(dentry)) {
	return D_WALK_CONTINUE;

 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point

int have_submounts(struct dentry *parent)
	d_walk(parent, &ret, check_mount, NULL);

EXPORT_SYMBOL(have_submounts);

 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed. For
 * this reason take rename_lock and d_lock on dentry and ancestors.

int d_set_mounted(struct dentry *dentry)
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
		spin_unlock(&p->d_lock);
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);

 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.

struct select_data {
	struct dentry *start;
	struct list_head dispose;

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		if (dentry->d_flags & DCACHE_LRU_LIST)
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);

	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;

 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.

void shrink_dcache_parent(struct dentry *parent)
	struct select_data data;

	INIT_LIST_HEAD(&data.dispose);
	data.start = parent;

		d_walk(parent, &data, select_collect, NULL);

		shrink_dentry_list(&data.dispose);

EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
	/* it has busy descendants; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry->d_inode->i_ino : 0UL,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);

	return D_WALK_CONTINUE;
static void do_one_tree(struct dentry *dentry)
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);

 * destroy the dentries attached to a superblock on unmounting

void shrink_dcache_for_umount(struct super_block *sb)
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);

struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;

static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		data->mountpoint = dentry;

	return select_collect(&data->select, dentry);

static void check_and_drop(void *_data)
	struct detach_data *data = _data;

	if (!data->mountpoint && !data->select.found)
		__d_drop(data->select.start);
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * The final d_drop is done as an atomic operation relative to
 * rename_lock ensuring there are no races with d_set_mounted. This
 * ensures there are no unhashed dentries on the path to a mountpoint.

void d_invalidate(struct dentry *dentry)
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {

		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (data.select.found)
			shrink_dentry_list(&data.select.dispose);

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);

		if (!data.mountpoint && !data.select.found)

EXPORT_SYMBOL(d_invalidate);
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.

struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
	struct dentry *dentry;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
			kmem_cache_free(dentry_cache, dentry);
		atomic_set(&p->u.count, 1);
		dname = dentry->d_iname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.

struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
	struct dentry *dentry = __d_alloc(parent->d_sb, name);

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

EXPORT_SYMBOL(d_alloc);

 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.

struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
	return __d_alloc(sb, name);

EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);

EXPORT_SYMBOL(d_alloc_name);
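/*
 * Hedged usage sketch: a filesystem creating a well-known child at
 * mount time might pair d_alloc_name() with d_add(); the name and
 * "mnt_inode" below are illustrative only:
 *
 *	struct dentry *child = d_alloc_name(sb->s_root, "lost+found");
 *	if (!child)
 *		return -ENOMEM;
 *	d_add(child, mnt_inode);
 */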
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE ));
		dentry->d_flags |= DCACHE_OP_HASH;
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
		dentry->d_flags |= DCACHE_OP_DELETE;
		dentry->d_flags |= DCACHE_OP_PRUNE;

EXPORT_SYMBOL(d_set_d_op);

static unsigned d_flags_for_inode(struct inode *inode)
	unsigned add_flags = DCACHE_FILE_TYPE;

		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
				inode->i_opflags |= IOP_LOOKUP;
	} else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->follow_link))
			add_flags = DCACHE_SYMLINK_TYPE;
			inode->i_opflags |= IOP_NOFOLLOW;

	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
	unsigned add_flags = d_flags_for_inode(inode);

	spin_lock(&dentry->d_lock);
	__d_set_type(dentry, add_flags);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);

 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.

void d_instantiate(struct dentry *entry, struct inode * inode)
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);

EXPORT_SYMBOL(d_instantiate);
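/*
 * Hedged usage sketch: a typical ->create() path allocates an inode
 * (its initial reference is handed over to the dcache) and attaches
 * it to the negative dentry; "myfs_new_inode" is hypothetical:
 *
 *	struct inode *inode = myfs_new_inode(dir, mode);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	d_instantiate(dentry, inode);
 *	return 0;
 */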
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.

static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

		__d_instantiate(entry, NULL);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
		if (alias->d_parent != entry->d_parent)
		if (alias->d_name.len != len)
		if (dentry_cmp(alias, name, len))

	__d_instantiate(entry, inode);

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
	struct dentry *result;

	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&inode->i_lock);

		security_d_instantiate(entry, inode);

	BUG_ON(!d_unhashed(result));

EXPORT_SYMBOL(d_instantiate_unique);
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.

int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);

EXPORT_SYMBOL(d_instantiate_no_diralias);

struct dentry *d_make_root(struct inode *root_inode)
	struct dentry *res = NULL;

		static const struct qstr name = QSTR_INIT("/", 1);

		res = __d_alloc(root_inode->i_sb, &name);
			d_instantiate(res, root_inode);

EXPORT_SYMBOL(d_make_root);

static struct dentry * __d_find_any_alias(struct inode *inode)
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);

 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.

struct dentry *d_find_any_alias(struct inode *inode)
	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);

EXPORT_SYMBOL(d_find_any_alias);
static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
	static const struct qstr anonstring = QSTR_INIT("/", 1);

		return ERR_PTR(-ESTALE);
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);

	tmp = __d_alloc(inode->i_sb, &anonstring);
		res = ERR_PTR(-ENOMEM);

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
		spin_unlock(&inode->i_lock);

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= add_flags;
	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);

 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).

struct dentry *d_obtain_alias(struct inode *inode)
	return __d_obtain_alias(inode, 1);

EXPORT_SYMBOL(d_obtain_alias);
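/*
 * Hedged usage sketch: an export_operations ->fh_to_dentry() can feed
 * its inode lookup straight into d_obtain_alias(), since %NULL and
 * IS_ERR inodes are handled here; "myfs_iget" is hypothetical:
 *
 *	static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
 *				struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return d_obtain_alias(myfs_iget(sb, fid->i32.ino));
 *	}
 */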
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry. If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is
 * released. A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).

struct dentry *d_obtain_root(struct inode *inode)
	return __d_obtain_alias(inode, 0);

EXPORT_SYMBOL(d_obtain_root);
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.

struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
	struct dentry *found;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
		new = d_alloc(dentry->d_parent, name);
			found = ERR_PTR(-ENOMEM);
		found = d_splice_alias(inode, new);

EXPORT_SYMBOL(d_add_ci);
 * Do the slow-case of the dentry name compare.
 *
 * Unlike the dentry_cmp() function, we need to atomically
 * load the name and length information, so that the
 * filesystem can rely on them, and can use the 'name' and
 * 'len' information without worrying about walking off the
 * end of memory etc.
 *
 * Thus the read_seqcount_retry() and the "duplicate" info
 * in arguments (the low-level filesystem should not look
 * at the dentry inode or name contents directly, since
 * rename can change them while we're in RCU mode).

enum slow_d_compare {

static noinline enum slow_d_compare slow_dentry_cmp(
		const struct dentry *parent,
		struct dentry *dentry,
		const struct qstr *name)
	int tlen = dentry->d_name.len;
	const char *tname = dentry->d_name.name;

	if (read_seqcount_retry(&dentry->d_seq, seq)) {
		return D_COMP_SEQRETRY;
	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
		return D_COMP_NOMATCH;
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!

struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
		if (d_unhashed(dentry))

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (dentry->d_name.hash != hashlen_hash(hashlen))
			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
			case D_COMP_NOMATCH:

		if (dentry->d_name.hash_len != hashlen)
		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.

struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
	struct dentry *dentry;

		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
	} while (read_seqretry(&rename_lock, seq));

EXPORT_SYMBOL(d_lookup);
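/*
 * Hedged usage sketch: d_lookup() expects a pre-hashed qstr and
 * returns a referenced dentry that the caller must dput():
 *
 *	struct dentry *child = d_lookup(parent, &q);	// q: hashed qstr
 *	if (child) {
 *		// ... use child ...
 *		dput(child);
 *	}
 */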
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.

struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
		if (d_unhashed(dentry))

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			int tlen = dentry->d_name.len;
			const char *tname = dentry->d_name.name;
			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
			if (dentry->d_name.len != len)
			if (dentry_cmp(dentry, str, len))

		dentry->d_lockref.count++;
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dentry->d_lock);
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)

struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	return d_lookup(dir, name);

EXPORT_SYMBOL(d_hash_and_lookup);
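/*
 * Hedged usage sketch: callers with a raw name string fill in a qstr
 * and let d_hash_and_lookup() compute the standard hash and apply any
 * fs-specific ->d_hash() before the lookup:
 *
 *	struct qstr q = QSTR_INIT(name, strlen(name));
 *	struct dentry *de = d_hash_and_lookup(dir, &q);
 *	if (IS_ERR_OR_NULL(de))
 *		// bad name or no entry; handle accordingly
 */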
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.

int d_validate(struct dentry *dentry, struct dentry *dparent)
	struct dentry *child;

	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_child) {
		if (dentry == child) {
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
	spin_unlock(&dparent->d_lock);

EXPORT_SYMBOL(d_validate);
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users

 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later

void d_delete(struct dentry * dentry)
	struct inode *inode;

	/*
	 * Are we the only user?
	 */
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_lockref.count == 1) {
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);

	if (!d_unhashed(dentry))

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);

EXPORT_SYMBOL(d_delete);
2273 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2275 BUG_ON(!d_unhashed(entry));
2277 entry->d_flags |= DCACHE_RCUACCESS;
2278 hlist_bl_add_head_rcu(&entry->d_hash, b);
2282 static void _d_rehash(struct dentry * entry)
2284 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2288 * d_rehash - add an entry back to the hash
2289 * @entry: dentry to add to the hash
2291 * Adds a dentry to the hash according to its name.
2294 void d_rehash(struct dentry * entry)
2296 spin_lock(&entry->d_lock);
2298 spin_unlock(&entry->d_lock);
2300 EXPORT_SYMBOL(d_rehash);
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
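
/*
 * Sketch of the intended calling pattern (hypothetical filesystem code,
 * not from this file): with the parent's i_mutex held, push the on-disk
 * case of the name into a dentry found under a case-insensitive
 * d_compare:
 *
 *	found = d_lookup(parent, &name);
 *	if (found)
 *		dentry_update_name_case(found, &disk_name);
 */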
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external. Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal. Give dentry's
			 * storage to target and make dentry internal.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;

			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;

	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
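
/*
 * Note on the two helpers above: an external name is shared by bumping
 * external_name()->u.count and is freed with kfree_rcu() only when the
 * last reference drops, so an RCU-walk lookup still dereferencing the
 * old pointer stays safe until a grace period has elapsed.
 */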
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}
static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch, unless we are going to rehash
 * it. Note that if we *do* unhash the target, we are not allowed
 * to rehash it without giving it a new name/hash key - whether
 * we swap or overwrite the names here, the resulting name won't match
 * the reality in the filesystem; it's only there for d_path() purposes.
 * Note that all of this is happening under rename_lock, so any hash
 * lookup seeing it in the middle of manipulations will be discarded
 * anyway. So we do not care what happens to the hash key in that case.
 */
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */

	/*
	 * Move the dentry to the target hash queue. Don't bother checking
	 * for the same hash queue because of how unlikely it is.
	 */
	__d_drop(dentry);
	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

	/*
	 * Unhash the target (d_delete() is not usable here). If exchanging
	 * the two dentries, then rehash onto the other's hash queue.
	 */
	__d_drop(target);
	if (exchange) {
		__d_rehash(target,
			   d_hash(dentry->d_parent, dentry->d_name.hash));
	}

	/* Switch the names.. */
	if (exchange)
		swap_names(dentry, target);
	else
		copy_name(dentry, target);

	/* ... and switch them in the tree */
	if (IS_ROOT(dentry)) {
		/* splicing a tree */
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		list_del_init(&target->d_child);
		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	} else {
		/* swapping two dentries */
		swap(dentry->d_parent, target->d_parent);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
		if (exchange)
			fsnotify_d_move(target);
		fsnotify_d_move(dentry);
	}

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	dentry_unlock_for_move(dentry, target);
}
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
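
/*
 * Typical caller, as a simplified sketch: the VFS rename path moves the
 * source dentry over the target once the filesystem's ->rename() has
 * succeeded, roughly
 *
 *	error = old_dir->i_op->rename(old_dir, old_dentry,
 *				      new_dir, new_dentry);
 *	if (!error)
 *		d_move(old_dentry, new_dentry);
 *
 * See vfs_rename() in fs/namei.c for the full sequence.
 */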
/**
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
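
/*
 * d_exchange() backs the RENAME_EXCHANGE flag of renameat2(2): both
 * dentries must be positive and neither may be a root dentry, hence
 * the WARN_ONs above.
 */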
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
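
/*
 * Example of use, as a sketch: lock_rename() in fs/namei.c relies on
 * d_ancestor() to pick a deadlock-free i_mutex order for two
 * directories, roughly
 *
 *	p = d_ancestor(p2, p1);
 *	if (p) {
 *		(p2 is an ancestor of p1: lock the ancestor first)
 *		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
 *		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
 *	}
 */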
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	spin_unlock(&inode->i_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode: the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open. So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode) {
		__d_instantiate(dentry, NULL);
		goto out;
	}
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				spin_unlock(&inode->i_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
				spin_unlock(&inode->i_lock);
				security_d_instantiate(new, inode);
			}
			iput(inode);
			return new;
		}
	}
	/* already taking inode->i_lock, so d_add() by hand */
	__d_instantiate(dentry, inode);
	spin_unlock(&inode->i_lock);
out:
	security_d_instantiate(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
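
/*
 * A sketch of the expected calling convention (the foofs_* names are
 * made up): an exportable filesystem returns d_splice_alias()'s result
 * directly from its ->lookup(); the inode may be NULL or an ERR_PTR:
 *
 *	static struct dentry *foofs_lookup(struct inode *dir,
 *			struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = foofs_iget(dir, &dentry->d_name);
 *		return d_splice_alias(inode, dentry);
 *	}
 */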
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
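
/*
 * The path helpers below build strings right to left: the buffer pointer
 * starts past the end of the buffer and each prepend() steps it back.
 * For "/a/b" the buffer evolves like this (sketch):
 *
 *	[ ...............\0 ]	after prepend(&p, &len, "\0", 1)
 *	[ ............/b \0 ]	after prepend_name(..., "b")
 *	[ ........./a/b  \0 ]	after prepend_name(..., "a")
 */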
/**
 * prepend_name - prepend a pathname in front of current buffer pointer
 * @buffer: buffer pointer
 * @buflen: allocated length of the buffer
 * @name: name string and length qstr structure
 *
 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
 * make sure that either the old or the new name pointer and length are
 * fetched. However, there may be a mismatch between the length and the
 * pointer. The length cannot be trusted; we need to copy the name byte
 * by byte until the length is reached or a NUL byte is found. It also
 * prepends "/" at the beginning of the name. The sequence-number check
 * at the caller will retry if a d_move() does happen, so any garbage in
 * the buffer due to a mismatched pointer and length will be discarded.
 *
 * A data dependency barrier is needed to make sure that we see that
 * terminating NUL. Alpha strikes again, film at 11...
 */
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	const char *dname = ACCESS_ONCE(name->name);
	u32 dlen = ACCESS_ONCE(name->len);
	char *p;

	smp_read_barrier_depends();

	*buflen -= dlen + 1;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	p = *buffer -= dlen + 1;
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;
		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * The function will first try to write out the pathname without taking any
 * lock other than the RCU read lock to make sure that dentries won't go away.
 * It only checks the sequence number of the global rename_lock as any change
 * in the dentry's d_seq will be preceded by changes in the rename_lock
 * sequence number. If the sequence number has changed, it will restart
 * the whole pathname back-tracing sequence again by taking the rename_lock.
 * In this case, there is no need to take the RCU read lock as the recursive
 * parent pointer references will keep the dentry chain alive as long as no
 * rename operation is performed.
 */
static int prepend_path(const struct path *path,
			const struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry;
	struct vfsmount *vfsmnt;
	struct mount *mnt;
	int error = 0;
	unsigned seq, m_seq = 0;
	char *bptr;
	int blen;

	rcu_read_lock();
restart_mnt:
	read_seqbegin_or_lock(&mount_lock, &m_seq);
	seq = 0;
	rcu_read_lock();
restart:
	bptr = *buffer;
	blen = *buflen;
	error = 0;
	dentry = path->dentry;
	vfsmnt = path->mnt;
	mnt = real_mount(vfsmnt);
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
			/* Global root? */
			if (mnt != parent) {
				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
				mnt = parent;
				vfsmnt = &mnt->mnt;
				continue;
			}
			/*
			 * Filesystems needing to implement special "root names"
			 * should do so with ->d_dname()
			 */
			if (IS_ROOT(dentry) &&
			   (dentry->d_name.len != 1 ||
			    dentry->d_name.name[0] != '/')) {
				WARN(1, "Root dentry has weird name <%.*s>\n",
				     (int) dentry->d_name.len,
				     dentry->d_name.name);
			}
			error = is_mounted(vfsmnt) ? 1 : 2;
			break;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		error = prepend_name(&bptr, &blen, &dentry->d_name);
		if (error)
			break;

		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);

	if (!(m_seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&mount_lock, m_seq)) {
		m_seq = 1;
		goto restart_mnt;
	}
	done_seqretry(&mount_lock, m_seq);

	if (error >= 0 && bptr == *buffer) {
		if (--blen < 0)
			error = -ENAMETOOLONG;
		else
			*--bptr = '/';
	}
	*buffer = bptr;
	*buflen = blen;
	return error;
}
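
/*
 * Return-value convention, summarising the code above: 0 means @path
 * was fully reachable from @root; 1 or 2 mean the walk hit the global
 * root first (1 if the mount is still mounted, 2 if it has been
 * detached); negative values are errors such as -ENAMETOOLONG.
 */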
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */
char *__d_path(const struct path *path,
	       const struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, root, &res, &buflen);

	if (error < 0)
		return ERR_PTR(error);
	if (error > 0)
		return NULL;
	return res;
}
char *d_absolute_path(const struct path *path,
	       char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, &root, &res, &buflen);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 */
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}
static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 *
	 * Some pseudo inodes are mountable. When they are mounted
	 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
	 * and instead have d_path return the mounted path.
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	error = path_with_deleted(path, &root, &res, &buflen);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
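
/*
 * A minimal usage sketch; note that the returned pointer, not the
 * passed-in buffer, carries the result:
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *	char *p = buf ? d_path(&file->f_path, buf, PATH_MAX) : NULL;
 *
 *	if (p && !IS_ERR(p))
 *		pr_info("file: %s\n", p);
 *	kfree(buf);
 */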
/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
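
/*
 * Example: pipefs builds its "pipe:[<ino>]" names with this helper (see
 * fs/pipe.c); a ->d_dname() for any nameless object looks much the same:
 *
 *	static char *pipefs_dname(struct dentry *dentry, char *buffer,
 *				  int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
 *				dentry->d_inode->i_ino);
 *	}
 */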
char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
	char *end = buffer + buflen;
	/* these dentries are never renamed, so d_lock is not needed */
	if (prepend(&end, &buflen, " (deleted)", 11) ||
	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
	    prepend(&end, &buflen, "/", 1))
		end = ERR_PTR(-ENAMETOOLONG);
	return end;
}
EXPORT_SYMBOL(simple_dname);
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
static char *__dentry_path(struct dentry *d, char *buf, int buflen)
{
	struct dentry *dentry;
	char *end, *retval;
	int len, seq = 0;
	int error = 0;

	if (buflen < 2)
		goto Elong;

	rcu_read_lock();
restart:
	dentry = d;
	end = buf + buflen;
	len = buflen;
	prepend(&end, &len, "\0", 1);
	/* Get '/' right */
	retval = end - 1;
	*retval = '/';
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		error = prepend_name(&end, &len, &dentry->d_name);
		if (error)
			break;

		retval = end;
		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);
	if (error)
		goto Elong;
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = __getname();

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PATH_MAX;
		int buflen = PATH_MAX;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* Unreachable from current root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PATH_MAX + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		rcu_read_unlock();
	}

out:
	__putname(page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return 1;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock() to protect against the d_parent
		 * trashing due to d_move().
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;

	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}
void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
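
/*
 * Sketch of a filesystem ->tmpfile() (the foofs_* names are
 * hypothetical): allocate the inode with a link count of 1 and let
 * d_tmpfile() drop it to 0 and give the dentry its synthetic "#<ino>"
 * name:
 *
 *	static int foofs_tmpfile(struct inode *dir, struct dentry *dentry,
 *				 umode_t mode)
 *	{
 *		struct inode *inode = foofs_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_tmpfile(dentry, inode);
 *		return 0;
 *	}
 */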
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
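
/*
 * Example: booting with "dhash_entries=65536" on the kernel command line
 * pins the dentry hash table at 65536 entries; when the parameter is
 * absent, alloc_large_system_hash() sizes the table from available
 * memory.
 */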
static void __init dcache_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
{
	unsigned int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}