/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include <linux/task_work.h>

#include "pnode.h"
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}
/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static void drop_mountpoint(struct fs_pin *p)
{
	struct mount *m = container_of(p, struct mount, mnt_umount);
	dput(m->mnt_ex_mountpoint);
	pin_remove(p);
	mntput(&m->mnt);
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
		init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against filesystem being
 * frozen. When the write operation is finished, __mnt_drop_write() must be
 * called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
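
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * mnt_want_write()/mnt_drop_write() around a modification. The helper name
 * example_modify_mnt() is hypothetical.
 */
static int __maybe_unused example_modify_mnt(struct vfsmount *m)
{
	int err = mnt_want_write(m);	/* takes freeze protection + write ref */
	if (err)
		return err;		/* e.g. -EROFS on a read-only mount */
	/* ... perform the modification through this mount here ... */
	mnt_drop_write(m);		/* must balance mnt_want_write() */
	return 0;
}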
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);
/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);
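
/*
 * Illustrative sketch (not part of the original file): the file-based
 * variants pair up the same way, and let the fast path piggyback on a
 * struct file that is already open for write. The helper name
 * example_modify_file() is hypothetical.
 */
static int __maybe_unused example_modify_file(struct file *file)
{
	int err = mnt_want_write_file(file);
	if (err)
		return err;
	/* ... modify something reachable through file->f_path ... */
	mnt_drop_write_file(file);
	return 0;
}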
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	kfree(mnt->mnt.data);
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	return -1;
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
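
/*
 * Illustrative sketch (not part of the original file): how a caller walks
 * to the topmost mount stacked on a mountpoint, in the style of the path
 * walking code. lookup_mnt() returns a referenced vfsmount or NULL, so the
 * old path references are dropped as we step up. The helper name
 * example_follow_top_mount() is hypothetical.
 */
static void __maybe_unused example_follow_top_mount(struct path *path)
{
	struct vfsmount *mounted;

	while ((mounted = lookup_mnt(path)) != NULL) {
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
	}
}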
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dentry;
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}
static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}
/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct dentry *old_mountpoint = mnt->mnt_mountpoint;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);

	/*
	 * Safely avoid even the suggestion this code might sleep or
	 * lock the mount hash by taking advantage of the knowledge that
	 * mnt_change_mountpoint will not release the final reference
	 * to a mountpoint.
	 *
	 * During mounting, the mount passed in as the parent mount will
	 * continue to use the old mountpoint and during unmounting, the
	 * old mountpoint will continue to exist until namespace_unlock,
	 * which happens well after mnt_change_mountpoint.
	 */
	spin_lock(&old_mountpoint->d_lock);
	old_mountpoint->d_lockref.count--;
	spin_unlock(&old_mountpoint->d_lock);

	mnt_add_count(old_parent, -1);
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	mnt->mnt.data = NULL;
	if (type->alloc_mnt_data) {
		mnt->mnt.data = type->alloc_mnt_data();
		if (!mnt->mnt.data) {
			mnt_free_id(mnt);
			free_vfsmnt(mnt);
			return ERR_PTR(-ENOMEM);
		}
	}
	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, &mnt->mnt, data);
	if (IS_ERR(root)) {
		kfree(mnt->mnt.data);
		mnt->mnt.data = NULL;	/* free_vfsmnt() would kfree it again */
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
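
/*
 * Illustrative sketch (not part of the original file): an in-kernel mount
 * in the style of kern_mount(), which is a thin wrapper around
 * vfs_kern_mount() with MS_KERNMOUNT. On failure an ERR_PTR() is returned,
 * never NULL. The helper name example_kern_mount() is hypothetical.
 */
static struct vfsmount * __maybe_unused
example_kern_mount(struct file_system_type *type)
{
	return vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
}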
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (sb->s_op->clone_mnt_data) {
		mnt->mnt.data = sb->s_op->clone_mnt_data(old->mnt.data);
		if (!mnt->mnt.data) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) &&
	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	kfree(mnt->mnt.data);
	mnt->mnt.data = NULL;	/* free_vfsmnt() would kfree it again */
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		cleanup_mnt(llist_entry(node, struct mount, mnt_llist));
	}
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

struct vfsmount *mnt_clone_internal(struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure, that previous options are not overwritten if the
 * remount fails.
 *
 * Also note, that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);
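
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * relies on the generic helpers saves its option string at fill_super()
 * time and points .show_options at generic_show_options(). The name
 * examplefs_fill_super() and the surrounding wiring are hypothetical.
 */
static int __maybe_unused examplefs_fill_super(struct super_block *sb,
					       void *data, int silent)
{
	save_mount_options(sb, data);	/* remembered for /proc/mounts */
	/* ... normal superblock setup would follow ... */
	return 0;
}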
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct hlist_head head;

	hlist_move_list(&unmounted, &head);

	up_write(&namespace_sem);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu();

	group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};
static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);

		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
				 disconnect ? &unmounted : NULL);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();
	event++;

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
			umount_mnt(mnt);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}
/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;
	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}
/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), UMOUNT_SYNC);
	unlock_mount_hash();
	namespace_unlock();
}
/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new will
 * not be attached anywhere in the namespace and will be private (i.e. changes
 * to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	down_read(&namespace_sem);
	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
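
/*
 * Illustrative sketch (not part of the original file): a consumer taking a
 * private snapshot of a path's mount, e.g. for internal kernel use. Changes
 * to the source mount will not propagate into the clone. The helper name
 * example_snapshot_mount() is hypothetical.
 */
static struct vfsmount * __maybe_unused example_snapshot_mount(struct path *path)
{
	struct vfsmount *m = clone_private_mount(path);

	if (IS_ERR(m))
		return m;
	/* ... use m privately; release with mntput(m) when done ... */
	return m;
}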
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0, old, pending, sum;
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	old = ns->mounts;
	pending = ns->pending_mounts;
	sum = old + pending;
	if ((old > sum) ||
	    (pending > sum) ||
	    (max < sum) ||
	    (mounts > (max - sum)))
		return -ENOSPC;

	ns->pending_mounts = pending + mounts;
	return 0;
}
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)  the mount is moved to the destination. And is then propagated to
 * 	all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	/* Preallocate a mountpoint in case the new mounts need
	 * to be tucked under other mounts.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!parent_path) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	mutex_lock(&dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(dentry))) {
		mutex_unlock(&dentry->d_inode->i_mutex);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = get_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			mutex_unlock(&dentry->d_inode->i_mutex);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(where);
	read_sequnlock_excl(&mount_lock);

	namespace_unlock();
	mutex_unlock(&dentry->d_inode->i_mutex);
}
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */

static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
2122 static int do_change_type(struct path *path, int flag)
2125 struct mount *mnt = real_mount(path->mnt);
2126 int recurse = flag & MS_REC;
2130 if (path->dentry != path->mnt->mnt_root)
2133 type = flags_to_propagation_type(flag);
2138 if (type == MS_SHARED) {
2139 err = invent_group_ids(mnt, recurse);
2145 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2146 change_mnt_propagation(m, type);
2147 unlock_mount_hash();
2154 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
2156 struct mount *child;
2157 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
2158 if (!is_subdir(child->mnt_mountpoint, dentry))
2161 if (child->mnt.mnt_flags & MNT_LOCKED)
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent))
		goto out2;

	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		/* Was the nodev implicitly added in mount? */
		if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
		    !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			mnt_flags |= MNT_NODEV;
		} else {
			return -EPERM;
		}
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else if (!capable(CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb2(path->mnt, sb, flags, data, 0);
	if (!err) {
		lock_mount_hash();
		propagate_remount(mnt);
		unlock_mount_hash();
	}

	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (d_is_dir(path->dentry) !=
	      d_is_dir(old_path.dentry))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}
/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (d_is_symlink(newmnt->mnt.mnt_root))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}
static bool fs_fully_visible(struct file_system_type *fs_type, int *new_mnt_flags);

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (user_ns != &init_user_ns) {
		if (!(type->fs_flags & FS_USERNS_MOUNT)) {
			put_filesystem(type);
			return -EPERM;
		}
		/* Only in special cases allow devices from mounts
		 * created outside the initial user namespace.
		 */
		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			flags |= MS_NODEV;
			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
		}
		if (type->fs_flags & FS_USERNS_VISIBLE) {
			if (!fs_fully_visible(type, &mnt_flags)) {
				put_filesystem(type);
				return -EPERM;
			}
		}
	}

	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}
int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);
2578 * process a list of expirable mountpoints with the intent of discarding any
2579 * mountpoints that aren't in use and haven't been touched since last we came
2582 void mark_mounts_for_expiry(struct list_head *mounts)
2584 struct mount *mnt, *next;
2585 LIST_HEAD(graveyard);
2587 if (list_empty(mounts))
2593 /* extract from the expiration list every vfsmount that matches the
2594 * following criteria:
2595 * - only referenced by its parent vfsmount
2596 * - still marked for expiry (marked on the last call here; marks are
2597 * cleared by mntput())
2599 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
2600 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
2601 propagate_mount_busy(mnt, 1))
2603 list_move(&mnt->mnt_expire, &graveyard);
2605 while (!list_empty(&graveyard)) {
2606 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
2607 touch_mnt_namespace(mnt->mnt_ns);
2608 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2610 unlock_mount_hash();
2614 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
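/*
 * Editor's sketch (illustrative; all names here are hypothetical): a
 * filesystem that creates automounts typically pairs mnt_set_expiry()
 * with a periodic call to mark_mounts_for_expiry(), so a mount
 * survives one pass (getting marked) and is reaped on the next pass
 * if still unused:
 *
 *	static LIST_HEAD(example_automount_list);
 *	static void example_reaper(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(example_reap_work, example_reaper);
 *
 *	// in ->d_automount(), after creating the new vfsmount:
 *	//	mnt_set_expiry(mnt, &example_automount_list);
 *	//	schedule_delayed_work(&example_reap_work, 30 * HZ);
 *
 *	static void example_reaper(struct work_struct *work)
 *	{
 *		mark_mounts_for_expiry(&example_automount_list);
 *		if (!list_empty(&example_automount_list))
 *			schedule_delayed_work(&example_reap_work, 30 * HZ);
 *	}
 */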
2617 * Ripoff of 'select_parent()'
2619 * search the list of submounts for a given mountpoint, and move any
2620 * shrinkable submounts to the 'graveyard' list.
2622 static int select_submounts(struct mount *parent, struct list_head *graveyard)
2624 struct mount *this_parent = parent;
2625 struct list_head *next;
2629 next = this_parent->mnt_mounts.next;
2631 while (next != &this_parent->mnt_mounts) {
2632 struct list_head *tmp = next;
2633 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
2636 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
2639 * Descend a level if the mnt_mounts list is non-empty.
2641 if (!list_empty(&mnt->mnt_mounts)) {
2646 if (!propagate_mount_busy(mnt, 1)) {
2647 list_move_tail(&mnt->mnt_expire, graveyard);
2652 * All done at this level ... ascend and resume the search
2654 if (this_parent != parent) {
2655 next = this_parent->mnt_child.next;
2656 this_parent = this_parent->mnt_parent;
2663 * process a list of expirable mountpoints with the intent of discarding any
2664 * submounts of a specific parent mountpoint
2666 * mount_lock must be held for write
2668 static void shrink_submounts(struct mount *mnt)
2670 LIST_HEAD(graveyard);
2673 /* extract submounts of 'mountpoint' from the expiration list */
2674 while (select_submounts(mnt, &graveyard)) {
2675 while (!list_empty(&graveyard)) {
2676 m = list_first_entry(&graveyard, struct mount, mnt_expire);
2678 touch_mnt_namespace(m->mnt_ns);
2679 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
2685 * Some copy_from_user() implementations do not return the exact number of
2686 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
2687 * Note that this function differs from copy_from_user() in that it will oops
2688 * on bad values of `to', rather than returning a short copy.
2690 static long exact_copy_from_user(void *to, const void __user *from, unsigned long n)
2694 const char __user *f = from;
2697 if (!access_ok(VERIFY_READ, from, n))
2701 if (__get_user(c, f)) {
2712 int copy_mount_options(const void __user *data, unsigned long *where)
2722 if (!(page = __get_free_page(GFP_KERNEL)))
2725 /* We only care that *some* data at the address the user
2726 * gave us is valid. Just in case, we'll zero
2727 * the remainder of the page.
2729 /* copy_from_user cannot cross TASK_SIZE ! */
2730 size = TASK_SIZE - (unsigned long)data;
2731 if (size > PAGE_SIZE)
2734 i = size - exact_copy_from_user((void *)page, data, size);
2740 memset((char *)page + i, 0, PAGE_SIZE - i);
2745 char *copy_mount_string(const void __user *data)
2747 return data ? strndup_user(data, PAGE_SIZE) : NULL;
2751 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
2752 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
2754 * data is a (void *) that can point to any structure up to
2755 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
2756 * information (or be NULL).
2758 * Pre-0.97 versions of mount() didn't have a flags word.
2759 * When the flags word was introduced its top half was required
2760 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
2761 * Therefore, if this magic number is present, it carries no information
2762 * and must be discarded.
2764 long do_mount(const char *dev_name, const char __user *dir_name,
2765 const char *type_page, unsigned long flags, void *data_page)
2772 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
2773 flags &= ~MS_MGC_MSK;
2775 /* Basic sanity checks */
2777 ((char *)data_page)[PAGE_SIZE - 1] = 0;
2779 /* ... and get the mountpoint */
2780 retval = user_path(dir_name, &path);
2784 retval = security_sb_mount(dev_name, &path,
2785 type_page, flags, data_page);
2786 if (!retval && !may_mount())
2791 /* Default to relatime unless overridden */
2792 if (!(flags & MS_NOATIME))
2793 mnt_flags |= MNT_RELATIME;
2795 /* Separate the per-mountpoint flags */
2796 if (flags & MS_NOSUID)
2797 mnt_flags |= MNT_NOSUID;
2798 if (flags & MS_NODEV)
2799 mnt_flags |= MNT_NODEV;
2800 if (flags & MS_NOEXEC)
2801 mnt_flags |= MNT_NOEXEC;
2802 if (flags & MS_NOATIME)
2803 mnt_flags |= MNT_NOATIME;
2804 if (flags & MS_NODIRATIME)
2805 mnt_flags |= MNT_NODIRATIME;
2806 if (flags & MS_STRICTATIME)
2807 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2808 if (flags & MS_RDONLY)
2809 mnt_flags |= MNT_READONLY;
2811 /* The default atime for remount is preservation */
2812 if ((flags & MS_REMOUNT) &&
2813 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
2814 MS_STRICTATIME)) == 0)) {
2815 mnt_flags &= ~MNT_ATIME_MASK;
2816 mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
2819 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
2820 MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT | MS_STRICTATIME);
2823 if (flags & MS_REMOUNT)
2824 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
2826 else if (flags & MS_BIND)
2827 retval = do_loopback(&path, dev_name, flags & MS_REC);
2828 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2829 retval = do_change_type(&path, flags);
2830 else if (flags & MS_MOVE)
2831 retval = do_move_mount(&path, dev_name);
2833 retval = do_new_mount(&path, type_page, flags, mnt_flags,
2834 dev_name, data_page);
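/*
 * Editor's sketch of the userspace view of the dispatch above
 * (illustrative; needs root): the MS_MGC_VAL magic in the top 16 bits
 * is stripped, MS_NOSUID/MS_NODEV become per-mountpoint MNT_* flags,
 * and with no remount/bind/move/propagation flags set the request
 * falls through to do_new_mount():
 *
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// behaves exactly like the same call without MS_MGC_VAL
 *		return mount("tmpfs", "/mnt", "tmpfs",
 *			     MS_MGC_VAL | MS_NOSUID | MS_NODEV, "size=16m");
 *	}
 */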
2840 static void free_mnt_ns(struct mnt_namespace *ns)
2842 ns_free_inum(&ns->ns);
2843 put_user_ns(ns->user_ns);
2848 * Assign a sequence number so we can detect when we attempt to bind
2849 * mount a reference to an older mount namespace into the current
2850 * mount namespace, preventing reference counting loops. Even a 64-bit
2851 * counter incremented at 10GHz takes about 58 years (2^64 / 10^10 Hz)
2852 * to wrap, which is effectively never, so we can ignore the possibility.
2854 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
2856 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
2858 struct mnt_namespace *new_ns;
2861 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
2863 return ERR_PTR(-ENOMEM);
2864 ret = ns_alloc_inum(&new_ns->ns);
2867 return ERR_PTR(ret);
2869 new_ns->ns.ops = &mntns_operations;
2870 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
2871 atomic_set(&new_ns->count, 1);
2872 new_ns->root = NULL;
2873 INIT_LIST_HEAD(&new_ns->list);
2874 init_waitqueue_head(&new_ns->poll);
2876 new_ns->user_ns = get_user_ns(user_ns);
2878 new_ns->pending_mounts = 0;
2882 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2883 struct user_namespace *user_ns, struct fs_struct *new_fs)
2885 struct mnt_namespace *new_ns;
2886 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
2887 struct mount *p, *q;
2894 if (likely(!(flags & CLONE_NEWNS))) {
2901 new_ns = alloc_mnt_ns(user_ns);
2906 /* First pass: copy the tree topology */
2907 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
2908 if (user_ns != ns->user_ns)
2909 copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2910 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2913 free_mnt_ns(new_ns);
2914 return ERR_CAST(new);
2917 list_add_tail(&new_ns->list, &new->mnt_list);
2920 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2921 * as belonging to new namespace. We have already acquired a private
2922 * fs_struct, so tsk->fs->lock is not needed.
2930 if (&p->mnt == new_fs->root.mnt) {
2931 new_fs->root.mnt = mntget(&q->mnt);
2934 if (&p->mnt == new_fs->pwd.mnt) {
2935 new_fs->pwd.mnt = mntget(&q->mnt);
2939 p = next_mnt(p, old);
2940 q = next_mnt(q, new);
2943 while (p->mnt.mnt_root != q->mnt.mnt_root)
2944 p = next_mnt(p, old);
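/*
 * Editor's sketch (illustrative; needs root): copy_mnt_ns() is the
 * code path behind unshare(CLONE_NEWNS). A minimal demonstration:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWNS) == -1)
 *			return 1;
 *		// make our copy private so changes don't propagate back
 *		mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 *		// this mount is invisible to the parent namespace
 *		return mount("tmpfs", "/tmp", "tmpfs", 0, NULL);
 *	}
 */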
2957 * create_mnt_ns - creates a private namespace and adds a root filesystem
2958 * @mnt: pointer to the new root filesystem mountpoint
2960 static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
2962 struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
2963 if (!IS_ERR(new_ns)) {
2964 struct mount *mnt = real_mount(m);
2965 mnt->mnt_ns = new_ns;
2968 list_add(&mnt->mnt_list, &new_ns->list);
2975 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2977 struct mnt_namespace *ns;
2978 struct super_block *s;
2982 ns = create_mnt_ns(mnt);
2984 return ERR_CAST(ns);
2986 err = vfs_path_lookup(mnt->mnt_root, mnt,
2987 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2992 return ERR_PTR(err);
2994 /* trade a vfsmount reference for active sb one */
2995 s = path.mnt->mnt_sb;
2996 atomic_inc(&s->s_active);
2998 /* lock the sucker */
2999 down_write(&s->s_umount);
3000 /* ... and return the root of (sub)tree on it */
3003 EXPORT_SYMBOL(mount_subtree);
3005 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3006 char __user *, type, unsigned long, flags, void __user *, data)
3011 unsigned long data_page;
3013 kernel_type = copy_mount_string(type);
3014 ret = PTR_ERR(kernel_type);
3015 if (IS_ERR(kernel_type))
3018 kernel_dev = copy_mount_string(dev_name);
3019 ret = PTR_ERR(kernel_dev);
3020 if (IS_ERR(kernel_dev))
3023 ret = copy_mount_options(data, &data_page);
3027 ret = do_mount(kernel_dev, dir_name, kernel_type, flags,
3028 (void *) data_page);
3030 free_page(data_page);
3040 * Return true if path is reachable from root
3042 * namespace_sem or mount_lock is held
3044 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
3045 const struct path *root)
3047 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
3048 dentry = mnt->mnt_mountpoint;
3049 mnt = mnt->mnt_parent;
3051 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
3054 int path_is_under(struct path *path1, struct path *path2)
3057 read_seqlock_excl(&mount_lock);
3058 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
3059 read_sequnlock_excl(&mount_lock);
3062 EXPORT_SYMBOL(path_is_under);
3065 * pivot_root Semantics:
3066 * Moves the root file system of the current process to the directory put_old,
3067 * makes new_root the new root file system of the current process, and sets
3068 * root/cwd of all processes which had them on the current root to new_root.
3071 * The new_root and put_old must be directories, and must not be on the
3072 * same file system as the current process root. The put_old must be
3073 * underneath new_root, i.e. adding a non-zero number of /.. to the string
3074 * pointed to by put_old must yield the same directory as new_root. No other
3075 * file system may be mounted on put_old. After all, new_root is a mountpoint.
3077 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
3078 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
3079 * in this situation.
3082 * - we don't move root/cwd if they are not at the root (reason: if something
3083 * cared enough to change them, it's probably wrong to force them elsewhere)
3084 * - it's okay to pick a root that isn't the root of a file system, e.g.
3085 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
3086 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
3089 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
3090 const char __user *, put_old)
3092 struct path new, old, parent_path, root_parent, root;
3093 struct mount *new_mnt, *root_mnt, *old_mnt;
3094 struct mountpoint *old_mp, *root_mp;
3100 error = user_path_dir(new_root, &new);
3104 error = user_path_dir(put_old, &old);
3108 error = security_sb_pivotroot(&old, &new);
3112 get_fs_root(current->fs, &root);
3113 old_mp = lock_mount(&old);
3114 error = PTR_ERR(old_mp);
3119 new_mnt = real_mount(new.mnt);
3120 root_mnt = real_mount(root.mnt);
3121 old_mnt = real_mount(old.mnt);
3122 if (IS_MNT_SHARED(old_mnt) ||
3123 IS_MNT_SHARED(new_mnt->mnt_parent) ||
3124 IS_MNT_SHARED(root_mnt->mnt_parent))
3126 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
3128 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
3131 if (d_unlinked(new.dentry))
3134 if (new_mnt == root_mnt || old_mnt == root_mnt)
3135 goto out4; /* loop, on the same file system */
3137 if (root.mnt->mnt_root != root.dentry)
3138 goto out4; /* not a mountpoint */
3139 if (!mnt_has_parent(root_mnt))
3140 goto out4; /* not attached */
3141 root_mp = root_mnt->mnt_mp;
3142 if (new.mnt->mnt_root != new.dentry)
3143 goto out4; /* not a mountpoint */
3144 if (!mnt_has_parent(new_mnt))
3145 goto out4; /* not attached */
3146 /* make sure we can reach put_old from new_root */
3147 if (!is_path_reachable(old_mnt, old.dentry, &new))
3149 /* make certain new is below the root */
3150 if (!is_path_reachable(new_mnt, new.dentry, &root))
3152 root_mp->m_count++; /* pin it so it won't go away */
3154 detach_mnt(new_mnt, &parent_path);
3155 detach_mnt(root_mnt, &root_parent);
3156 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
3157 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
3158 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
3160 /* mount old root on put_old */
3161 attach_mnt(root_mnt, old_mnt, old_mp);
3162 /* mount new_root on / */
3163 attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
3164 touch_mnt_namespace(current->nsproxy->mnt_ns);
3165 /* A moved mount should not expire automatically */
3166 list_del_init(&new_mnt->mnt_expire);
3167 put_mountpoint(root_mp);
3168 unlock_mount_hash();
3169 chroot_fs_refs(&root, &new);
3172 unlock_mount(old_mp);
3174 path_put(&root_parent);
3175 path_put(&parent_path);
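/*
 * Editor's sketch of the userspace sequence implied by the semantics
 * above (illustrative; assumes /newroot is a prepared root tree
 * containing an empty oldroot directory; glibc has no pivot_root
 * wrapper):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unshare(CLONE_NEWNS);
 *		mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 *		// new_root must itself be a mount point
 *		mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
 *		// put_old must be underneath new_root
 *		syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot");
 *		chdir("/");
 *		return umount2("/oldroot", MNT_DETACH);
 *	}
 */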
3187 static void __init init_mount_tree(void)
3189 struct vfsmount *mnt;
3190 struct mnt_namespace *ns;
3192 struct file_system_type *type;
3194 type = get_fs_type("rootfs");
3196 panic("Can't find rootfs type");
3197 mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
3198 put_filesystem(type);
3200 panic("Can't create rootfs");
3202 ns = create_mnt_ns(mnt);
3204 panic("Can't allocate initial namespace");
3206 init_task.nsproxy->mnt_ns = ns;
3210 root.dentry = mnt->mnt_root;
3211 mnt->mnt_flags |= MNT_LOCKED;
3213 set_fs_pwd(current->fs, &root);
3214 set_fs_root(current->fs, &root);
3217 void __init mnt_init(void)
3222 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
3223 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
3225 mount_hashtable = alloc_large_system_hash("Mount-cache",
3226 sizeof(struct hlist_head), mhash_entries, 19, 0,
3229 &m_hash_shift, &m_hash_mask, 0, 0);
3230 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
3231 sizeof(struct hlist_head), mphash_entries, 19, 0,
3234 &mp_hash_shift, &mp_hash_mask, 0, 0);
3236 if (!mount_hashtable || !mountpoint_hashtable)
3237 panic("Failed to allocate mount hash table\n");
3239 for (u = 0; u <= m_hash_mask; u++)
3240 INIT_HLIST_HEAD(&mount_hashtable[u]);
3241 for (u = 0; u <= mp_hash_mask; u++)
3242 INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
3248 printk(KERN_WARNING "%s: sysfs_init error: %d\n", __func__, err);
3250 fs_kobj = kobject_create_and_add("fs", NULL);
3252 printk(KERN_WARNING "%s: kobj create error\n", __func__);
3257 void put_mnt_ns(struct mnt_namespace *ns)
3259 if (!atomic_dec_and_test(&ns->count))
3261 drop_collected_mounts(&ns->root->mnt);
3265 struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
3267 struct vfsmount *mnt;
3268 mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
3271 * it is a long-term mount; don't release mnt until
3272 * we unmount it, just before the filesystem is unregistered
3274 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
3278 EXPORT_SYMBOL_GPL(kern_mount_data);
3280 void kern_unmount(struct vfsmount *mnt)
3282 /* release long term mount so mount point can be released */
3283 if (!IS_ERR_OR_NULL(mnt)) {
3284 real_mount(mnt)->mnt_ns = NULL;
3285 synchronize_rcu(); /* yecchhh... */
3289 EXPORT_SYMBOL(kern_unmount);
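/*
 * Editor's sketch (illustrative; example_fs_type and friends are
 * hypothetical): the usual pairing of these two helpers for an
 * internal, long-term mount:
 *
 *	static struct vfsmount *example_mnt;
 *
 *	static int __init example_init(void)
 *	{
 *		example_mnt = kern_mount_data(&example_fs_type, NULL);
 *		return PTR_ERR_OR_ZERO(example_mnt);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		kern_unmount(example_mnt);
 *	}
 */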
3291 bool our_mnt(struct vfsmount *mnt)
3293 return check_mnt(real_mount(mnt));
3296 bool current_chrooted(void)
3298 /* Does the current process have a non-standard root? */
3299 struct path ns_root;
3300 struct path fs_root;
3303 /* Find the namespace root */
3304 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
3305 ns_root.dentry = ns_root.mnt->mnt_root;
3307 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) ; /* empty body: just walk down */
3310 get_fs_root(current->fs, &fs_root);
3312 chrooted = !path_equal(&fs_root, &ns_root);
3320 static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3322 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
3323 int new_flags = *new_mnt_flags;
3325 bool visible = false;
3330 down_read(&namespace_sem);
3331 list_for_each_entry(mnt, &ns->list, mnt_list) {
3332 struct mount *child;
3335 if (mnt->mnt.mnt_sb->s_type != type)
3338 /* This mount is not fully visible if its root directory
3339 * is not the root directory of the filesystem.
3341 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
3344 /* Read the mount flags and filter out flags that
3345 * may safely be ignored.
3347 mnt_flags = mnt->mnt.mnt_flags;
3348 if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
3349 mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
3351 /* Don't miss readonly hidden in the superblock flags */
3352 if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
3353 mnt_flags |= MNT_LOCK_READONLY;
3355 /* Verify the mount flags are equal to or more permissive
3356 * than the proposed new mount.
3358 if ((mnt_flags & MNT_LOCK_READONLY) &&
3359 !(new_flags & MNT_READONLY))
3361 if ((mnt_flags & MNT_LOCK_NODEV) &&
3362 !(new_flags & MNT_NODEV))
3364 if ((mnt_flags & MNT_LOCK_NOSUID) &&
3365 !(new_flags & MNT_NOSUID))
3367 if ((mnt_flags & MNT_LOCK_NOEXEC) &&
3368 !(new_flags & MNT_NOEXEC))
3370 if ((mnt_flags & MNT_LOCK_ATIME) &&
3371 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
3374 /* This mount is not fully visible if there are any
3375 * locked child mounts that cover anything except for
3376 * empty directories.
3378 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3379 struct inode *inode = child->mnt_mountpoint->d_inode;
3380 /* Only worry about locked mounts */
3381 if (!(child->mnt.mnt_flags & MNT_LOCKED))
3383 /* Is the directory permanently empty? */
3384 if (!is_empty_dir_inode(inode))
3387 /* Preserve the locked attributes */
3388 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | MNT_LOCK_ATIME);
3398 up_read(&namespace_sem);
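/*
 * Editor's sketch (illustrative): an unprivileged user exercises
 * fs_fully_visible() by remounting proc in a fresh user namespace.
 * Requesting stricter flags than the visible instance is always
 * acceptable; dropping a locked flag makes the mount fail:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == -1)
 *			return 1;
 *		return mount("proc", "/proc", "proc",
 *			     MS_NOSUID | MS_NODEV | MS_NOEXEC, NULL);
 *	}
 */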
3402 static struct ns_common *mntns_get(struct task_struct *task)
3404 struct ns_common *ns = NULL;
3405 struct nsproxy *nsproxy;
3408 nsproxy = task->nsproxy;
3410 ns = &nsproxy->mnt_ns->ns;
3411 get_mnt_ns(to_mnt_ns(ns));
3418 static void mntns_put(struct ns_common *ns)
3420 put_mnt_ns(to_mnt_ns(ns));
3423 static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
3425 struct fs_struct *fs = current->fs;
3426 struct mnt_namespace *mnt_ns = to_mnt_ns(ns);
3429 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
3430 !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
3431 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
3438 put_mnt_ns(nsproxy->mnt_ns);
3439 nsproxy->mnt_ns = mnt_ns;
3442 root.mnt = &mnt_ns->root->mnt;
3443 root.dentry = mnt_ns->root->mnt.mnt_root;
3445 while (d_mountpoint(root.dentry) && follow_down_one(&root)) ; /* empty body: just walk down */
3448 /* Update the pwd and root */
3449 set_fs_pwd(fs, &root);
3450 set_fs_root(fs, &root);
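/*
 * Editor's sketch (illustrative; pid 1234 is hypothetical): entering
 * another task's mount namespace via setns() ends up in
 * mntns_install() above, hence the CAP_SYS_ADMIN and CAP_SYS_CHROOT
 * requirements:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *		if (fd < 0)
 *			return 1;
 *		int err = setns(fd, CLONE_NEWNS);
 *		close(fd);
 *		return err ? 1 : 0;
 *	}
 */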
3456 const struct proc_ns_operations mntns_operations = {
3458 .type = CLONE_NEWNS,
3461 .install = mntns_install,