/*
 * (C) Copyright Al Viro 2000, 2001
 * Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <linux/fs_struct.h>
#include <linux/fsnotify.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
DEFINE_BRLOCK(vfsmount_lock);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}
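/*
 * Illustrative note (not from the original file): on a typical 64-bit
 * build, PAGE_SIZE is 4096 and sizeof(struct list_head) is 16, so
 * HASH_SHIFT is ilog2(4096 / 16) = 8 and the table has 256 buckets.
 * Dividing both pointers by L1_CACHE_BYTES discards low bits that are
 * identical for cache-line-aligned allocations, and the final
 * "tmp + (tmp >> HASH_SHIFT)" folds the higher bits into the low
 * HASH_SHIFT bits used as the bucket index.
 */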
#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida, mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_writers = 0;
#endif

		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success. When
 * the write operation is finished, mnt_drop_write()
 * must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (__mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
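/*
 * Typical usage (a minimal sketch, not part of this file): a caller that
 * is about to modify the filesystem brackets the operation with
 * mnt_want_write()/mnt_drop_write(), e.g.:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = vfs_unlink(dir, dentry);	// any write-side operation
 *	mnt_drop_write(path->mnt);
 *	return err;
 */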
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);
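/*
 * Sketch of the file-based variant (illustrative; do_the_update() is a
 * hypothetical helper): an ioctl handler that writes through an
 * already-open file would do:
 *
 *	err = mnt_want_write_file(file);
 *	if (err)
 *		return err;
 *	err = do_the_update(file);
 *	mnt_drop_write_file(file);
 *	return err;
 */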
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	br_write_lock(vfsmount_lock);
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	br_write_unlock(vfsmount_lock);
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	br_write_lock(vfsmount_lock);
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	br_write_unlock(vfsmount_lock);
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}
/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 * vfsmount_lock must be held for read or write.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			   int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct mount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct mount, mnt_hash);
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;

	br_read_lock(vfsmount_lock);
	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
	if (child_mnt) {
		mnt_add_count(child_mnt, 1);
		br_read_unlock(vfsmount_lock);
		return &child_mnt->mnt;
	} else {
		br_read_unlock(vfsmount_lock);
		return NULL;
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
/*
 * Clear dentry's mounted state if it has no remaining mounts.
 * vfsmount_lock must be held for write.
 */
static void dentry_reset_mounted(struct dentry *dentry)
{
	unsigned u;

	for (u = 0; u < HASH_SIZE; u++) {
		struct mount *p;

		list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
			if (p->mnt_mountpoint == dentry)
				return;
		}
	}
	spin_lock(&dentry->d_lock);
	dentry->d_flags &= ~DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	dentry_reset_mounted(old_path->dentry);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt, struct dentry *dentry,
			struct mount *child_mnt)
{
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(dentry);
	child_mnt->mnt_parent = mnt;
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt, struct path *path)
{
	mnt_set_mountpoint(real_mount(path->mnt), path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts);
}
static inline void __mnt_make_longterm(struct mount *mnt)
{
#ifdef CONFIG_SMP
	atomic_inc(&mnt->mnt_longterm);
#endif
}

/* needs vfsmount lock for write */
static inline void __mnt_make_shortterm(struct mount *mnt)
{
#ifdef CONFIG_SMP
	atomic_dec(&mnt->mnt_longterm);
#endif
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list) {
		m->mnt_ns = n;
		__mnt_make_longterm(m);
	}

	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
			       int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
		atomic_inc(&sb->s_active);
		mnt->mnt.mnt_sb = sb;
		mnt->mnt.mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt.mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

out_free:
	free_vfsmnt(mnt);
	return NULL;
}
static inline void mntfree(struct mount *mnt)
{
	struct vfsmount *m = &mnt->mnt;
	struct super_block *sb = m->mnt_sb;

	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	fsnotify_vfsmount_delete(m);
	dput(m->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}
static void mntput_no_expire(struct mount *mnt)
{
put_again:
#ifdef CONFIG_SMP
	br_read_lock(vfsmount_lock);
	if (likely(atomic_read(&mnt->mnt_longterm))) {
		mnt_add_count(mnt, -1);
		br_read_unlock(vfsmount_lock);
		return;
	}
	br_read_unlock(vfsmount_lock);

	br_write_lock(vfsmount_lock);
	mnt_add_count(mnt, -1);
	if (mnt_get_count(mnt)) {
		br_write_unlock(vfsmount_lock);
		return;
	}
#else
	mnt_add_count(mnt, -1);
	if (likely(mnt_get_count(mnt)))
		return;
	br_write_lock(vfsmount_lock);
#endif
	if (unlikely(mnt->mnt_pinned)) {
		mnt_add_count(mnt, mnt->mnt_pinned + 1);
		mnt->mnt_pinned = 0;
		br_write_unlock(vfsmount_lock);
		acct_auto_close_mnt(&mnt->mnt);
		goto put_again;
	}
	br_write_unlock(vfsmount_lock);
	mntfree(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

void mnt_pin(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	real_mount(mnt)->mnt_pinned++;
	br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	br_write_lock(vfsmount_lock);
	if (mnt->mnt_pinned) {
		mnt_add_count(mnt, 1);
		mnt->mnt_pinned--;
	}
	br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(mnt->mnt_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure, that previous options are not overwritten if the
 * remount fails.
 *
 * Also note, that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);
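/*
 * Example wiring (a sketch; "examplefs" is hypothetical): a filesystem
 * that is content with the generic behaviour saves its option string at
 * mount time and points .show_options at generic_show_options():
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					void *data, int silent)
 *	{
 *		save_mount_options(sb, data);
 *		// ... normal fill_super work ...
 *		return 0;
 *	}
 *
 *	static const struct super_operations examplefs_sops = {
 *		.show_options	= generic_show_options,
 *	};
 */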
void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = container_of(m, struct proc_mounts, m);

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = container_of(m, struct proc_mounts, m);

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = container_of(m, struct proc_mounts, m);
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	br_write_lock(vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	br_write_unlock(vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	br_write_lock(vfsmount_lock);
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	br_write_unlock(vfsmount_lock);
	up_read(&namespace_sem);
	return ret;
}
EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
	struct mount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct mount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt_has_parent(mnt)) {
			struct dentry *dentry;
			struct mount *m;

			br_write_lock(vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt.mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			br_write_unlock(vfsmount_lock);
			dput(dentry);
			mntput(&m->mnt);
		}
		mntput(&mnt->mnt);
	}
}

/*
 * vfsmount lock must be held for write
 * namespace_sem must be held for write
 */
void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, &tmp_list);

	if (propagate)
		propagate_umount(&tmp_list);

	list_for_each_entry(p, &tmp_list, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		__mnt_make_shortterm(p);
		list_del_init(&p->mnt_child);
		if (mnt_has_parent(p)) {
			p->mnt_parent->mnt_ghosts++;
			dentry_reset_mounted(p->mnt_mountpoint);
			p->mnt_mountpoint = p->mnt.mnt_root;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
	list_splice(&tmp_list, kill);
}
static void shrink_submounts(struct mount *mnt, struct list_head *umounts);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		br_write_lock(vfsmount_lock);
		if (mnt_get_count(mnt) != 2) {
			br_write_unlock(vfsmount_lock);
			return -EBUSY;
		}
		br_write_unlock(vfsmount_lock);

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}
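/*
 * Userspace view of the MNT_EXPIRE handshake above (illustrative sketch):
 * an unused mount can be reaped with two umount2() calls:
 *
 *	umount2("/mnt/auto", MNT_EXPIRE);	// fails with EAGAIN, but
 *						// sets the expiry mark
 *	umount2("/mnt/auto", MNT_EXPIRE);	// succeeds if the mount
 *						// stayed unused in between
 */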
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif
static int mount_is_safe(struct path *path)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(path->dentry->d_inode->i_mode))
		return -EPERM;
	if (path->dentry->d_inode->i_mode & S_ISVTX) {
		if (current_uid() != path->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (inode_permission(path->dentry->d_inode, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
			int flag)
{
	struct mount *res, *p, *q, *r;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = &q->mnt;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (!q)
				goto Enomem;
			br_write_lock(vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			br_write_unlock(vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		br_write_lock(vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}
struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(real_mount(path->mnt), path->dentry,
			 CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree ? &tree->mnt : NULL;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	umount_tree(real_mount(mnt), 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared       |     private    |     slave      | unbindable |
 * | dest     |              |                |                |            |
 * |   |      |              |                |                |            |
 * |   v      |              |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)  |   shared (+)   |   shared(+++)  |  invalid   |
 * |          |              |                |                |            |
 * |non-shared| shared (+)   |    private     |    slave (*)   |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |                    MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared       |     private    |     slave      | unbindable |
 * | dest     |              |                |                |            |
 * |   |      |              |                |                |            |
 * |   v      |              |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)   |   shared (+)   |   shared(+++)  |  invalid   |
 * |          |              |                |                |            |
 * |non-shared| shared (+*)  |    private     |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
				struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct mount *dest_mnt = real_mount(path->mnt);
	struct dentry *dest_dentry = path->dentry;
	struct mount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	br_write_lock(vfsmount_lock);

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	br_write_unlock(vfsmount_lock);

	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}
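/*
 * Userspace view of the tables above (illustrative sketch): if /mnt is a
 * shared mount, a bind mounted into it propagates to all of its peers:
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);	// make /mnt shared
 *	mount("/src", "/mnt/dst", NULL, MS_BIND, NULL);	// the clone also
 *							// appears in every
 *							// peer of /mnt
 */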
static int lock_mount(struct path *path)
{
	struct vfsmount *mnt;
retry:
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(path->dentry))) {
		mutex_unlock(&path->dentry->d_inode->i_mutex);
		return -ENOENT;
	}
	down_write(&namespace_sem);
	mnt = lookup_mnt(path);
	if (likely(!mnt))
		return 0;
	up_write(&namespace_sem);
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct path *path)
{
	up_write(&namespace_sem);
	mutex_unlock(&path->dentry->d_inode->i_mutex);
}

static int graft_tree(struct mount *mnt, struct path *path)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	    S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	if (d_unlinked(path->dentry))
		return -ENOENT;

	return attach_recursive_mnt(mnt, path, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	br_write_lock(vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	br_write_unlock(vfsmount_lock);

out_unlock:
	up_write(&namespace_sem);
	return err;
}
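/*
 * This is the path taken by propagation-type changes issued from
 * userspace (illustrative sketch):
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);	   // mount --make-shared /mnt
 *	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL); // mount --make-rprivate /
 */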
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, char *old_name,
		       int recurse)
{
	LIST_HEAD(umount_list);
	struct path old_path;
	struct mount *mnt = NULL, *old;
	int err = mount_is_safe(path);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = lock_mount(path);
	if (err)
		goto out;

	old = real_mount(old_path.mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, 0);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (!mnt) {
		err = -ENOMEM;
		goto out2;
	}

	err = graft_tree(mnt, path);
	if (err) {
		br_write_lock(vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
	}
out2:
	unlock_mount(path);
	release_mounts(&umount_list);
out:
	path_put(&old_path);
	return err;
}
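/*
 * From userspace, a loopback (bind) mount is requested as (sketch):
 *
 *	mount("/olddir", "/newdir", NULL, MS_BIND, NULL);	   // one mount
 *	mount("/olddir", "/newdir", NULL, MS_BIND | MS_REC, NULL); // whole tree
 */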
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		br_write_lock(vfsmount_lock);
		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		br_write_unlock(vfsmount_lock);
	}
	up_write(&sb->s_umount);
	if (!err) {
		br_write_lock(vfsmount_lock);
		touch_mnt_namespace(mnt->mnt_ns);
		br_write_unlock(vfsmount_lock);
	}
	return err;
}
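/*
 * A remount is requested from userspace as (illustrative sketch):
 *
 *	mount(NULL, "/", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *
 * With MS_BIND also set only the per-mountpoint flags are changed (the
 * change_mount_flags() path above); otherwise do_remount_sb() asks the
 * filesystem to change superblock-wide state.
 */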
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	err = lock_mount(path);
	if (err < 0)
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (d_unlinked(path->dentry))
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	    S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(path);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

err:
	mntput(mnt);
	return ERR_PTR(err);
}

static struct vfsmount *
do_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
	struct file_system_type *type = get_fs_type(fstype);
	struct vfsmount *mnt;
	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);
	put_filesystem(type);
	return mnt;
}
/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	int err;

	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);

	err = lock_mount(path);
	if (err)
		return err;

	err = -EINVAL;
	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt)))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, path);

unlock:
	unlock_mount(path);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;
	int err;

	if (!type)
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}
int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		down_write(&namespace_sem);
		br_write_lock(vfsmount_lock);
		list_del_init(&mnt->mnt_expire);
		br_write_unlock(vfsmount_lock);
		up_write(&namespace_sem);
	}
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
}
EXPORT_SYMBOL(mnt_set_expiry);
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
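/*
 * Sketch of how a filesystem uses the expiry machinery (names here are
 * illustrative, not from this file): automounted submounts are put on a
 * per-fs list at creation time and reaped periodically:
 *
 *	static LIST_HEAD(examplefs_automounts);
 *
 *	// in the ->d_automount() path, after the mount is set up:
 *	mnt_set_expiry(newmnt, &examplefs_automounts);
 *
 *	// from a periodic work item:
 *	mark_mounts_for_expiry(&examplefs_automounts);
 */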
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * vfsmount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
static struct mnt_namespace *alloc_mnt_ns(void)
{
	struct mnt_namespace *new_ns;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	return new_ns;
}

void mnt_make_longterm(struct vfsmount *mnt)
{
	__mnt_make_longterm(real_mount(mnt));
}

void mnt_make_shortterm(struct vfsmount *m)
{
#ifdef CONFIG_SMP
	struct mount *mnt = real_mount(m);
	if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
		return;
	br_write_lock(vfsmount_lock);
	atomic_dec(&mnt->mnt_longterm);
	br_write_unlock(vfsmount_lock);
#endif
}
/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
					struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old = real_mount(mnt_ns->root);
	struct mount *new;

	new_ns = alloc_mnt_ns();
	if (IS_ERR(new_ns))
		return new_ns;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
	if (!new) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	new_ns->root = &new->mnt;
	br_write_lock(vfsmount_lock);
	list_add_tail(&new_ns->list, &new->mnt_list);
	br_write_unlock(vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace. We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		__mnt_make_longterm(q);
		if (fs) {
			if (&p->mnt == fs->root.mnt) {
				fs->root.mnt = mntget(&q->mnt);
				__mnt_make_longterm(q);
				mnt_make_shortterm(&p->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == fs->pwd.mnt) {
				fs->pwd.mnt = mntget(&q->mnt);
				__mnt_make_longterm(q);
				mnt_make_shortterm(&p->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
				  struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}
/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns();
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		__mnt_make_longterm(mnt);
		new_ns->root = m;
		list_add(&new_ns->list, &mnt->mnt_list);
	} else {
		mntput(m);
	}
	return new_ns;
}
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			      name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
		       (void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or vfsmount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
		       const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	br_read_lock(vfsmount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	br_read_unlock(vfsmount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	error = lock_mount(&old);
	if (error)
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	if (IS_MNT_SHARED(real_mount(old.mnt)) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	if (d_unlinked(old.dentry))
		goto out4;
	error = -EBUSY;
	if (new.mnt == root.mnt ||
	    old.mnt == root.mnt)
		goto out4; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
		goto out4;
	br_write_lock(vfsmount_lock);
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root_mnt, &old);
	/* mount new_root on / */
	attach_mnt(new_mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	br_write_unlock(vfsmount_lock);
	chroot_fs_refs(&root, &new);
	error = 0;
out4:
	unlock_mount(&old);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
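/*
 * Userspace sketch of the sequence this implements (illustrative; error
 * handling omitted), as used when switching into a new root:
 *
 *	chdir("/new_root");
 *	syscall(SYS_pivot_root, ".", "old_root"); // old root now at /old_root
 *	chroot(".");
 *	chdir("/");
 *	umount2("/old_root", MNT_DETACH);
 */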
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = ns->root;
	root.dentry = ns->root->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	br_lock_init(vfsmount_lock);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
void put_mnt_ns(struct mnt_namespace *ns)
{
	LIST_HEAD(umount_list);

	if (!atomic_dec_and_test(&ns->count))
		return;
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	umount_tree(real_mount(ns->root), 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(ns);
}
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		mnt_make_longterm(mnt);
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);

void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		mnt_make_shortterm(mnt);
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
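/*
 * Intended pairing (illustrative sketch; "example_fs_type" is
 * hypothetical): a subsystem that needs an internal, never-user-visible
 * mount keeps it for its whole lifetime:
 *
 *	static struct vfsmount *example_mnt;
 *
 *	example_mnt = kern_mount_data(&example_fs_type, NULL);	// at init
 *	...
 *	kern_unmount(example_mnt);				// at exit
 */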
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}