 * (C) Copyright Al Viro 2000, 2001
 * Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/* allocation is serialized by namespace_sem */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&vfsmount_lock);
	res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
	return res;
}

static void mnt_free_id(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	ida_remove(&mnt_id_ida, mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	ida_remove(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}

struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err = mnt_alloc_id(mnt);
		if (err) {
			kmem_cache_free(mnt_cache, mnt);
			return ERR_PTR(err);
		}

		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		atomic_set(&mnt->__mnt_writers, 0);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*.  This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
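
/*
 * Example (a sketch, not code from this file): a caller that only needs
 * a point-in-time answer, e.g. to fail early with the right error code,
 * might do:
 *
 *	if (__mnt_is_readonly(mnt))
 *		return -EROFS;
 *
 * As the comment above says, this guarantees nothing about the future;
 * only mnt_want_write() actually holds the mount read-write.
 */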

struct mnt_writer {
	/*
	 * If holding multiple instances of this lock, they
	 * must be ordered by cpu number.
	 */
	spinlock_t lock;
	struct lock_class_key lock_class; /* compiles out with !lockdep */
	unsigned long count;
	struct vfsmount *mnt;
} ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);

static int __init init_mnt_writers(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
		spin_lock_init(&writer->lock);
		lockdep_set_class(&writer->lock, &writer->lock_class);
		writer->count = 0;
	}
	return 0;
}
fs_initcall(init_mnt_writers);

static void unlock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_unlock(&cpu_writer->lock);
	}
}

static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
{
	if (!cpu_writer->mnt)
		return;
	/*
	 * This is in case anyone ever leaves an invalid,
	 * old ->mnt and a count of 0.
	 */
	if (!cpu_writer->count)
		return;
	atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
	cpu_writer->count = 0;
}

/*
 * must hold cpu_writer->lock
 */
static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
					  struct vfsmount *mnt)
{
	if (cpu_writer->mnt == mnt)
		return;
	__clear_mnt_count(cpu_writer);
	cpu_writer->mnt = mnt;
}

/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success.  When
 * the write operation is finished, mnt_drop_write()
 * must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);
	if (__mnt_is_readonly(mnt)) {
		ret = -EROFS;
		goto out;
	}
	use_cpu_writer_for_mount(cpu_writer, mnt);
	cpu_writer->count++;
out:
	spin_unlock(&cpu_writer->lock);
	put_cpu_var(mnt_writers);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

static void lock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_lock(&cpu_writer->lock);
		__clear_mnt_count(cpu_writer);
		cpu_writer->mnt = NULL;
	}
}

/*
 * These per-cpu write counts are not guaranteed to have
 * matched increments and decrements on any given cpu.
 * A file open()ed for write on one cpu and close()d on
 * another cpu will imbalance this count.  Make sure it
 * does not get too far out of whack.
 */
static void handle_write_count_underflow(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->__mnt_writers) >=
	    MNT_WRITER_UNDERFLOW_LIMIT)
		return;
	/*
	 * It isn't necessary to hold all of the locks
	 * at the same time, but doing it this way makes
	 * us share a lot more code.
	 */
	lock_mnt_writers();
	/*
	 * vfsmount_lock is for mnt_flags.
	 */
	spin_lock(&vfsmount_lock);
	/*
	 * If coalescing the per-cpu writer counts did not
	 * get us back to a positive writer count, we have
	 * a bug.
	 */
	if ((atomic_read(&mnt->__mnt_writers) < 0) &&
	    !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
		printk(KERN_DEBUG "leak detected on mount(%p) writers "
				"count: %d\n",
			mnt, atomic_read(&mnt->__mnt_writers));
		/* use the flag to keep the dmesg spam down */
		mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
	}
	spin_unlock(&vfsmount_lock);
	unlock_mnt_writers();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	int must_check_underflow = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);

	use_cpu_writer_for_mount(cpu_writer, mnt);
	if (cpu_writer->count > 0) {
		cpu_writer->count--;
	} else {
		must_check_underflow = 1;
		atomic_dec(&mnt->__mnt_writers);
	}

	spin_unlock(&cpu_writer->lock);
	/*
	 * Logically, we could call this each time,
	 * but the __mnt_writers cacheline tends to
	 * be cold, and makes this expensive.
	 */
	if (must_check_underflow)
		handle_write_count_underflow(mnt);
	/*
	 * This could be done right after the spinlock
	 * is taken because the spinlock keeps us on
	 * the cpu, and disables preemption.  However,
	 * putting it here bounds the amount that
	 * __mnt_writers can underflow.  Without it,
	 * we could theoretically wrap __mnt_writers.
	 */
	put_cpu_var(mnt_writers);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
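
/*
 * Typical caller pattern (a sketch; do_the_write() is a hypothetical
 * helper, not a kernel function): every successful mnt_want_write()
 * must be paired with exactly one mnt_drop_write():
 *
 *	err = mnt_want_write(file->f_path.mnt);
 *	if (err)
 *		return err;
 *	err = do_the_write(file);
 *	mnt_drop_write(file->f_path.mnt);
 *	return err;
 */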

static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	lock_mnt_writers();
	/*
	 * With all the locks held, this value is stable
	 */
	if (atomic_read(&mnt->__mnt_writers) > 0) {
		ret = -EBUSY;
		goto out;
	}
	/*
	 * nobody can do a successful mnt_want_write() with all
	 * of the counts in MNT_DENIED_WRITE and the locks held.
	 */
	spin_lock(&vfsmount_lock);
	mnt->mnt_flags |= MNT_READONLY;
	spin_unlock(&vfsmount_lock);
out:
	unlock_mnt_writers();
	return ret;
}

static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	spin_unlock(&vfsmount_lock);
}

int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
	return 0;
}
EXPORT_SYMBOL(simple_set_mnt);
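
/*
 * Sketch of the intended use of simple_set_mnt() (illustrative; the
 * my_fs_* names are hypothetical): a filesystem's get_sb callback hands
 * its superblock to the pre-allocated vfsmount:
 *
 *	static int my_fs_get_sb(struct file_system_type *fs_type, int flags,
 *				const char *dev_name, void *data,
 *				struct vfsmount *mnt)
 *	{
 *		struct super_block *sb = my_fs_create_sb(fs_type, data);
 *		if (IS_ERR(sb))
 *			return PTR_ERR(sb);
 *		return simple_set_mnt(mnt, sb);
 *	}
 */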

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set, return the first mount; else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;

	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}

static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_path->dentry->d_mounted--;
}

void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}

static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}

/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

out_free:
	free_vfsmnt(mnt);
	return NULL;
}

static inline void __mntput(struct vfsmount *mnt)
{
	int cpu;
	struct super_block *sb = mnt->mnt_sb;
	/*
	 * We don't have to hold all of the locks at the
	 * same time here because we know that we're the
	 * last reference to mnt and that no new writers
	 * can come in.
	 */
	for_each_possible_cpu(cpu) {
		struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
		if (cpu_writer->mnt != mnt)
			continue;
		spin_lock(&cpu_writer->lock);
		atomic_add(cpu_writer->count, &mnt->__mnt_writers);
		cpu_writer->count = 0;
		/*
		 * Might as well do this so that no one
		 * ever sees the pointer and expects
		 * it to be valid.
		 */
		cpu_writer->mnt = NULL;
		spin_unlock(&cpu_writer->lock);
	}
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	WARN_ON(atomic_read(&mnt->__mnt_writers));
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}
EXPORT_SYMBOL(mntput_no_expire);

void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);
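
/*
 * Sketch of the intended pin/unpin pairing (illustrative, modelled on
 * the BSD accounting usage; not code from this file): a long-lived user
 * converts one of its regular references into a pinned one, and later
 * takes the reference back before the final mntput():
 *
 *	mnt_pin(mnt);
 *	mntput(mnt);		pin keeps the mount alive
 *	...
 *	mnt_unpin(mnt);		reclaims the reference
 *	mntput(mnt);
 */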

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options = mnt->mnt_sb->s_options;

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	kfree(sb->s_options);
	sb->s_options = kstrdup(options, GFP_KERNEL);
}
EXPORT_SYMBOL(save_mount_options);
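
/*
 * Putting the two helpers together (a sketch; the my_fs_* names are
 * hypothetical): the fill_super callback records the raw option string
 * and generic_show_options() later replays it for /proc/mounts:
 *
 *	static int my_fs_fill_super(struct super_block *sb, void *data,
 *				    int silent)
 *	{
 *		... parse the options in data ...
 *		save_mount_options(sb, data);
 *		return 0;
 *	}
 *
 *	static const struct super_operations my_fs_sops = {
 *		.show_options	= generic_show_options,
 *	};
 */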

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct mnt_namespace *n = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&n->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mnt_namespace *n = m->private;

	return seq_list_next(v, &n->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, mnt->mnt_sb->s_subtype);
	}
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt,
};

static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	mangle(m, mnt->mnt_sb->s_type->name);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;

	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}
EXPORT_SYMBOL(may_umount);
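
/*
 * Example (a sketch, not code from this file): an automounter backend
 * can use this as a non-destructive probe before trying to expire a
 * mount; try_to_expire() is a hypothetical helper:
 *
 *	if (may_umount(mnt))
 *		try_to_expire(mnt);
 */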

void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}

void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p) {
			p->mnt_parent->mnt_ghosts++;
			p->mnt_mountpoint->d_mounted--;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
	if (sb->s_op->umount_begin)
		sb->s_op->umount_begin(mnt, flags);

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 * (the automounter).
 */
asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.path.dentry != nd.path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(nd.path.dentry);
	mntput_no_expire(nd.path.mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->path.dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->path.dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->path.dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (vfs_permission(nd, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}

struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}

static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct vfsmount *mnt, bool recurse)
{
	struct vfsmount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}
	return 0;
}

/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *		 store the parent mount and mountpoint dentry.
 *		 (done when source_mnt is moved)
 *
 * NOTE: the tables below explain the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   -      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *	 tree of the destination mount and the cloned mount is added to
 *	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |                    MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   -      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *	 all the mounts belonging to the destination mount's propagation tree.
 *	 the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
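
/*
 * Worked example for the BIND table above (an illustrative shell
 * session, not part of the original source). Make /mnt shared, give it
 * a peer, then bind a private source onto it:
 *
 *	mount --make-shared /mnt
 *	mount --bind /mnt /peer			(/peer joins /mnt's peer group)
 *	mount --bind /some/dir /mnt/sub		(non-shared source, shared dest)
 *
 * The last bind is the "shared (+)" case: the clone mounted at /mnt/sub
 * is propagated to every peer, so it shows up at /peer/sub as well.
 */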

static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(current->nsproxy->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}

static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, path);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, path);
	return err;
}

/*
 * recursively change the type of the mountpoint.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_change_type(struct nameidata *nd, int flag)
{
	struct vfsmount *m, *mnt = nd->path.mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (nd->path.dentry != nd->path.mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);

out_unlock:
	up_write(&namespace_sem);
	return err;
}

/*
 * do loopback mount.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_loopback(struct nameidata *nd, char *old_name,
				int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_nd.path.mnt))
		goto out;

	if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0);
	else
		mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, &nd->path);
	if (err) {
		/* we'll discard the copy in case of failure */
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_nd.path);
	return err;
}

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
			       void *data)
{
	int err;
	struct super_block *sb = nd->path.mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->path.mnt))
		return -EINVAL;

	if (nd->path.dentry != nd->path.mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(nd->path.mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->path.mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->path.mnt, flags, data);
	return err;
}

static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

/*
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd;
	struct path parent_path;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(nd->path.dentry) &&
	       follow_down(&nd->path.mnt, &nd->path.dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&nd->path.dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->path.dentry->d_inode))
		goto out1;

	if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry))
		goto out1;

	err = -EINVAL;
	if (old_nd.path.dentry != old_nd.path.mnt->mnt_root)
		goto out1;

	if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_nd.path.mnt->mnt_parent &&
	    IS_MNT_SHARED(old_nd.path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(nd->path.mnt) &&
	    tree_contains_unbindable(old_nd.path.mnt))
		goto out1;
	err = -ELOOP;
	for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_nd.path.mnt, &nd->path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_nd.path.mnt->mnt_expire);
out1:
	mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_put(&parent_path);
	path_put(&old_nd.path);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}

/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->path.dentry) &&
	       follow_down(&nd->path.mnt, &nd->path.dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->path.mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->path.mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->path.mnt->mnt_root == nd->path.dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, &nd->path)))
		goto unlock;

	if (fslist) /* add to the specified expiration list */
		list_add_tail(&newmnt->mnt_expire, fslist);

	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);
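
/*
 * Sketch of a typical caller (modelled on automounting filesystems;
 * my_expiry_list is a hypothetical list head): the filesystem creates
 * the vfsmount, and do_add_mount() grafts it at nd and queues it for
 * expiry so unused automounts can be reaped later:
 *
 *	err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &my_expiry_list);
 *	if (err < 0)
 *		return err;	(newmnt was consumed on failure, see above)
 */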

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
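
/*
 * Sketch of the intended pattern (illustrative; my_expiry_list,
 * my_expiry_work and MY_TIMEOUT are hypothetical): the list is the one
 * handed to do_add_mount() above, and a periodic worker lets unused
 * mounts age out after two passes (mark on the first, reap on the
 * second):
 *
 *	static LIST_HEAD(my_expiry_list);
 *
 *	static void my_expiry_worker(struct work_struct *work)
 *	{
 *		mark_mounts_for_expiry(&my_expiry_list);
 *		if (!list_empty(&my_expiry_list))
 *			schedule_delayed_work(&my_expiry_work, MY_TIMEOUT);
 *	}
 */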

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the mnt_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_RELATIME)
		mnt_flags |= MNT_RELATIME;
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd.path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&nd, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&nd.path);
	return retval;
}
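
/*
 * For reference, the userspace view of the flag split above (a sketch,
 * not kernel code): MS_* flags arrive through mount(2); the
 * per-mountpoint subset is translated into MNT_* flags here and the
 * rest is passed down to the filesystem:
 *
 *	mount("/dev/sda1", "/mnt", "ext3",
 *	      MS_NOSUID | MS_NODEV | MS_RDONLY, "errors=remount-ro");
 */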

/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);

	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = mnt_ns->root;
	q = new_ns->root;
	while (p) {
		q->mnt_ns = new_ns;
		if (fs) {
			if (p == fs->root.mnt) {
				rootmnt = p;
				fs->root.mnt = mntget(q);
			}
			if (p == fs->pwd.mnt) {
				pwdmnt = p;
				fs->pwd.mnt = mntget(q);
			}
			if (p == fs->altroot.mnt) {
				altrootmnt = p;
				fs->altroot.mnt = mntget(q);
			}
		}
		p = next_mnt(p, mnt_ns->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	return new_ns;
}

struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}

asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
			  char __user * type, unsigned long flags,
			  void __user * data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	lock_kernel();
	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	unlock_kernel();
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	write_lock(&fs->lock);
	old_root = fs->root;
	fs->root = *path;
	path_get(path);
	write_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

static void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt)
				set_fs_root(fs, new_root);
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt)
				set_fs_pwd(fs, new_root);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
asmlinkage long sys_pivot_root(const char __user * new_root,
			       const char __user * put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd;
	struct path parent_path, root_parent, root;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
			    &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.path.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd.path, &new_nd.path);
	if (error) {
		path_put(&old_nd.path);
		goto out1;
	}

	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&namespace_sem);
	mutex_lock(&old_nd.path.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old_nd.path.mnt) ||
		IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
		IS_MNT_SHARED(root.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(root.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.path.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.path.dentry) && !IS_ROOT(new_nd.path.dentry))
		goto out2;
	if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.path.mnt == root.mnt ||
	    old_nd.path.mnt == root.mnt)
		goto out2; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out2; /* not a mountpoint */
	if (root.mnt->mnt_parent == root.mnt)
		goto out2; /* not attached */
	if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.path.mnt->mnt_parent == new_nd.path.mnt)
		goto out2; /* not attached */
	/* make sure we can reach put_old from new_root */
	tmp = old_nd.path.mnt;
	spin_lock(&vfsmount_lock);
	if (tmp != new_nd.path.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new_nd.path.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.path.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
		goto out3;
	detach_mnt(new_nd.path.mnt, &parent_path);
	detach_mnt(root.mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root.mnt, &old_nd.path);
	/* mount new_root on / */
	attach_mnt(new_nd.path.mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&root, &new_nd.path);
	security_sb_post_pivotroot(&root, &new_nd.path);
	error = 0;
	path_put(&root_parent);
	path_put(&parent_path);
out2:
	mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_put(&root);
	path_put(&old_nd.path);
out1:
	path_put(&new_nd.path);
out0:
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}
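
/*
 * Illustrative userspace sequence for the syscall above (a sketch,
 * following the restrictions in the comment block): new_root must
 * already be a mountpoint, and put_old must live underneath it:
 *
 *	chdir("/new_root");
 *	pivot_root(".", "old_root");
 *	chroot(".");
 *	chdir("/");
 *
 * afterwards the old root is reachable at /old_root and can be
 * unmounted once nothing uses it any more.
 */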

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	ns = kmalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		panic("Can't allocate initial namespace");
	atomic_set(&ns->count, 1);
	INIT_LIST_HEAD(&ns->list);
	init_waitqueue_head(&ns->poll);
	list_add(&mnt->mnt_list, &ns->list);
	ns->root = mnt;
	mnt->mnt_ns = ns;

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = ns->root;
	root.dentry = ns->root->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__FUNCTION__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __FUNCTION__);
	init_rootfs();
	init_mount_tree();
}

/* the caller enters with vfsmount_lock held, hence the initial unlock */
void __put_mnt_ns(struct mnt_namespace *ns)
{
	struct vfsmount *root = ns->root;
	LIST_HEAD(umount_list);

	ns->root = NULL;
	spin_unlock(&vfsmount_lock);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(root, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(ns);
}