superblock: move pin_sb_for_writeback() to fs/super.c
[linux-2.6-block.git] / fs / super.c
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>            /* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to the new superblock or %NULL if allocation had failed.
 */
static struct super_block *alloc_super(struct file_system_type *type)
{
        struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
        static const struct super_operations default_op;

        if (s) {
                if (security_sb_alloc(s)) {
                        kfree(s);
                        s = NULL;
                        goto out;
                }
#ifdef CONFIG_SMP
                s->s_files = alloc_percpu(struct list_head);
                if (!s->s_files) {
                        security_sb_free(s);
                        kfree(s);
                        s = NULL;
                        goto out;
                } else {
                        int i;

                        for_each_possible_cpu(i)
                                INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
                }
#else
                INIT_LIST_HEAD(&s->s_files);
#endif
                s->s_bdi = &default_backing_dev_info;
                INIT_LIST_HEAD(&s->s_instances);
                INIT_HLIST_BL_HEAD(&s->s_anon);
                INIT_LIST_HEAD(&s->s_inodes);
                INIT_LIST_HEAD(&s->s_dentry_lru);
                INIT_LIST_HEAD(&s->s_inode_lru);
                spin_lock_init(&s->s_inode_lru_lock);
                init_rwsem(&s->s_umount);
                mutex_init(&s->s_lock);
                lockdep_set_class(&s->s_umount, &type->s_umount_key);
                /*
                 * The locking rules for s_lock are up to the
                 * filesystem. For example ext3fs has different
                 * lock ordering than usbfs:
                 */
                lockdep_set_class(&s->s_lock, &type->s_lock_key);
                /*
                 * sget() can have s_umount recursion.
                 *
                 * When it cannot find a suitable sb, it allocates a new
                 * one (this one), and tries again to find a suitable old
                 * one.
                 *
                 * In case that succeeds, it will acquire the s_umount
                 * lock of the old one. Since these are clearly distinct
                 * locks, and this object isn't exposed yet, there's no
                 * risk of deadlocks.
                 *
                 * Annotate this by putting this lock in a different
                 * subclass.
                 */
                down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
                s->s_count = 1;
                atomic_set(&s->s_active, 1);
                mutex_init(&s->s_vfs_rename_mutex);
                lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
                mutex_init(&s->s_dquot.dqio_mutex);
                mutex_init(&s->s_dquot.dqonoff_mutex);
                init_rwsem(&s->s_dquot.dqptr_sem);
                init_waitqueue_head(&s->s_wait_unfrozen);
                s->s_maxbytes = MAX_NON_LFS;
                s->s_op = &default_op;
                s->s_time_gran = 1000000000;
                s->cleancache_poolid = -1;
        }
out:
        return s;
}

/**
 * destroy_super - frees a superblock
 * @s: superblock to free
 *
 * Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
#ifdef CONFIG_SMP
        free_percpu(s->s_files);
#endif
        security_sb_free(s);
        kfree(s->s_subtype);
        kfree(s->s_options);
        kfree(s);
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount. The caller must hold sb_lock.
 */
void __put_super(struct super_block *sb)
{
        if (!--sb->s_count) {
                list_del_init(&sb->s_list);
                destroy_super(sb);
        }
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees the superblock if there are no
 * references left.
 */
void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}


/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
                cleancache_flush_fs(s);
                fs->kill_sb(s);
                /*
                 * We need to call rcu_barrier so all the delayed rcu free
                 * inodes are flushed before we release the fs module.
                 */
                rcu_barrier();
                put_filesystem(fs);
                put_super(s);
        } else {
                up_write(&s->s_umount);
        }
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller. If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference. grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference. grab_super()
 * is called with sb_lock held and drops it. Returns 1 in case of
 * success, 0 if we failed (the superblock was already dead or dying
 * when grab_super() was called).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        if (atomic_inc_not_zero(&s->s_active)) {
                spin_unlock(&sb_lock);
                return 1;
        }
        /* it's going away */
        s->s_count++;
        spin_unlock(&sb_lock);
        /* wait for it to die */
        down_write(&s->s_umount);
        up_write(&s->s_umount);
        put_super(s);
        return 0;
}

/*
 * grab_super_passive - acquire a passive reference
 * @sb: reference we are trying to grab
 *
 * Tries to acquire a passive reference. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * superblock does not go away while we are working on it. It returns
 * false if a reference was not gained, and returns true with the s_umount
 * lock held in read mode if a reference is gained. On successful return,
 * the caller must drop the s_umount lock and the passive reference when
 * done.
 */
bool grab_super_passive(struct super_block *sb)
{
        spin_lock(&sb_lock);
        if (list_empty(&sb->s_instances)) {
                spin_unlock(&sb_lock);
                return false;
        }

        sb->s_count++;
        spin_unlock(&sb_lock);

        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root)
                        return true;
                up_read(&sb->s_umount);
        }

        put_super(sb);
        return false;
}
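
/*
 * Illustrative usage sketch (not part of this file): callers such as
 * per-sb writeback, which only need to keep the superblock pinned while
 * they look at it, can pair grab_super_passive() with drop_super():
 *
 *      if (!grab_super_passive(sb))
 *              return 0;          - sb is shutting down, just skip it
 *      ... work on sb with s_umount held for read ...
 *      drop_super(sb);            - drops s_umount and the passive reference
 */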

/*
 * Superblock locking.  We really ought to get rid of these two.
 */
void lock_super(struct super_block * sb)
{
        get_fs_excl();
        mutex_lock(&sb->s_lock);
}

void unlock_super(struct super_block * sb)
{
        put_fs_excl();
        mutex_unlock(&sb->s_lock);
}

EXPORT_SYMBOL(lock_super);
EXPORT_SYMBOL(unlock_super);

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                get_fs_excl();
                sb->s_flags &= ~MS_ACTIVE;

                fsnotify_unmount_inodes(&sb->s_inodes);

                evict_inodes(sb);

                if (sop->put_super)
                        sop->put_super(sb);

                if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                               "Self-destruct in 5 seconds.  Have a nice day...\n",
                               sb->s_id);
                }
                put_fs_excl();
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
        list_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                        int (*test)(struct super_block *,void *),
                        int (*set)(struct super_block *,void *),
                        void *data)
{
        struct super_block *s = NULL;
        struct super_block *old;
        int err;

retry:
        spin_lock(&sb_lock);
        if (test) {
                list_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (!grab_super(old))
                                goto retry;
                        if (s) {
                                up_write(&s->s_umount);
                                destroy_super(s);
                                s = NULL;
                        }
                        down_write(&old->s_umount);
                        if (unlikely(!(old->s_flags & MS_BORN))) {
                                deactivate_locked_super(old);
                                goto retry;
                        }
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                up_write(&s->s_umount);
                destroy_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strlcpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        list_add(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        return s;
}

EXPORT_SYMBOL(sget);
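
/*
 * Illustrative usage sketch (not part of this file): a ->mount()
 * implementation typically passes sget() a test/set callback pair keyed
 * on some piece of fs-private data, e.g. (myfs_* names are made up):
 *
 *      static int myfs_test_super(struct super_block *sb, void *data)
 *      {
 *              return sb->s_fs_info == data;
 *      }
 *
 *      static int myfs_set_super(struct super_block *sb, void *data)
 *      {
 *              sb->s_fs_info = data;
 *              return set_anon_super(sb, NULL);
 *      }
 *
 *      sb = sget(fs_type, myfs_test_super, myfs_set_super, data);
 *
 * mount_ns() below follows exactly this pattern; block-based filesystems
 * use test_bdev_super()/set_bdev_super() via mount_bdev() instead.
 */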

void drop_super(struct super_block *sb)
{
        up_read(&sb->s_umount);
        put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 * sync_supers - helper for periodic superblock writeback
 *
 * Call the write_super method if present on all dirty superblocks in
 * the system. This is for the periodic writeback used by most older
 * filesystems. For data integrity superblock writeback use
 * sync_filesystems() instead.
 *
 * Note: check the dirty flag before waiting, so we don't
 * hold up the sync while mounting a device. (The newly
 * mounted device won't need syncing.)
 */
void sync_supers(void)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                if (sb->s_op->write_super && sb->s_dirt) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);

                        down_read(&sb->s_umount);
                        if (sb->s_root && sb->s_dirt)
                                sb->s_op->write_super(sb);
                        up_read(&sb->s_umount);

                        spin_lock(&sb_lock);
                        if (p)
                                __put_super(p);
                        p = sb;
                }
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}

/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root)
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
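
/*
 * Illustrative usage sketch (not part of this file): sync(2)-style code
 * can hand iterate_supers() a small callback that runs for every live
 * superblock with s_umount held for read and sb->s_root known to be
 * non-NULL, e.g. (sync_one_sb is a made-up name):
 *
 *      static void sync_one_sb(struct super_block *sb, void *arg)
 *      {
 *              if (!(sb->s_flags & MS_RDONLY))
 *                      sync_filesystem(sb);
 *      }
 *
 *      iterate_supers(sync_one_sb, NULL);
 */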

/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
        void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root)
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */

struct super_block *get_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root)
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

EXPORT_SYMBOL(get_super);
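
/*
 * Illustrative usage sketch (not part of this file): helpers that want to
 * sync or invalidate whatever filesystem is mounted on a block device
 * pair get_super() with drop_super():
 *
 *      struct super_block *sb = get_super(bdev);
 *      if (sb) {
 *              sync_filesystem(sb);
 *              drop_super(sb);
 *      }
 */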

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        if (grab_super(sb)) /* drops sb_lock */
                                return sb;
                        else
                                goto restart;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root)
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}

/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
 * @flags: numeric part of options
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
        int retval;
        int remount_ro;

        if (sb->s_frozen != SB_UNFROZEN)
                return -EBUSY;

#ifdef CONFIG_BLOCK
        if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
                return -EACCES;
#endif

        if (flags & MS_RDONLY)
                acct_auto_close(sb);
        shrink_dcache_sb(sb);
        sync_filesystem(sb);

        remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
                if (force)
                        mark_files_ro(sb);
                else if (!fs_may_remount_ro(sb))
                        return -EBUSY;
        }

        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
                if (retval)
                        return retval;
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (eg. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we try to mount readonly then copy the filesystem
         * from bdev, we could get stale data, so invalidate it to give a best
         * effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;
}

static void do_emergency_remount(struct work_struct *work)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (list_empty(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);
                down_write(&sb->s_umount);
                if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
                        /*
                         * What lock protects sb->s_flags??
                         */
                        do_remount_sb(sb, MS_RDONLY, NULL, 1);
                }
                up_write(&sb->s_umount);
                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
        kfree(work);
        printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
        int dev;
        int error;

 retry:
        if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                return -ENOMEM;
        spin_lock(&unnamed_dev_lock);
        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
        if (!error)
                unnamed_dev_start = dev + 1;
        spin_unlock(&unnamed_dev_lock);
        if (error == -EAGAIN)
                /* We raced and lost with another CPU. */
                goto retry;
        else if (error)
                return -EAGAIN;

        if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
                        unnamed_dev_start = dev;
                spin_unlock(&unnamed_dev_lock);
                return -EMFILE;
        }
        *p = MKDEV(0, dev & MINORMASK);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
        int slot = MINOR(dev);
        spin_lock(&unnamed_dev_lock);
        ida_remove(&unnamed_dev_ida, slot);
        if (slot < unnamed_dev_start)
                unnamed_dev_start = slot;
        spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
        int error = get_anon_bdev(&s->s_dev);
        if (!error)
                s->s_bdi = &noop_backing_dev_info;
        return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;
        generic_shutdown_super(sb);
        free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
        return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
        sb->s_fs_info = data;
        return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
        void *data, int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *sb;

        sb = sget(fs_type, ns_test_super, ns_set_super, data);
        if (IS_ERR(sb))
                return ERR_CAST(sb);

        if (!sb->s_root) {
                int err;
                sb->s_flags = flags;
                err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
                if (err) {
                        deactivate_locked_super(sb);
                        return ERR_PTR(err);
                }

                sb->s_flags |= MS_ACTIVE;
        }

        return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
        return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error = 0;

        if (!(flags & MS_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        if (IS_ERR(s))
                goto error_s;

        if (s->s_root) {
                if ((flags ^ s->s_flags) & MS_RDONLY) {
                        deactivate_locked_super(s);
                        error = -EBUSY;
                        goto error_bdev;
                }

                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device().  blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount.  Drop
                 * s_umount temporarily.  This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                char b[BDEVNAME_SIZE];

                s->s_flags = flags | MS_NOSEC;
                s->s_mode = mode;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        goto error;
                }

                s->s_flags |= MS_ACTIVE;
                bdev->bd_super = s;
        }

        return dget(s->s_root);

error_s:
        error = PTR_ERR(s);
error_bdev:
        blkdev_put(bdev, mode);
error:
        return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
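
/*
 * Illustrative usage sketch (not part of this file): a block-based
 * filesystem's ->mount() is usually just a thin wrapper (myfs_* names
 * are made up):
 *
 *      static struct dentry *myfs_mount(struct file_system_type *fs_type,
 *              int flags, const char *dev_name, void *data)
 *      {
 *              return mount_bdev(fs_type, flags, dev_name, data,
 *                                myfs_fill_super);
 *      }
 *
 * with kill_block_super() as the matching ->kill_sb().
 */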

void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;
        fmode_t mode = sb->s_mode;

        bdev->bd_super = NULL;
        generic_shutdown_super(sb);
        sync_blockdev(bdev);
        WARN_ON_ONCE(!(mode & FMODE_EXCL));
        blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        int error;
        struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);

        if (IS_ERR(s))
                return ERR_CAST(s);

        s->s_flags = flags;

        error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
        if (error) {
                deactivate_locked_super(s);
                return ERR_PTR(error);
        }
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
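
/*
 * Illustrative usage sketch (not part of this file): memory-backed
 * filesystems that want a fresh superblock per mount call this from
 * their ->mount(), e.g. (myfs_fill_super is a made-up name):
 *
 *      return mount_nodev(fs_type, flags, data, myfs_fill_super);
 *
 * and pair it with kill_anon_super() or kill_litter_super() in ->kill_sb().
 */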

static int compare_single(struct super_block *s, void *p)
{
        return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;

        s = sget(fs_type, compare_single, set_anon_super, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
        if (!s->s_root) {
                s->s_flags = flags;
                error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }
                s->s_flags |= MS_ACTIVE;
        } else {
                do_remount_sb(s, flags, data, 0);
        }
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
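
/*
 * Illustrative usage sketch (not part of this file): single-instance
 * pseudo filesystems (debugfs-style) use this so that every mount
 * returns the same superblock (myfs_fill_super is a made-up name):
 *
 *      return mount_single(fs_type, flags, data, myfs_fill_super);
 */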

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct dentry *root;
        struct super_block *sb;
        char *secdata = NULL;
        int error = -ENOMEM;

        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
                        goto out;

                error = security_sb_copy_data(data, secdata);
                if (error)
                        goto out_free_secdata;
        }

        root = type->mount(type, flags, name, data);
        if (IS_ERR(root)) {
                error = PTR_ERR(root);
                goto out_free_secdata;
        }
        sb = root->d_sb;
        BUG_ON(!sb);
        WARN_ON(!sb->s_bdi);
        WARN_ON(sb->s_bdi == &default_backing_dev_info);
        sb->s_flags |= MS_BORN;

        error = security_sb_kern_mount(sb, flags, secdata);
        if (error)
                goto out_sb;

        /*
         * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
         * but s_maxbytes was an unsigned long long for many releases. Throw
         * this warning for a little while to try and catch filesystems that
         * violate this rule.
         */
        WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
                "negative value (%lld)\n", type->name, sb->s_maxbytes);

        up_write(&sb->s_umount);
        free_secdata(secdata);
        return root;
out_sb:
        dput(root);
        deactivate_locked_super(sb);
out_free_secdata:
        free_secdata(secdata);
out:
        return ERR_PTR(error);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs. Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_frozen) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (sb->s_flags & MS_RDONLY) {
                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();
                up_write(&sb->s_umount);
                return 0;
        }

        sb->s_frozen = SB_FREEZE_WRITE;
        smp_wmb();

        sync_filesystem(sb);

        sb->s_frozen = SB_FREEZE_TRANS;
        smp_wmb();

        sync_blockdev(sb->s_bdev);
        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS:Filesystem freeze failed\n");
                        sb->s_frozen = SB_UNFROZEN;
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);
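
/*
 * Illustrative usage sketch (not part of this file): the FIFREEZE/FITHAW
 * ioctls (and freeze_bdev()/thaw_bdev()) drive this pair roughly as:
 *
 *      error = freeze_super(sb);     - sb is now quiesced and consistent
 *      ... take the snapshot / do the backup ...
 *      error = thaw_super(sb);       - writes may proceed again
 *
 * Each successful freeze_super() must be balanced by exactly one
 * thaw_super().
 */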

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
        int error;

        down_write(&sb->s_umount);
        if (sb->s_frozen == SB_UNFROZEN) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb->s_flags & MS_RDONLY)
                goto out;

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS:Filesystem thaw failed\n");
                        sb->s_frozen = SB_FREEZE_TRANS;
                        up_write(&sb->s_umount);
                        return error;
                }
        }

out:
        sb->s_frozen = SB_UNFROZEN;
        smp_wmb();
        wake_up(&sb->s_wait_unfrozen);
        deactivate_locked_super(sb);

        return 0;
}
EXPORT_SYMBOL(thaw_super);