[linux-2.6-block.git] / fs / super.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
1da177e4
LT
2/*
3 * linux/fs/super.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * super.c contains code to handle: - mount structures
8 * - super-block tables
9 * - filesystem drivers list
10 * - mount system call
11 * - umount system call
12 * - ustat system call
13 *
14 * GK 2/5/95 - Changed to support mounting the root fs via NFS
15 *
16 * Added kerneld support: Jacques Gelinas and Bjorn Ekwall
17 * Added change_root: Werner Almesberger & Hans Lermen, Feb '96
18 * Added options to /proc/mounts:
96de0e25 19 * Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
1da177e4
LT
20 * Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
21 * Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
22 */
23
630d9c47 24#include <linux/export.h>
1da177e4 25#include <linux/slab.h>
1da177e4 26#include <linux/blkdev.h>
1da177e4
LT
27#include <linux/mount.h>
28#include <linux/security.h>
1da177e4
LT
29#include <linux/writeback.h> /* for the emergency remount stuff */
30#include <linux/idr.h>
353ab6e9 31#include <linux/mutex.h>
5477d0fa 32#include <linux/backing-dev.h>
ceb5bdc2 33#include <linux/rculist_bl.h>
22d94f49 34#include <linux/fscrypt.h>
40401530 35#include <linux/fsnotify.h>
5accdf82 36#include <linux/lockdep.h>
6e4eab57 37#include <linux/user_namespace.h>
9bc61ab1 38#include <linux/fs_context.h>
e262e32d 39#include <uapi/linux/mount.h>
6d59e7f5 40#include "internal.h"
1da177e4 41
880b9577 42static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);
1da177e4 43
15d0f5ea
AV
44static LIST_HEAD(super_blocks);
45static DEFINE_SPINLOCK(sb_lock);
1da177e4 46
5accdf82
JK
47static char *sb_writers_name[SB_FREEZE_LEVELS] = {
48 "sb_writers",
49 "sb_pagefaults",
50 "sb_internal",
51};
52
5e874914 53static inline void __super_lock(struct super_block *sb, bool excl)
0ed33598
CB
54{
55 if (excl)
56 down_write(&sb->s_umount);
57 else
58 down_read(&sb->s_umount);
59}
60
61static inline void super_unlock(struct super_block *sb, bool excl)
62{
63 if (excl)
64 up_write(&sb->s_umount);
65 else
66 up_read(&sb->s_umount);
67}
68
5e874914 69static inline void __super_lock_excl(struct super_block *sb)
0ed33598 70{
5e874914 71 __super_lock(sb, true);
0ed33598
CB
72}
73
74static inline void super_unlock_excl(struct super_block *sb)
75{
76 super_unlock(sb, true);
77}
78
79static inline void super_unlock_shared(struct super_block *sb)
80{
81 super_unlock(sb, false);
82}
83
b30850c5 84static bool super_flags(const struct super_block *sb, unsigned int flags)
5e874914 85{
5e874914
CB
86 /*
87 * Pairs with smp_store_release() in super_wake() and ensures
b30850c5 88 * that we see @flags after we're woken.
5e874914 89 */
b30850c5 90 return smp_load_acquire(&sb->s_flags) & flags;
5e874914
CB
91}
92
93/**
94 * super_lock - wait for superblock to become ready and lock it
95 * @sb: superblock to wait for
96 * @excl: whether exclusive access is required
97 *
98 * If the superblock has neither passed through vfs_get_tree() nor
99 * generic_shutdown_super() yet, wait for that to happen. Either superblock
100 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
101 * woken and we'll see SB_DYING.
102 *
103 * The caller must have acquired a temporary reference on @sb->s_count.
104 *
f0cd9880
CB
105 * Return: The function returns true if SB_BORN was set, with
106 * s_umount held. It returns false if SB_DYING was set, in which
107 * case s_umount is not held.
5e874914
CB
108 */
109static __must_check bool super_lock(struct super_block *sb, bool excl)
110{
5e874914
CB
111 lockdep_assert_not_held(&sb->s_umount);
112
b30850c5
CB
113 /* wait until the superblock is ready or dying */
114 wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));
115
116 /* Don't pointlessly acquire s_umount. */
117 if (super_flags(sb, SB_DYING))
118 return false;
119
5e874914
CB
120 __super_lock(sb, excl);
121
122 /*
123 * Has gone through generic_shutdown_super() in the meantime.
124 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
125 * grab a reference to this. Tell them so.
126 */
f0cd9880
CB
127 if (sb->s_flags & SB_DYING) {
128 super_unlock(sb, excl);
5e874914 129 return false;
f0cd9880 130 }
5e874914 131
b30850c5
CB
132 WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
133 return true;
5e874914
CB
134}
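/*
 * Illustrative sketch (editor's addition, not part of super.c): the
 * expected calling pattern for super_lock(), mirroring user_get_super()
 * further down - take a temporary @s_count reference under sb_lock
 * before waiting:
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;
 *	spin_unlock(&sb_lock);
 *
 *	if (super_lock(sb, true)) {
 *		...superblock is SB_BORN, s_umount held exclusively...
 *		super_unlock(sb, true);
 *	}
 *	put_super(sb);
 */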
135
f0cd9880 136/* wait and try to acquire read-side of @sb->s_umount */
5e874914
CB
137static inline bool super_lock_shared(struct super_block *sb)
138{
139 return super_lock(sb, false);
140}
141
f0cd9880 142/* wait and try to acquire write-side of @sb->s_umount */
5e874914
CB
143static inline bool super_lock_excl(struct super_block *sb)
144{
145 return super_lock(sb, true);
146}
147
148/* wake waiters */
2c18a63b 149#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
5e874914
CB
150static void super_wake(struct super_block *sb, unsigned int flag)
151{
152 WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
153 WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
154
155 /*
156 * Pairs with smp_load_acquire() in super_lock() to make sure
157 * all initializations in the superblock are seen by the user
158 * seeing SB_BORN set.
159 */
160 smp_store_release(&sb->s_flags, sb->s_flags | flag);
161 /*
162 * Pairs with the barrier in prepare_to_wait_event() to make sure
163 * ___wait_var_event() either sees SB_BORN set or
164 * waitqueue_active() check in wake_up_var() sees the waiter.
165 */
166 smp_mb();
167 wake_up_var(&sb->s_flags);
168}
169
b0d40c92
DC
170/*
171 * One thing we have to be careful of with a per-sb shrinker is that we don't
172 * drop the last active reference to the superblock from within the shrinker.
173 * If that happens we could trigger unregistering the shrinker from within the
8a0e8bb1 174 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
b0d40c92
DC
175 * take a passive reference to the superblock to avoid this from occurring.
176 */
0a234c6d
DC
177static unsigned long super_cache_scan(struct shrinker *shrink,
178 struct shrink_control *sc)
b0d40c92
DC
179{
180 struct super_block *sb;
0a234c6d
DC
181 long fs_objects = 0;
182 long total_objects;
183 long freed = 0;
184 long dentries;
185 long inodes;
b0d40c92 186
1720f5dd 187 sb = shrink->private_data;
b0d40c92
DC
188
189 /*
190 * Deadlock avoidance. We may hold various FS locks, and we don't want
191 * to recurse into the FS that called us in clear_inode() and friends..
192 */
0a234c6d
DC
193 if (!(sc->gfp_mask & __GFP_FS))
194 return SHRINK_STOP;
b0d40c92 195
d8ce82ef 196 if (!super_trylock_shared(sb))
0a234c6d 197 return SHRINK_STOP;
b0d40c92 198
d0407903 199 if (sb->s_op->nr_cached_objects)
4101b624 200 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
0e1fdafd 201
503c358c
VD
202 inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
203 dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
f6041567 204 total_objects = dentries + inodes + fs_objects + 1;
475d0db7
TH
205 if (!total_objects)
206 total_objects = 1;
0e1fdafd 207
0a234c6d 208 /* proportion the scan between the caches */
f6041567 209 dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
bc3b14cb 210 inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
503c358c 211 fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
b0d40c92 212
0a234c6d
DC
213 /*
214 * prune the dcache first as the icache is pinned by it, then
215 * prune the icache, followed by the filesystem specific caches
49e7e7ff
VD
216 *
217 * Ensure that we always scan at least one object - memcg kmem
218 * accounting uses this to fully empty the caches.
0a234c6d 219 */
49e7e7ff 220 sc->nr_to_scan = dentries + 1;
503c358c 221 freed = prune_dcache_sb(sb, sc);
49e7e7ff 222 sc->nr_to_scan = inodes + 1;
503c358c 223 freed += prune_icache_sb(sb, sc);
0a234c6d
DC
224
225 if (fs_objects) {
49e7e7ff 226 sc->nr_to_scan = fs_objects + 1;
4101b624 227 freed += sb->s_op->free_cached_objects(sb, sc);
b0d40c92
DC
228 }
229
0ed33598 230 super_unlock_shared(sb);
0a234c6d
DC
231 return freed;
232}
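/*
 * Worked example (editor's addition, assumed numbers): with
 * sc->nr_to_scan = 128 and counts of 600 dentries, 300 inodes and
 * 100 fs objects, total_objects = 1001 and the proportional split via
 * mult_frac() is 76 dentries, 38 inodes and 12 fs objects (each then
 * scanned as "n + 1" so that memcg reclaim always makes progress).
 */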
233
234static unsigned long super_cache_count(struct shrinker *shrink,
235 struct shrink_control *sc)
236{
237 struct super_block *sb;
238 long total_objects = 0;
239
1720f5dd 240 sb = shrink->private_data;
0a234c6d 241
d23da150 242 /*
d8ce82ef
CB
243 * We don't call super_trylock_shared() here as it is a scalability
244 * bottleneck, so we're exposed to partial setup state. The shrinker
245 * rwsem does not protect filesystem operations backing
246 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
247 * change between super_cache_count and super_cache_scan, so we really
248 * don't need locks here.
79f546a6
DC
249 *
250 * However, if we are currently mounting the superblock, the underlying
251 * filesystem might be in a state of partial construction and hence it
d8ce82ef
CB
252 * is dangerous to access it. super_trylock_shared() uses a SB_BORN check
253 * to avoid this situation, so do the same here. The memory barrier is
79f546a6 254 * matched with the one in mount_fs() as we don't hold locks here.
d23da150 255 */
79f546a6
DC
256 if (!(sb->s_flags & SB_BORN))
257 return 0;
258 smp_rmb();
259
0a234c6d 260 if (sb->s_op && sb->s_op->nr_cached_objects)
4101b624 261 total_objects = sb->s_op->nr_cached_objects(sb, sc);
0a234c6d 262
503c358c
VD
263 total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
264 total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
0a234c6d 265
9b996468
KT
266 if (!total_objects)
267 return SHRINK_EMPTY;
268
55f841ce 269 total_objects = vfs_pressure_ratio(total_objects);
0e1fdafd 270 return total_objects;
b0d40c92
DC
271}
272
853b39a7
ON
273static void destroy_super_work(struct work_struct *work)
274{
275 struct super_block *s = container_of(work, struct super_block,
276 destroy_work);
795bb82d 277 fsnotify_sb_free(s);
583340de
AV
278 security_sb_free(s);
279 put_user_ns(s->s_user_ns);
280 kfree(s->s_subtype);
281 for (int i = 0; i < SB_FREEZE_LEVELS; i++)
8129ed29 282 percpu_free_rwsem(&s->s_writers.rw_sem[i]);
853b39a7
ON
283 kfree(s);
284}
285
286static void destroy_super_rcu(struct rcu_head *head)
287{
288 struct super_block *s = container_of(head, struct super_block, rcu);
289 INIT_WORK(&s->destroy_work, destroy_super_work);
290 schedule_work(&s->destroy_work);
291}
292
0200894d
AV
293/* Free a superblock that has never been seen by anyone */
294static void destroy_unused_super(struct super_block *s)
5accdf82 295{
0200894d
AV
296 if (!s)
297 return;
0ed33598 298 super_unlock_excl(s);
7eb5e882
AV
299 list_lru_destroy(&s->s_dentry_lru);
300 list_lru_destroy(&s->s_inode_lru);
1720f5dd 301 shrinker_free(s->s_shrink);
0200894d
AV
302 /* no delays needed */
303 destroy_super_work(&s->destroy_work);
5accdf82
JK
304}
305
1da177e4
LT
306/**
307 * alloc_super - create new superblock
fe2bbc48 308 * @type: filesystem type superblock should belong to
9249e17f 309 * @flags: the mount flags
6e4eab57 310 * @user_ns: User namespace for the super_block
1da177e4
LT
311 *
312 * Allocates and initializes a new &struct super_block. alloc_super()
313 * returns a pointer to the new superblock or %NULL if allocation failed.
314 */
6e4eab57
EB
315static struct super_block *alloc_super(struct file_system_type *type, int flags,
316 struct user_namespace *user_ns)
1da177e4 317{
2b46a19d 318 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
b87221de 319 static const struct super_operations default_op;
7eb5e882
AV
320 int i;
321
322 if (!s)
323 return NULL;
1da177e4 324
b5bd856a 325 INIT_LIST_HEAD(&s->s_mounts);
6e4eab57 326 s->s_user_ns = get_user_ns(user_ns);
ca0168e8
AV
327 init_rwsem(&s->s_umount);
328 lockdep_set_class(&s->s_umount, &type->s_umount_key);
329 /*
330 * sget() can have s_umount recursion.
331 *
332 * When it cannot find a suitable sb, it allocates a new
333 * one (this one), and tries again to find a suitable old
334 * one.
335 *
336 * In case that succeeds, it will acquire the s_umount
337 * lock of the old one. Since these are clearly distinct
338 * locks, and this object isn't exposed yet, there's no
339 * risk of deadlocks.
340 *
341 * Annotate this by putting this lock in a different
342 * subclass.
343 */
344 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
b5bd856a 345
7eb5e882
AV
346 if (security_sb_alloc(s))
347 goto fail;
7b7a8665 348
7eb5e882 349 for (i = 0; i < SB_FREEZE_LEVELS; i++) {
8129ed29
ON
350 if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
351 sb_writers_name[i],
352 &type->s_writers_key[i]))
7eb5e882 353 goto fail;
1da177e4 354 }
df0ce26c 355 s->s_bdi = &noop_backing_dev_info;
7eb5e882 356 s->s_flags = flags;
cc50a07a 357 if (s->s_user_ns != &init_user_ns)
67690f93 358 s->s_iflags |= SB_I_NODEV;
7eb5e882 359 INIT_HLIST_NODE(&s->s_instances);
f1ee6162 360 INIT_HLIST_BL_HEAD(&s->s_roots);
e97fedb9 361 mutex_init(&s->s_sync_lock);
7eb5e882 362 INIT_LIST_HEAD(&s->s_inodes);
74278da9 363 spin_lock_init(&s->s_inode_list_lock);
6c60d2b5
DC
364 INIT_LIST_HEAD(&s->s_inodes_wb);
365 spin_lock_init(&s->s_inode_wblist_lock);
7eb5e882 366
7eb5e882
AV
367 s->s_count = 1;
368 atomic_set(&s->s_active, 1);
369 mutex_init(&s->s_vfs_rename_mutex);
370 lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
bc8230ee 371 init_rwsem(&s->s_dquot.dqio_sem);
7eb5e882
AV
372 s->s_maxbytes = MAX_NON_LFS;
373 s->s_op = &default_op;
374 s->s_time_gran = 1000000000;
188d20bc
DD
375 s->s_time_min = TIME64_MIN;
376 s->s_time_max = TIME64_MAX;
7eb5e882 377
1720f5dd
QZ
378 s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
379 "sb-%s", type->name);
380 if (!s->s_shrink)
8e04944f 381 goto fail;
1720f5dd
QZ
382
383 s->s_shrink->scan_objects = super_cache_scan;
384 s->s_shrink->count_objects = super_cache_count;
385 s->s_shrink->batch = 1024;
386 s->s_shrink->private_data = s;
387
388 if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
2b3648a6 389 goto fail;
1720f5dd 390 if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
2b3648a6 391 goto fail;
1da177e4 392 return s;
5ca302c8 393
7eb5e882 394fail:
0200894d 395 destroy_unused_super(s);
7eb5e882 396 return NULL;
1da177e4
LT
397}
398
399/* Superblock refcounting */
400
401/*
35cf7ba0 402 * Drop a superblock's refcount. The caller must hold sb_lock.
1da177e4 403 */
c645b930 404static void __put_super(struct super_block *s)
1da177e4 405{
c645b930
AV
406 if (!--s->s_count) {
407 list_del_init(&s->s_list);
408 WARN_ON(s->s_dentry_lru.node);
409 WARN_ON(s->s_inode_lru.node);
410 WARN_ON(!list_empty(&s->s_mounts));
c645b930 411 call_rcu(&s->rcu, destroy_super_rcu);
1da177e4 412 }
1da177e4
LT
413}
414
415/**
416 * put_super - drop a temporary reference to superblock
417 * @sb: superblock in question
418 *
419 * Drops a temporary reference, frees the superblock if there are no
420 * references left.
421 */
60b49885 422void put_super(struct super_block *sb)
1da177e4
LT
423{
424 spin_lock(&sb_lock);
425 __put_super(sb);
426 spin_unlock(&sb_lock);
427}
428
dc3216b1
CB
429static void kill_super_notify(struct super_block *sb)
430{
431 lockdep_assert_not_held(&sb->s_umount);
432
433 /* already notified earlier */
434 if (sb->s_flags & SB_DEAD)
435 return;
436
437 /*
438 * Remove it from @fs_supers so it isn't found by new
439 * sget{_fc}() walkers anymore. Any concurrent mounter still
440 * managing to grab a temporary reference is guaranteed to
441 * already see SB_DYING and will wait until we notify them about
442 * SB_DEAD.
443 */
444 spin_lock(&sb_lock);
445 hlist_del_init(&sb->s_instances);
446 spin_unlock(&sb_lock);
447
448 /*
449 * Let concurrent mounts know that this thing is really dead.
450 * We don't need @sb->s_umount here as every concurrent caller
451 * will see SB_DYING and either discard the superblock or wait
452 * for SB_DEAD.
453 */
454 super_wake(sb, SB_DEAD);
455}
1da177e4
LT
456
457/**
1712ac8f 458 * deactivate_locked_super - drop an active reference to superblock
1da177e4
LT
459 * @s: superblock to deactivate
460 *
bd7ced98 461 * Drops an active reference to superblock, converting it into a temporary
1712ac8f 462 * one if there are no other active references left. In that case we
1da177e4
LT
463 * tell fs driver to shut it down and drop the temporary reference we
464 * had just acquired.
1712ac8f
AV
465 *
466 * Caller holds exclusive lock on superblock; that lock is released.
1da177e4 467 */
1712ac8f 468void deactivate_locked_super(struct super_block *s)
1da177e4
LT
469{
470 struct file_system_type *fs = s->s_type;
b20bd1a5 471 if (atomic_dec_and_test(&s->s_active)) {
1720f5dd 472 shrinker_free(s->s_shrink);
28f2cd4f 473 fs->kill_sb(s);
f5e1dd34 474
dc3216b1
CB
475 kill_super_notify(s);
476
c0a5b560
VD
477 /*
478 * Since list_lru_destroy() may sleep, we cannot call it from
479 * put_super(), where we hold the sb_lock. Therefore we destroy
480 * the lru lists right now.
481 */
482 list_lru_destroy(&s->s_dentry_lru);
483 list_lru_destroy(&s->s_inode_lru);
484
1da177e4
LT
485 put_filesystem(fs);
486 put_super(s);
1712ac8f 487 } else {
0ed33598 488 super_unlock_excl(s);
1da177e4
LT
489 }
490}
491
1712ac8f 492EXPORT_SYMBOL(deactivate_locked_super);
1da177e4 493
74dbbdd7 494/**
1712ac8f 495 * deactivate_super - drop an active reference to superblock
74dbbdd7
AV
496 * @s: superblock to deactivate
497 *
1712ac8f
AV
498 * Variant of deactivate_locked_super(), except that superblock is *not*
499 * locked by caller. If we are going to drop the final active reference,
500 * lock will be acquired prior to that.
74dbbdd7 501 */
1712ac8f 502void deactivate_super(struct super_block *s)
74dbbdd7 503{
cc23402c 504 if (!atomic_add_unless(&s->s_active, -1, 1)) {
5e874914 505 __super_lock_excl(s);
1712ac8f 506 deactivate_locked_super(s);
74dbbdd7
AV
507 }
508}
509
1712ac8f 510EXPORT_SYMBOL(deactivate_super);
74dbbdd7 511
1da177e4 512/**
97cbed04 513 * grab_super - acquire an active reference to a superblock
2c18a63b
CB
514 * @sb: superblock to acquire
515 *
516 * Acquire a temporary reference on a superblock and try to trade it for
517 * an active reference. This is used in sget{_fc}() to wait for a
518 * superblock to either become SB_BORN or for it to pass through
519 * sb->kill() and be marked as SB_DEAD.
520 *
521 * Return: This returns true if an active reference could be acquired,
522 * false if not.
523 */
97cbed04 524static bool grab_super(struct super_block *sb)
2c18a63b 525{
97cbed04 526 bool locked;
2c18a63b
CB
527
528 sb->s_count++;
97cbed04
CB
529 spin_unlock(&sb_lock);
530 locked = super_lock_excl(sb);
531 if (locked) {
532 if (atomic_inc_not_zero(&sb->s_active)) {
533 put_super(sb);
534 return true;
535 }
536 super_unlock_excl(sb);
2c18a63b 537 }
b30850c5 538 wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
345a5c4a 539 put_super(sb);
2c18a63b
CB
540 return false;
541}
542
12ad3ab6 543/*
d8ce82ef 544 * super_trylock_shared - try to grab ->s_umount shared
331cbdee 545 * @sb: reference we are trying to grab
12ad3ab6 546 *
eb6ef3df 547 * Try to prevent fs shutdown. This is used in places where we
12ad3ab6 548 * cannot take an active reference but we need to ensure that the
eb6ef3df
KK
549 * filesystem is not shut down while we are working on it. It returns
550 * false if we cannot acquire s_umount or if we lose the race and
551 * the filesystem has already been shut down, and returns true with the s_umount
552 * lock held in read mode in case of success. On successful return,
553 * the caller must drop the s_umount lock when done.
554 *
555 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
556 * The reason why it's safe is that we are OK with doing trylock instead
557 * of down_read(). There's a couple of places that are OK with that, but
558 * it's very much not a general-purpose interface.
12ad3ab6 559 */
d8ce82ef 560bool super_trylock_shared(struct super_block *sb)
12ad3ab6 561{
12ad3ab6 562 if (down_read_trylock(&sb->s_umount)) {
5e874914
CB
563 if (!(sb->s_flags & SB_DYING) && sb->s_root &&
564 (sb->s_flags & SB_BORN))
12ad3ab6 565 return true;
0ed33598 566 super_unlock_shared(sb);
12ad3ab6
DC
567 }
568
12ad3ab6
DC
569 return false;
570}
571
04b94071
DL
572/**
573 * retire_super - prevents superblock from being reused
574 * @sb: superblock to retire
575 *
576 * The function marks the superblock to be ignored in the superblock test, which
577 * prevents it from being reused for any new mounts. If the superblock has
578 * a private bdi, it also unregisters it, but doesn't reduce the refcount
579 * of the superblock to prevent potential races. The refcount is reduced
580 * by generic_shutdown_super(). The function can not be called
581 * concurrently with generic_shutdown_super(). It is safe to call the
582 * function multiple times, subsequent calls have no effect.
583 *
584 * The marker will affect the re-use only for block-device-based
585 * superblocks. Other superblocks will still get marked if this function
586 * is used, but that will not affect their reusability.
587 */
588void retire_super(struct super_block *sb)
589{
590 WARN_ON(!sb->s_bdev);
5e874914 591 __super_lock_excl(sb);
04b94071
DL
592 if (sb->s_iflags & SB_I_PERSB_BDI) {
593 bdi_unregister(sb->s_bdi);
594 sb->s_iflags &= ~SB_I_PERSB_BDI;
595 }
596 sb->s_iflags |= SB_I_RETIRED;
0ed33598 597 super_unlock_excl(sb);
04b94071
DL
598}
599EXPORT_SYMBOL(retire_super);
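/*
 * Illustrative sketch (editor's addition): a filesystem-specific shutdown
 * or "abort" path could retire its superblock so that a later mount of the
 * same device never matches it in the sget_dev() test; myfs_abort() is a
 * hypothetical helper:
 *
 *	static void myfs_abort(struct super_block *sb)
 *	{
 *		...mark the filesystem dead internally...
 *		retire_super(sb);
 *	}
 */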
600
1da177e4
LT
601/**
602 * generic_shutdown_super - common helper for ->kill_sb()
603 * @sb: superblock to kill
604 *
605 * generic_shutdown_super() does all fs-independent work on superblock
606 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
607 * that need destruction out of superblock, call generic_shutdown_super()
608 * and release aforementioned objects. Note: dentries and inodes _are_
609 * taken care of and do not need specific handling.
c636ebdb
DH
610 *
611 * Upon calling this function, the filesystem may no longer alter or
612 * rearrange the set of dentries belonging to this super_block, nor may it
613 * change the attachments of dentries to inodes.
1da177e4
LT
614 */
615void generic_shutdown_super(struct super_block *sb)
616{
ee9b6d61 617 const struct super_operations *sop = sb->s_op;
1da177e4 618
c636ebdb
DH
619 if (sb->s_root) {
620 shrink_dcache_for_umount(sb);
60b0680f 621 sync_filesystem(sb);
e462ec50 622 sb->s_flags &= ~SB_ACTIVE;
efaee192 623
a1a0e23e 624 cgroup_writeback_umount();
63997e98 625
ccb820dc 626 /* Evict all inodes with zero refcount. */
63997e98 627 evict_inodes(sb);
ccb820dc
EB
628
629 /*
630 * Clean up and evict any inodes that still have references due
631 * to fsnotify or the security policy.
632 */
1edc8eb2 633 fsnotify_sb_delete(sb);
83e804f0 634 security_sb_delete(sb);
1da177e4 635
7b7a8665
CH
636 if (sb->s_dio_done_wq) {
637 destroy_workqueue(sb->s_dio_done_wq);
638 sb->s_dio_done_wq = NULL;
639 }
640
1da177e4
LT
641 if (sop->put_super)
642 sop->put_super(sb);
643
2a0e8571
JB
644 /*
645 * Now that all potentially-encrypted inodes have been evicted,
646 * the fscrypt keyring can be destroyed.
647 */
648 fscrypt_destroy_keyring(sb);
649
47d58691
JH
650 if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
651 "VFS: Busy inodes after unmount of %s (%s)",
652 sb->s_id, sb->s_type->name)) {
653 /*
654 * Adding a proper bailout path here would be hard, but
655 * we can at least make it more likely that a later
656 * iput_final() or such crashes cleanly.
657 */
658 struct inode *inode;
659
660 spin_lock(&sb->s_inode_list_lock);
661 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
662 inode->i_op = VFS_PTR_POISON;
663 inode->i_sb = VFS_PTR_POISON;
664 inode->i_mapping = VFS_PTR_POISON;
665 }
666 spin_unlock(&sb->s_inode_list_lock);
1da177e4 667 }
1da177e4 668 }
5e874914
CB
669 /*
670 * Broadcast to everyone that grabbed a temporary reference to this
671 * superblock before we removed it from @fs_supers that the superblock
672 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
673 * discard this superblock and treat it as dead.
2c18a63b
CB
674 *
675 * We leave the superblock on @fs_supers so it can be found by
676 * sget{_fc}() until we passed sb->kill_sb().
5e874914
CB
677 */
678 super_wake(sb, SB_DYING);
0ed33598 679 super_unlock_excl(sb);
c1844d53 680 if (sb->s_bdi != &noop_backing_dev_info) {
0b3ea092
CH
681 if (sb->s_iflags & SB_I_PERSB_BDI)
682 bdi_unregister(sb->s_bdi);
fca39346
JK
683 bdi_put(sb->s_bdi);
684 sb->s_bdi = &noop_backing_dev_info;
fca39346 685 }
1da177e4
LT
686}
687
688EXPORT_SYMBOL(generic_shutdown_super);
689
20284ab7 690bool mount_capable(struct fs_context *fc)
0ce0cf12 691{
20284ab7 692 if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
0ce0cf12
AV
693 return capable(CAP_SYS_ADMIN);
694 else
c2c44ec2 695 return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
0ce0cf12
AV
696}
697
cb50b348
AV
698/**
699 * sget_fc - Find or create a superblock
700 * @fc: Filesystem context.
701 * @test: Comparison callback
702 * @set: Setup callback
703 *
22ed7ecd 704 * Create a new superblock or find an existing one.
cb50b348 705 *
22ed7ecd
CB
706 * The @test callback is used to find a matching existing superblock.
707 * Whether or not the requested parameters in @fc are taken into account
708 * is specific to the @test callback that is used. They may even be
709 * completely ignored.
710 *
711 * If an extant superblock is matched, it will be returned unless:
712 *
713 * (1) the namespace the filesystem context @fc and the extant
714 * superblock's namespace differ
715 *
716 * (2) the filesystem context @fc has requested that reusing an extant
717 * superblock is not allowed
718 *
719 * In both cases EBUSY will be returned.
cb50b348
AV
720 *
721 * If no match is made, a new superblock will be allocated and basic
22ed7ecd
CB
722 * initialisation will be performed (s_type, s_fs_info and s_id will be
723 * set and the @set callback will be invoked), the superblock will be
724 * published and it will be returned in a partially constructed state
725 * with SB_BORN and SB_ACTIVE as yet unset.
726 *
727 * Return: On success, an extant or newly created superblock is
728 * returned. On failure an error pointer is returned.
cb50b348
AV
729 */
730struct super_block *sget_fc(struct fs_context *fc,
731 int (*test)(struct super_block *, struct fs_context *),
732 int (*set)(struct super_block *, struct fs_context *))
733{
734 struct super_block *s = NULL;
735 struct super_block *old;
736 struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
737 int err;
738
cb50b348
AV
739retry:
740 spin_lock(&sb_lock);
741 if (test) {
742 hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
743 if (test(old, fc))
744 goto share_extant_sb;
745 }
746 }
747 if (!s) {
748 spin_unlock(&sb_lock);
749 s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
750 if (!s)
751 return ERR_PTR(-ENOMEM);
752 goto retry;
753 }
754
755 s->s_fs_info = fc->s_fs_info;
756 err = set(s, fc);
757 if (err) {
758 s->s_fs_info = NULL;
759 spin_unlock(&sb_lock);
760 destroy_unused_super(s);
761 return ERR_PTR(err);
762 }
763 fc->s_fs_info = NULL;
764 s->s_type = fc->fs_type;
c80fa7c8 765 s->s_iflags |= fc->s_iflags;
c642256b 766 strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
5e874914
CB
767 /*
768 * Make the superblock visible on @super_blocks and @fs_supers.
769 * It's in a nascent state and users should wait on SB_BORN or
770 * SB_DYING to be set.
771 */
cb50b348
AV
772 list_add_tail(&s->s_list, &super_blocks);
773 hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
774 spin_unlock(&sb_lock);
775 get_filesystem(s->s_type);
1720f5dd 776 shrinker_register(s->s_shrink);
cb50b348
AV
777 return s;
778
779share_extant_sb:
22ed7ecd 780 if (user_ns != old->s_user_ns || fc->exclusive) {
cb50b348
AV
781 spin_unlock(&sb_lock);
782 destroy_unused_super(s);
22ed7ecd
CB
783 if (fc->exclusive)
784 warnfc(fc, "reusing existing filesystem not allowed");
785 else
786 warnfc(fc, "reusing existing filesystem in another namespace not allowed");
cb50b348
AV
787 return ERR_PTR(-EBUSY);
788 }
97cbed04 789 if (!grab_super(old))
cb50b348
AV
790 goto retry;
791 destroy_unused_super(s);
792 return old;
793}
794EXPORT_SYMBOL(sget_fc);
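/*
 * Illustrative sketch (editor's addition): a typical ->get_tree()
 * implementation built directly on sget_fc(), mirroring vfs_get_super()
 * further down. The myfs_* names are hypothetical:
 *
 *	static int myfs_get_tree(struct fs_context *fc)
 *	{
 *		struct super_block *sb;
 *		int err;
 *
 *		sb = sget_fc(fc, myfs_test, myfs_set);
 *		if (IS_ERR(sb))
 *			return PTR_ERR(sb);
 *
 *		if (!sb->s_root) {
 *			err = myfs_fill_super(sb, fc);
 *			if (err) {
 *				deactivate_locked_super(sb);
 *				return err;
 *			}
 *			sb->s_flags |= SB_ACTIVE;
 *		}
 *
 *		fc->root = dget(sb->s_root);
 *		return 0;
 *	}
 */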
795
1da177e4 796/**
023d066a
DH
797 * sget - find or create a superblock
798 * @type: filesystem type superblock should belong to
799 * @test: comparison callback
800 * @set: setup callback
801 * @flags: mount flags
802 * @data: argument to each of them
1da177e4 803 */
023d066a 804struct super_block *sget(struct file_system_type *type,
1da177e4
LT
805 int (*test)(struct super_block *,void *),
806 int (*set)(struct super_block *,void *),
023d066a 807 int flags,
1da177e4
LT
808 void *data)
809{
023d066a 810 struct user_namespace *user_ns = current_user_ns();
1da177e4 811 struct super_block *s = NULL;
d4730127 812 struct super_block *old;
1da177e4
LT
813 int err;
814
023d066a
DH
815 /* We don't yet pass the user namespace of the parent
816 * mount through to here so always use &init_user_ns
817 * until that changes.
818 */
819 if (flags & SB_SUBMOUNT)
820 user_ns = &init_user_ns;
821
1da177e4
LT
822retry:
823 spin_lock(&sb_lock);
d4730127 824 if (test) {
b67bfe0d 825 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
d4730127
MK
826 if (!test(old, data))
827 continue;
6e4eab57
EB
828 if (user_ns != old->s_user_ns) {
829 spin_unlock(&sb_lock);
0200894d 830 destroy_unused_super(s);
6e4eab57
EB
831 return ERR_PTR(-EBUSY);
832 }
97cbed04 833 if (!grab_super(old))
d4730127 834 goto retry;
0200894d 835 destroy_unused_super(s);
d4730127
MK
836 return old;
837 }
1da177e4
LT
838 }
839 if (!s) {
840 spin_unlock(&sb_lock);
e462ec50 841 s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
1da177e4
LT
842 if (!s)
843 return ERR_PTR(-ENOMEM);
844 goto retry;
845 }
dd111b31 846
1da177e4
LT
847 err = set(s, data);
848 if (err) {
849 spin_unlock(&sb_lock);
0200894d 850 destroy_unused_super(s);
1da177e4
LT
851 return ERR_PTR(err);
852 }
853 s->s_type = type;
c642256b 854 strscpy(s->s_id, type->name, sizeof(s->s_id));
1da177e4 855 list_add_tail(&s->s_list, &super_blocks);
a5166169 856 hlist_add_head(&s->s_instances, &type->fs_supers);
1da177e4
LT
857 spin_unlock(&sb_lock);
858 get_filesystem(type);
1720f5dd 859 shrinker_register(s->s_shrink);
1da177e4
LT
860 return s;
861}
1da177e4
LT
862EXPORT_SYMBOL(sget);
863
864void drop_super(struct super_block *sb)
865{
0ed33598 866 super_unlock_shared(sb);
1da177e4
LT
867 put_super(sb);
868}
869
870EXPORT_SYMBOL(drop_super);
871
ba6379f7
JK
872void drop_super_exclusive(struct super_block *sb)
873{
0ed33598 874 super_unlock_excl(sb);
ba6379f7
JK
875 put_super(sb);
876}
877EXPORT_SYMBOL(drop_super_exclusive);
878
fa7c1d50
MG
879static void __iterate_supers(void (*f)(struct super_block *))
880{
881 struct super_block *sb, *p = NULL;
882
883 spin_lock(&sb_lock);
884 list_for_each_entry(sb, &super_blocks, s_list) {
b30850c5 885 if (super_flags(sb, SB_DYING))
fa7c1d50
MG
886 continue;
887 sb->s_count++;
888 spin_unlock(&sb_lock);
889
890 f(sb);
891
892 spin_lock(&sb_lock);
893 if (p)
894 __put_super(p);
895 p = sb;
896 }
897 if (p)
898 __put_super(p);
899 spin_unlock(&sb_lock);
900}
01a05b33
AV
901/**
902 * iterate_supers - call function for all active superblocks
903 * @f: function to call
904 * @arg: argument to pass to it
905 *
906 * Scans the superblock list and calls given function, passing it
907 * locked superblock and given argument.
908 */
909void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
910{
dca33252 911 struct super_block *sb, *p = NULL;
01a05b33
AV
912
913 spin_lock(&sb_lock);
dca33252 914 list_for_each_entry(sb, &super_blocks, s_list) {
f0cd9880 915 bool locked;
5e874914 916
01a05b33
AV
917 sb->s_count++;
918 spin_unlock(&sb_lock);
919
f0cd9880
CB
920 locked = super_lock_shared(sb);
921 if (locked) {
922 if (sb->s_root)
923 f(sb, arg);
924 super_unlock_shared(sb);
925 }
01a05b33
AV
926
927 spin_lock(&sb_lock);
dca33252
AV
928 if (p)
929 __put_super(p);
930 p = sb;
01a05b33 931 }
dca33252
AV
932 if (p)
933 __put_super(p);
01a05b33
AV
934 spin_unlock(&sb_lock);
935}
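/*
 * Illustrative sketch (editor's addition): how a caller might use
 * iterate_supers(), e.g. to sync every writable superblock (cf. the sync
 * path in fs/sync.c); sync_one_sb() is a hypothetical callback:
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!sb_rdonly(sb))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 */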
936
43e15cdb
AV
937/**
938 * iterate_supers_type - call function for superblocks of given type
939 * @type: fs type
940 * @f: function to call
941 * @arg: argument to pass to it
942 *
943 * Scans the superblock list and calls given function, passing it
944 * locked superblock and given argument.
945 */
946void iterate_supers_type(struct file_system_type *type,
947 void (*f)(struct super_block *, void *), void *arg)
948{
949 struct super_block *sb, *p = NULL;
950
951 spin_lock(&sb_lock);
b67bfe0d 952 hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
f0cd9880 953 bool locked;
5e874914 954
43e15cdb
AV
955 sb->s_count++;
956 spin_unlock(&sb_lock);
957
f0cd9880
CB
958 locked = super_lock_shared(sb);
959 if (locked) {
960 if (sb->s_root)
961 f(sb, arg);
962 super_unlock_shared(sb);
963 }
43e15cdb
AV
964
965 spin_lock(&sb_lock);
966 if (p)
967 __put_super(p);
968 p = sb;
969 }
970 if (p)
971 __put_super(p);
972 spin_unlock(&sb_lock);
973}
974
975EXPORT_SYMBOL(iterate_supers_type);
976
4e7b5671 977struct super_block *user_get_super(dev_t dev, bool excl)
1da177e4 978{
618f0636 979 struct super_block *sb;
1da177e4 980
1da177e4 981 spin_lock(&sb_lock);
618f0636
KK
982 list_for_each_entry(sb, &super_blocks, s_list) {
983 if (sb->s_dev == dev) {
f0cd9880 984 bool locked;
5e874914 985
618f0636 986 sb->s_count++;
1da177e4 987 spin_unlock(&sb_lock);
df40c01a 988 /* still alive? */
f0cd9880
CB
989 locked = super_lock(sb, excl);
990 if (locked) {
991 if (sb->s_root)
992 return sb;
993 super_unlock(sb, excl);
994 }
df40c01a 995 /* nope, got unmounted */
618f0636 996 spin_lock(&sb_lock);
df40c01a 997 __put_super(sb);
5e874914 998 break;
1da177e4
LT
999 }
1000 }
1001 spin_unlock(&sb_lock);
1002 return NULL;
1003}
1004
1da177e4 1005/**
8d0347f6
DH
1006 * reconfigure_super - asks filesystem to change superblock parameters
1007 * @fc: The superblock and configuration
1da177e4 1008 *
8d0347f6 1009 * Alters the configuration parameters of a live superblock.
1da177e4 1010 */
8d0347f6 1011int reconfigure_super(struct fs_context *fc)
1da177e4 1012{
8d0347f6 1013 struct super_block *sb = fc->root->d_sb;
1da177e4 1014 int retval;
8d0347f6 1015 bool remount_ro = false;
c541dce8 1016 bool remount_rw = false;
8d0347f6 1017 bool force = fc->sb_flags & SB_FORCE;
4504230a 1018
8d0347f6
DH
1019 if (fc->sb_flags_mask & ~MS_RMT_MASK)
1020 return -EINVAL;
5accdf82 1021 if (sb->s_writers.frozen != SB_UNFROZEN)
4504230a
CH
1022 return -EBUSY;
1023
8d0347f6
DH
1024 retval = security_sb_remount(sb, fc->security);
1025 if (retval)
1026 return retval;
1027
1028 if (fc->sb_flags_mask & SB_RDONLY) {
9361401e 1029#ifdef CONFIG_BLOCK
6f0d9689
CH
1030 if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
1031 bdev_read_only(sb->s_bdev))
8d0347f6 1032 return -EACCES;
9361401e 1033#endif
c541dce8 1034 remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
8d0347f6
DH
1035 remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
1036 }
d208bbdd 1037
0aec09d0 1038 if (remount_ro) {
fdab684d 1039 if (!hlist_empty(&sb->s_pins)) {
0ed33598 1040 super_unlock_excl(sb);
fdab684d 1041 group_pin_kill(&sb->s_pins);
5e874914 1042 __super_lock_excl(sb);
0aec09d0
AV
1043 if (!sb->s_root)
1044 return 0;
1045 if (sb->s_writers.frozen != SB_UNFROZEN)
1046 return -EBUSY;
8d0347f6 1047 remount_ro = !sb_rdonly(sb);
0aec09d0
AV
1048 }
1049 }
1050 shrink_dcache_sb(sb);
1051
8d0347f6
DH
1052 /* If we are reconfiguring to RDONLY and current sb is read/write,
1053 * make sure there are no files open for writing.
1054 */
d208bbdd 1055 if (remount_ro) {
4ed5e82f 1056 if (force) {
d7439fb1 1057 sb_start_ro_state_change(sb);
4ed5e82f
MS
1058 } else {
1059 retval = sb_prepare_remount_readonly(sb);
1060 if (retval)
1061 return retval;
4ed5e82f 1062 }
c541dce8
JK
1063 } else if (remount_rw) {
1064 /*
d7439fb1
JK
1065 * Protect filesystem's reconfigure code from writes from
1066 * userspace until reconfigure finishes.
c541dce8 1067 */
d7439fb1 1068 sb_start_ro_state_change(sb);
1da177e4
LT
1069 }
1070
f3a09c92
AV
1071 if (fc->ops->reconfigure) {
1072 retval = fc->ops->reconfigure(fc);
1073 if (retval) {
1074 if (!force)
1075 goto cancel_readonly;
1076 /* If forced remount, go ahead despite any errors */
1077 WARN(1, "forced remount of a %s fs returned %i\n",
1078 sb->s_type->name, retval);
1079 }
1da177e4 1080 }
8d0347f6
DH
1081
1082 WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
1083 (fc->sb_flags & fc->sb_flags_mask)));
d7439fb1 1084 sb_end_ro_state_change(sb);
c79d967d 1085
d208bbdd
NP
1086 /*
1087 * Some filesystems modify their metadata via some other path than the
1088 * bdev buffer cache (eg. use a private mapping, or directories in
1089 * pagecache, etc). Also file data modifications go via their own
1090 * mappings. So if we try to mount readonly then copy the filesystem
1091 * from bdev, we could get stale data, so invalidate it to give a best
1092 * effort at coherency.
1093 */
1094 if (remount_ro && sb->s_bdev)
1095 invalidate_bdev(sb->s_bdev);
1da177e4 1096 return 0;
4ed5e82f
MS
1097
1098cancel_readonly:
d7439fb1 1099 sb_end_ro_state_change(sb);
4ed5e82f 1100 return retval;
1da177e4
LT
1101}
1102
fa7c1d50 1103static void do_emergency_remount_callback(struct super_block *sb)
1da177e4 1104{
f0cd9880 1105 bool locked = super_lock_excl(sb);
5e874914 1106
f0cd9880 1107 if (locked && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
8d0347f6
DH
1108 struct fs_context *fc;
1109
1110 fc = fs_context_for_reconfigure(sb->s_root,
1111 SB_RDONLY | SB_FORCE, SB_RDONLY);
1112 if (!IS_ERR(fc)) {
1113 if (parse_monolithic_mount_data(fc, NULL) == 0)
1114 (void)reconfigure_super(fc);
1115 put_fs_context(fc);
1116 }
1da177e4 1117 }
f0cd9880
CB
1118 if (locked)
1119 super_unlock_excl(sb);
fa7c1d50
MG
1120}
1121
1122static void do_emergency_remount(struct work_struct *work)
1123{
1124 __iterate_supers(do_emergency_remount_callback);
a2a9537a 1125 kfree(work);
1da177e4
LT
1126 printk("Emergency Remount complete\n");
1127}
1128
1129void emergency_remount(void)
1130{
a2a9537a
JA
1131 struct work_struct *work;
1132
1133 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1134 if (work) {
1135 INIT_WORK(work, do_emergency_remount);
1136 schedule_work(work);
1137 }
1da177e4
LT
1138}
1139
08fdc8a0
MG
1140static void do_thaw_all_callback(struct super_block *sb)
1141{
f0cd9880 1142 bool locked = super_lock_excl(sb);
5e874914 1143
f0cd9880 1144 if (locked && sb->s_root) {
4a8b719f 1145 if (IS_ENABLED(CONFIG_BLOCK))
982c3b30 1146 while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
4a8b719f 1147 pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
880b9577 1148 thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
f0cd9880 1149 return;
08fdc8a0 1150 }
f0cd9880
CB
1151 if (locked)
1152 super_unlock_excl(sb);
08fdc8a0
MG
1153}
1154
1155static void do_thaw_all(struct work_struct *work)
1156{
1157 __iterate_supers(do_thaw_all_callback);
1158 kfree(work);
1159 printk(KERN_WARNING "Emergency Thaw complete\n");
1160}
1161
1162/**
1163 * emergency_thaw_all -- forcibly thaw every frozen filesystem
1164 *
1165 * Used for emergency unfreeze of all filesystems via SysRq
1166 */
1167void emergency_thaw_all(void)
1168{
1169 struct work_struct *work;
1170
1171 work = kmalloc(sizeof(*work), GFP_ATOMIC);
1172 if (work) {
1173 INIT_WORK(work, do_thaw_all);
1174 schedule_work(work);
1175 }
1176}
1177
ad76cbc6 1178static DEFINE_IDA(unnamed_dev_ida);
1da177e4 1179
5a66847e
MW
1180/**
1181 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
1182 * @p: Pointer to a dev_t.
1183 *
1184 * Filesystems which don't use real block devices can call this function
1185 * to allocate a virtual block device.
1186 *
1187 * Context: Any context. Frequently called while holding sb_lock.
1188 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
1189 * or -ENOMEM if memory allocation failed.
1190 */
0ee5dc67 1191int get_anon_bdev(dev_t *p)
1da177e4
LT
1192{
1193 int dev;
5a66847e
MW
1194
1195 /*
1196 * Many userspace utilities consider an FSID of 0 invalid.
1197 * Always return at least 1 from get_anon_bdev.
1198 */
1199 dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
1200 GFP_ATOMIC);
1201 if (dev == -ENOSPC)
1202 dev = -EMFILE;
1203 if (dev < 0)
1204 return dev;
1205
1206 *p = MKDEV(0, dev);
1da177e4
LT
1207 return 0;
1208}
0ee5dc67 1209EXPORT_SYMBOL(get_anon_bdev);
1da177e4 1210
0ee5dc67 1211void free_anon_bdev(dev_t dev)
1da177e4 1212{
5a66847e 1213 ida_free(&unnamed_dev_ida, MINOR(dev));
1da177e4 1214}
0ee5dc67
AV
1215EXPORT_SYMBOL(free_anon_bdev);
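/*
 * Illustrative sketch (editor's addition): most callers get here via
 * set_anon_super() below, but the pair can also be used directly to
 * obtain a virtual st_dev; error handling abbreviated:
 *
 *	dev_t dev;
 *	int err;
 *
 *	err = get_anon_bdev(&dev);
 *	if (err)
 *		return err;
 *	...
 *	free_anon_bdev(dev);
 */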
1216
1217int set_anon_super(struct super_block *s, void *data)
1218{
df0ce26c 1219 return get_anon_bdev(&s->s_dev);
0ee5dc67 1220}
0ee5dc67
AV
1221EXPORT_SYMBOL(set_anon_super);
1222
1223void kill_anon_super(struct super_block *sb)
1224{
1225 dev_t dev = sb->s_dev;
1226 generic_shutdown_super(sb);
dc3216b1 1227 kill_super_notify(sb);
0ee5dc67
AV
1228 free_anon_bdev(dev);
1229}
1da177e4
LT
1230EXPORT_SYMBOL(kill_anon_super);
1231
1da177e4
LT
1232void kill_litter_super(struct super_block *sb)
1233{
1234 if (sb->s_root)
1235 d_genocide(sb->s_root);
1236 kill_anon_super(sb);
1237}
1da177e4
LT
1238EXPORT_SYMBOL(kill_litter_super);
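/*
 * Illustrative sketch (editor's addition): a small in-memory filesystem
 * typically pairs an anonymous superblock with one of the kill_*_super()
 * helpers in its file_system_type; the myfs_* names are hypothetical:
 *
 *	static struct file_system_type myfs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "myfs",
 *		.init_fs_context = myfs_init_fs_context,
 *		.kill_sb	= kill_litter_super,
 *	};
 */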
1239
cb50b348
AV
1240int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
1241{
1242 return set_anon_super(sb, NULL);
1243}
1244EXPORT_SYMBOL(set_anon_super_fc);
1245
1246static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
1247{
1248 return sb->s_fs_info == fc->s_fs_info;
1249}
1250
1251static int test_single_super(struct super_block *s, struct fs_context *fc)
1252{
1253 return 1;
1254}
1255
e062abae 1256static int vfs_get_super(struct fs_context *fc,
cda2ed05
CH
1257 int (*test)(struct super_block *, struct fs_context *),
1258 int (*fill_super)(struct super_block *sb,
1259 struct fs_context *fc))
cb50b348 1260{
cb50b348 1261 struct super_block *sb;
43ce4c1f 1262 int err;
cb50b348 1263
cb50b348
AV
1264 sb = sget_fc(fc, test, set_anon_super_fc);
1265 if (IS_ERR(sb))
1266 return PTR_ERR(sb);
1267
1268 if (!sb->s_root) {
43ce4c1f
DH
1269 err = fill_super(sb, fc);
1270 if (err)
1271 goto error;
cb50b348
AV
1272
1273 sb->s_flags |= SB_ACTIVE;
1274 }
1275
e062abae 1276 fc->root = dget(sb->s_root);
cb50b348 1277 return 0;
43ce4c1f
DH
1278
1279error:
1280 deactivate_locked_super(sb);
1281 return err;
cb50b348 1282}
cb50b348 1283
2ac295d4
AV
1284int get_tree_nodev(struct fs_context *fc,
1285 int (*fill_super)(struct super_block *sb,
1286 struct fs_context *fc))
1287{
e062abae 1288 return vfs_get_super(fc, NULL, fill_super);
2ac295d4
AV
1289}
1290EXPORT_SYMBOL(get_tree_nodev);
1291
c23a0bba
AV
1292int get_tree_single(struct fs_context *fc,
1293 int (*fill_super)(struct super_block *sb,
1294 struct fs_context *fc))
1295{
e062abae 1296 return vfs_get_super(fc, test_single_super, fill_super);
c23a0bba
AV
1297}
1298EXPORT_SYMBOL(get_tree_single);
1299
533770cc
AV
1300int get_tree_keyed(struct fs_context *fc,
1301 int (*fill_super)(struct super_block *sb,
1302 struct fs_context *fc),
1303 void *key)
1304{
1305 fc->s_fs_info = key;
e062abae 1306 return vfs_get_super(fc, test_keyed_super, fill_super);
533770cc
AV
1307}
1308EXPORT_SYMBOL(get_tree_keyed);
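/*
 * Illustrative sketch (editor's addition): wiring one of the helpers above
 * into a filesystem's fs_context_operations; myfs_fill_super and
 * myfs_context_ops are hypothetical:
 *
 *	static int myfs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_nodev(fc, myfs_fill_super);
 *	}
 *
 *	static const struct fs_context_operations myfs_context_ops = {
 *		.get_tree	= myfs_get_tree,
 *	};
 */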
1309
69881be3
CB
1310static int set_bdev_super(struct super_block *s, void *data)
1311{
1312 s->s_dev = *(dev_t *)data;
1313 return 0;
1314}
1315
1316static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
1317{
1318 return set_bdev_super(s, fc->sget_key);
1319}
1320
1321static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
1322{
1323 return !(s->s_iflags & SB_I_RETIRED) &&
1324 s->s_dev == *(dev_t *)fc->sget_key;
1325}
1326
1327/**
1328 * sget_dev - Find or create a superblock by device number
1329 * @fc: Filesystem context.
1330 * @dev: device number
1331 *
1332 * Find or create a superblock using the provided device number that
1333 * will be stored in fc->sget_key.
1334 *
1335 * If an extant superblock is matched, then that will be returned with
1336 * an elevated reference count that the caller must transfer or discard.
1337 *
1338 * If no match is made, a new superblock will be allocated and basic
1339 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
1340 * be set). The superblock will be published and it will be returned in
1341 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
1342 * unset.
1343 *
1344 * Return: an existing or newly created superblock on success, an error
1345 * pointer on failure.
1346 */
1347struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
1348{
1349 fc->sget_key = &dev;
1350 return sget_fc(fc, super_s_dev_test, super_s_dev_set);
1351}
1352EXPORT_SYMBOL(sget_dev);
1353
9361401e 1354#ifdef CONFIG_BLOCK
9c09a7cf 1355/*
fd146410
JK
1356 * Lock the superblock that is the holder of the bdev. Returns the superblock
1357 * pointer if we successfully locked the superblock and it is alive. Otherwise
1358 * we return NULL and just unlock bdev->bd_holder_lock.
9c09a7cf 1359 *
fd146410 1360 * The function must be called with bdev->bd_holder_lock and releases it.
9c09a7cf 1361 */
49ef8832 1362static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
fd146410 1363 __releases(&bdev->bd_holder_lock)
87efb390 1364{
fd146410 1365 struct super_block *sb = bdev->bd_holder;
f0cd9880 1366 bool locked;
fd146410
JK
1367
1368 lockdep_assert_held(&bdev->bd_holder_lock);
1369 lockdep_assert_not_held(&sb->s_umount);
3b224e1d 1370 lockdep_assert_not_held(&bdev->bd_disk->open_mutex);
fd146410
JK
1371
1372 /* Make sure sb doesn't go away from under us */
1373 spin_lock(&sb_lock);
1374 sb->s_count++;
1375 spin_unlock(&sb_lock);
49ef8832 1376
fd146410 1377 mutex_unlock(&bdev->bd_holder_lock);
87efb390 1378
49ef8832
CB
1379 locked = super_lock(sb, excl);
1380
fd146410 1381 /*
49ef8832
CB
1382 * If the superblock wasn't already SB_DYING then we hold
1383 * s_umount and can safely drop our temporary reference.
1384 */
fd146410 1385 put_super(sb);
49ef8832
CB
1386
1387 if (!locked)
1388 return NULL;
1389
1390 if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
1391 super_unlock(sb, excl);
1392 return NULL;
1393 }
1394
fd146410 1395 return sb;
9c09a7cf
CH
1396}
1397
d8530de5 1398static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
87efb390 1399{
fd146410 1400 struct super_block *sb;
9c09a7cf 1401
49ef8832 1402 sb = bdev_super_lock(bdev, false);
fd146410 1403 if (!sb)
87efb390
CH
1404 return;
1405
d8530de5
CH
1406 if (!surprise)
1407 sync_filesystem(sb);
1408 shrink_dcache_sb(sb);
e127b9bc 1409 invalidate_inodes(sb);
87efb390
CH
1410 if (sb->s_op->shutdown)
1411 sb->s_op->shutdown(sb);
9c09a7cf 1412
0ed33598 1413 super_unlock_shared(sb);
87efb390
CH
1414}
1415
2142b88c
CH
1416static void fs_bdev_sync(struct block_device *bdev)
1417{
fd146410 1418 struct super_block *sb;
2142b88c 1419
49ef8832 1420 sb = bdev_super_lock(bdev, false);
fd146410 1421 if (!sb)
2142b88c 1422 return;
49ef8832 1423
2142b88c 1424 sync_filesystem(sb);
0ed33598 1425 super_unlock_shared(sb);
2142b88c
CH
1426}
1427
49ef8832
CB
1428static struct super_block *get_bdev_super(struct block_device *bdev)
1429{
1430 bool active = false;
1431 struct super_block *sb;
1432
1433 sb = bdev_super_lock(bdev, true);
1434 if (sb) {
1435 active = atomic_inc_not_zero(&sb->s_active);
1436 super_unlock_excl(sb);
1437 }
1438 if (!active)
1439 return NULL;
1440 return sb;
1441}
1442
7366f8b6
CB
1443/**
1444 * fs_bdev_freeze - freeze owning filesystem of block device
1445 * @bdev: block device
1446 *
1447 * Freeze the filesystem that owns this block device if it is still
1448 * active.
1449 *
1450 * A filesystem that owns multiple block devices may be frozen from each
1451 * block device and won't be unfrozen until all block devices are
1452 * unfrozen. Each block device can only freeze the filesystem once as we
1453 * nest freezes for block devices in the block layer.
1454 *
1455 * Return: If the freeze was successful zero is returned. If the freeze
1456 * failed a negative error code is returned.
1457 */
49ef8832
CB
1458static int fs_bdev_freeze(struct block_device *bdev)
1459{
1460 struct super_block *sb;
1461 int error = 0;
1462
1463 lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
1464
1465 sb = get_bdev_super(bdev);
1466 if (!sb)
1467 return -EINVAL;
1468
1469 if (sb->s_op->freeze_super)
7366f8b6
CB
1470 error = sb->s_op->freeze_super(sb,
1471 FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
49ef8832 1472 else
7366f8b6
CB
1473 error = freeze_super(sb,
1474 FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
49ef8832
CB
1475 if (!error)
1476 error = sync_blockdev(bdev);
1477 deactivate_super(sb);
1478 return error;
1479}
1480
7366f8b6
CB
1481/**
1482 * fs_bdev_thaw - thaw owning filesystem of block device
1483 * @bdev: block device
1484 *
1485 * Thaw the filesystem that owns this block device.
1486 *
1487 * A filesystem that owns multiple block devices may be frozen from each
1488 * block device and won't be unfrozen until all block devices are
1489 * unfrozen. Each block device can only freeze the filesystem once as we
1490 * nest freezes for block devices in the block layer.
1491 *
1492 * Return: If the thaw was successful zero is returned. If the thaw
1493 * failed a negative error code is returned. If this function
1494 * returns zero it doesn't mean that the filesystem is unfrozen
1495 * as it may have been frozen multiple times (kernel may hold a
1496 * freeze or might be frozen from other block devices).
1497 */
49ef8832
CB
1498static int fs_bdev_thaw(struct block_device *bdev)
1499{
1500 struct super_block *sb;
1501 int error;
1502
1503 lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
1504
1505 sb = get_bdev_super(bdev);
1506 if (WARN_ON_ONCE(!sb))
1507 return -EINVAL;
1508
1509 if (sb->s_op->thaw_super)
7366f8b6
CB
1510 error = sb->s_op->thaw_super(sb,
1511 FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
49ef8832 1512 else
7366f8b6
CB
1513 error = thaw_super(sb,
1514 FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
49ef8832
CB
1515 deactivate_super(sb);
1516 return error;
1517}
1518
7ecd0b6f 1519const struct blk_holder_ops fs_holder_ops = {
d8530de5 1520 .mark_dead = fs_bdev_mark_dead,
2142b88c 1521 .sync = fs_bdev_sync,
49ef8832
CB
1522 .freeze = fs_bdev_freeze,
1523 .thaw = fs_bdev_thaw,
87efb390 1524};
7ecd0b6f 1525EXPORT_SYMBOL_GPL(fs_holder_ops);
fe62c3a4 1526
cf6da236 1527int setup_bdev_super(struct super_block *sb, int sb_flags,
aca740ce
JK
1528 struct fs_context *fc)
1529{
1530 blk_mode_t mode = sb_open_mode(sb_flags);
f3a60882 1531 struct file *bdev_file;
aca740ce
JK
1532 struct block_device *bdev;
1533
f3a60882
CB
1534 bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
1535 if (IS_ERR(bdev_file)) {
aca740ce
JK
1536 if (fc)
1537 errorf(fc, "%s: Can't open blockdev", fc->source);
f3a60882 1538 return PTR_ERR(bdev_file);
aca740ce 1539 }
f3a60882 1540 bdev = file_bdev(bdev_file);
aca740ce
JK
1541
1542 /*
1543 * This really should be in blkdev_get_by_dev, but right now can't due
1544 * to legacy issues that require us to allow opening a block device node
1545 * writable from userspace even for a read-only block device.
1546 */
1547 if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
22650a99 1548 bdev_fput(bdev_file);
aca740ce
JK
1549 return -EACCES;
1550 }
1551
1552 /*
49ef8832
CB
1553 * It is enough to check bdev was not frozen before we set
1554 * s_bdev as freezing will wait until SB_BORN is set.
aca740ce 1555 */
49ef8832 1556 if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
aca740ce
JK
1557 if (fc)
1558 warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
22650a99 1559 bdev_fput(bdev_file);
aca740ce
JK
1560 return -EBUSY;
1561 }
1562 spin_lock(&sb_lock);
f3a60882 1563 sb->s_bdev_file = bdev_file;
aca740ce
JK
1564 sb->s_bdev = bdev;
1565 sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
1566 if (bdev_stable_writes(bdev))
1567 sb->s_iflags |= SB_I_STABLE_WRITES;
1568 spin_unlock(&sb_lock);
aca740ce
JK
1569
1570 snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
1720f5dd 1571 shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
aca740ce
JK
1572 sb->s_id);
1573 sb_set_blocksize(sb, block_size(bdev));
1574 return 0;
fe62c3a4 1575}
cf6da236 1576EXPORT_SYMBOL_GPL(setup_bdev_super);
fe62c3a4
DH
1577
1578/**
1579 * get_tree_bdev - Get a superblock based on a single block device
1580 * @fc: The filesystem context holding the parameters
1581 * @fill_super: Helper to initialise a new superblock
1582 */
1583int get_tree_bdev(struct fs_context *fc,
1584 int (*fill_super)(struct super_block *,
1585 struct fs_context *))
1586{
fe62c3a4 1587 struct super_block *s;
fe62c3a4 1588 int error = 0;
aca740ce 1589 dev_t dev;
fe62c3a4 1590
fe62c3a4
DH
1591 if (!fc->source)
1592 return invalf(fc, "No source specified");
1593
aca740ce
JK
1594 error = lookup_bdev(fc->source, &dev);
1595 if (error) {
1596 errorf(fc, "%s: Can't lookup blockdev", fc->source);
1597 return error;
fe62c3a4
DH
1598 }
1599
1600 fc->sb_flags |= SB_NOSEC;
69881be3 1601 s = sget_dev(fc, dev);
aca740ce 1602 if (IS_ERR(s))
fe62c3a4
DH
1603 return PTR_ERR(s);
1604
1605 if (s->s_root) {
1606 /* Don't summarily change the RO/RW state. */
1607 if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
aca740ce 1608 warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
fe62c3a4 1609 deactivate_locked_super(s);
fe62c3a4
DH
1610 return -EBUSY;
1611 }
aca740ce 1612 } else {
aca740ce 1613 error = setup_bdev_super(s, fc->sb_flags, fc);
aca740ce
JK
1614 if (!error)
1615 error = fill_super(s, fc);
fe62c3a4
DH
1616 if (error) {
1617 deactivate_locked_super(s);
1618 return error;
1619 }
fe62c3a4 1620 s->s_flags |= SB_ACTIVE;
fe62c3a4
DH
1621 }
1622
1623 BUG_ON(fc->root);
1624 fc->root = dget(s->s_root);
1625 return 0;
1626}
1627EXPORT_SYMBOL(get_tree_bdev);
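/*
 * Illustrative sketch (editor's addition): a block-device-backed
 * filesystem usually implements ->get_tree() as a thin wrapper around
 * get_tree_bdev(); myfs_fill_super is hypothetical:
 *
 *	static int myfs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, myfs_fill_super);
 *	}
 */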
1628
1da177e4
LT
1629static int test_bdev_super(struct super_block *s, void *data)
1630{
aca740ce 1631 return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
1da177e4
LT
1632}
1633
152a0836 1634struct dentry *mount_bdev(struct file_system_type *fs_type,
1da177e4 1635 int flags, const char *dev_name, void *data,
152a0836 1636 int (*fill_super)(struct super_block *, void *, int))
1da177e4 1637{
1da177e4 1638 struct super_block *s;
aca740ce
JK
1639 int error;
1640 dev_t dev;
1da177e4 1641
aca740ce
JK
1642 error = lookup_bdev(dev_name, &dev);
1643 if (error)
1644 return ERR_PTR(error);
1da177e4 1645
aca740ce
JK
1646 flags |= SB_NOSEC;
1647 s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
1da177e4 1648 if (IS_ERR(s))
aca740ce 1649 return ERR_CAST(s);
1da177e4
LT
1650
1651 if (s->s_root) {
e462ec50 1652 if ((flags ^ s->s_flags) & SB_RDONLY) {
74dbbdd7 1653 deactivate_locked_super(s);
aca740ce 1654 return ERR_PTR(-EBUSY);
1da177e4 1655 }
aca740ce 1656 } else {
aca740ce 1657 error = setup_bdev_super(s, flags, NULL);
aca740ce
JK
1658 if (!error)
1659 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1da177e4 1660 if (error) {
74dbbdd7 1661 deactivate_locked_super(s);
aca740ce 1662 return ERR_PTR(error);
fa675765 1663 }
454e2398 1664
e462ec50 1665 s->s_flags |= SB_ACTIVE;
1da177e4
LT
1666 }
1667
152a0836 1668 return dget(s->s_root);
152a0836
AV
1669}
1670EXPORT_SYMBOL(mount_bdev);
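/*
 * Illustrative sketch (editor's addition): legacy (pre-fs_context)
 * filesystems call mount_bdev() from their ->mount hook; the myfs_* names
 * are hypothetical and myfs_fill_super here has the legacy
 * (sb, data, silent) signature:
 *
 *	static struct dentry *myfs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  myfs_fill_super);
 *	}
 */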
1671
1da177e4
LT
1672void kill_block_super(struct super_block *sb)
1673{
1674 struct block_device *bdev = sb->s_bdev;
1675
1da177e4 1676 generic_shutdown_super(sb);
aca740ce
JK
1677 if (bdev) {
1678 sync_blockdev(bdev);
22650a99 1679 bdev_fput(sb->s_bdev_file);
aca740ce 1680 }
1da177e4
LT
1681}
1682
1683EXPORT_SYMBOL(kill_block_super);
9361401e 1684#endif
1da177e4 1685
3c26ff6e 1686struct dentry *mount_nodev(struct file_system_type *fs_type,
1da177e4 1687 int flags, void *data,
3c26ff6e 1688 int (*fill_super)(struct super_block *, void *, int))
1da177e4
LT
1689{
1690 int error;
9249e17f 1691 struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1da177e4
LT
1692
1693 if (IS_ERR(s))
3c26ff6e 1694 return ERR_CAST(s);
1da177e4 1695
e462ec50 1696 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
1da177e4 1697 if (error) {
74dbbdd7 1698 deactivate_locked_super(s);
3c26ff6e 1699 return ERR_PTR(error);
1da177e4 1700 }
e462ec50 1701 s->s_flags |= SB_ACTIVE;
3c26ff6e 1702 return dget(s->s_root);
1da177e4 1703}
3c26ff6e
AV
1704EXPORT_SYMBOL(mount_nodev);
1705
a6097180
N
1706int reconfigure_single(struct super_block *s,
1707 int flags, void *data)
8d0347f6
DH
1708{
1709 struct fs_context *fc;
1710 int ret;
1711
1712 /* The caller really needs to be passing fc down into mount_single(),
1713 * then a chunk of this can be removed. [Bollocks -- AV]
1714 * Better yet, reconfiguration shouldn't happen, but rather the second
1715 * mount should be rejected if the parameters are not compatible.
1716 */
1717 fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
1718 if (IS_ERR(fc))
1719 return PTR_ERR(fc);
1720
1721 ret = parse_monolithic_mount_data(fc, data);
1722 if (ret < 0)
1723 goto out;
1724
1725 ret = reconfigure_super(fc);
1726out:
1727 put_fs_context(fc);
1728 return ret;
1729}
1730
1da177e4
LT
1731static int compare_single(struct super_block *s, void *p)
1732{
1733 return 1;
1734}
1735
fc14f2fe 1736struct dentry *mount_single(struct file_system_type *fs_type,
1da177e4 1737 int flags, void *data,
fc14f2fe 1738 int (*fill_super)(struct super_block *, void *, int))
1da177e4
LT
1739{
1740 struct super_block *s;
1741 int error;
1742
9249e17f 1743 s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
1da177e4 1744 if (IS_ERR(s))
fc14f2fe 1745 return ERR_CAST(s);
1da177e4 1746 if (!s->s_root) {
e462ec50 1747 error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
8d0347f6
DH
1748 if (!error)
1749 s->s_flags |= SB_ACTIVE;
9329d1be 1750 } else {
8d0347f6
DH
1751 error = reconfigure_single(s, flags, data);
1752 }
1753 if (unlikely(error)) {
1754 deactivate_locked_super(s);
1755 return ERR_PTR(error);
1da177e4 1756 }
fc14f2fe
AV
1757 return dget(s->s_root);
1758}
1759EXPORT_SYMBOL(mount_single);
1760
9bc61ab1
DH
1761/**
1762 * vfs_get_tree - Get the mountable root
1763 * @fc: The superblock configuration context.
1764 *
1765 * The filesystem is invoked to get or create a superblock which can then later
1766 * be used for mounting. The filesystem places a pointer to the root to be
1767 * used for mounting in @fc->root.
1768 */
1769int vfs_get_tree(struct fs_context *fc)
1da177e4 1770{
9d412a43 1771 struct super_block *sb;
9bc61ab1 1772 int error;
8089352a 1773
f3a09c92
AV
1774 if (fc->root)
1775 return -EBUSY;
1776
1777 /* Get the mountable root in fc->root, with a ref on the root and a ref
1778 * on the superblock.
1779 */
1780 error = fc->ops->get_tree(fc);
9bc61ab1
DH
1781 if (error < 0)
1782 return error;
1da177e4 1783
f3a09c92
AV
1784 if (!fc->root) {
1785 pr_err("Filesystem %s get_tree() didn't set fc->root\n",
1786 fc->fs_type->name);
1787 /* We don't know what the locking state of the superblock is -
1788 * if there is a superblock.
1789 */
1790 BUG();
1791 }
1792
9bc61ab1 1793 sb = fc->root->d_sb;
9d412a43 1794 WARN_ON(!sb->s_bdi);
79f546a6
DC
1795
1796 /*
5e874914
CB
1797	 * super_wake() contains a memory barrier which also takes care of
1798 * ordering for super_cache_count(). We place it before setting
1799 * SB_BORN as the data dependency between the two functions is
1800 * the superblock structure contents that we just set up, not
1801 * the SB_BORN flag.
79f546a6 1802 */
5e874914 1803 super_wake(sb, SB_BORN);
454e2398 1804
9bc61ab1 1805 error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
c9ce29ed
AV
1806 if (unlikely(error)) {
1807 fc_drop_locked(fc);
1808 return error;
a10d7c22
AV
1809 }
1810
42cb56ae
JL
1811 /*
1812 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1813 * but s_maxbytes was an unsigned long long for many releases. Throw
1814 * this warning for a little while to try and catch filesystems that
4358b567 1815 * violate this rule.
42cb56ae 1816 */
9d412a43 1817 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
9bc61ab1 1818 "negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);
42cb56ae 1819
9bc61ab1 1820 return 0;
1da177e4 1821}
9bc61ab1 1822EXPORT_SYMBOL(vfs_get_tree);
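/*
 * Illustrative sketch, not part of this file: the usual in-kernel
 * sequence around vfs_get_tree(), mirroring what helpers such as
 * fc_mount() do.  A successful ->get_tree() leaves sb->s_umount held
 * exclusively, so it must be dropped before creating the mount.
 */
static struct vfsmount *example_mount_fc(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);

	if (err)
		return ERR_PTR(err);
	up_write(&fc->root->d_sb->s_umount);
	return vfs_create_mount(fc);
}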
1da177e4 1823
fca39346
JK
1824/*
1825 * Setup private BDI for given superblock. It gets automatically cleaned up
1826 * in generic_shutdown_super().
1827 */
1828int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
1829{
1830 struct backing_dev_info *bdi;
1831 int err;
1832 va_list args;
1833
aef33c2f 1834 bdi = bdi_alloc(NUMA_NO_NODE);
fca39346
JK
1835 if (!bdi)
1836 return -ENOMEM;
1837
fca39346 1838 va_start(args, fmt);
7c4cc300 1839 err = bdi_register_va(bdi, fmt, args);
fca39346
JK
1840 va_end(args);
1841 if (err) {
1842 bdi_put(bdi);
1843 return err;
1844 }
1845 WARN_ON(sb->s_bdi != &noop_backing_dev_info);
1846 sb->s_bdi = bdi;
0b3ea092 1847 sb->s_iflags |= SB_I_PERSB_BDI;
fca39346
JK
1848
1849 return 0;
1850}
1851EXPORT_SYMBOL(super_setup_bdi_name);
1852
1853/*
1854 * Setup private BDI for given superblock. It gets automatically cleaned up
1855 * in generic_shutdown_super().
1856 */
1857int super_setup_bdi(struct super_block *sb)
1858{
1859 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1860
1861 return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
1862 atomic_long_inc_return(&bdi_seq));
1863}
1864EXPORT_SYMBOL(super_setup_bdi);
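/*
 * Illustrative sketch, not part of this file: a network or synthetic
 * filesystem giving itself a private BDI from its fill_super path so
 * that writeback accounting is per superblock.  examplefs_setup_bdi()
 * is hypothetical.
 */
static int examplefs_setup_bdi(struct super_block *sb)
{
	int err = super_setup_bdi(sb);

	if (err)
		return err;
	/* tune the private bdi, e.g. no readahead for a synthetic fs */
	sb->s_bdi->ra_pages = 0;
	return 0;
}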
1865
5accdf82
JK
1866/**
1867 * sb_wait_write - wait until all writers to given file system finish
1868 * @sb: the super for which we wait
1869 * @level: type of writers we wait for (normal vs page fault)
1870 *
1871 * This function waits until there are no writers of the given type to the
8129ed29 1872 * given file system.
5accdf82
JK
1873 */
1874static void sb_wait_write(struct super_block *sb, int level)
1875{
8129ed29 1876 percpu_down_write(sb->s_writers.rw_sem + level-1);
8129ed29 1877}
5accdf82 1878
f1a96220
ON
1879/*
1880 * We are going to return to userspace and forget about these locks, the
1881 * ownership goes to the caller of thaw_super() which does unlock().
1882 */
1883static void lockdep_sb_freeze_release(struct super_block *sb)
1884{
1885 int level;
1886
1887 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1888 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1889}
1890
1891/*
1892 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1893 */
1894static void lockdep_sb_freeze_acquire(struct super_block *sb)
8129ed29
ON
1895{
1896 int level;
5accdf82 1897
8129ed29
ON
1898 for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1899 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
f1a96220
ON
1900}
1901
2719c716 1902static void sb_freeze_unlock(struct super_block *sb, int level)
f1a96220 1903{
2719c716 1904 for (level--; level >= 0; level--)
8129ed29 1905 percpu_up_write(sb->s_writers.rw_sem + level);
5accdf82
JK
1906}
1907
59ba4fdd
DW
1908static int wait_for_partially_frozen(struct super_block *sb)
1909{
1910 int ret = 0;
1911
1912 do {
1913 unsigned short old = sb->s_writers.frozen;
1914
1915 up_write(&sb->s_umount);
1916 ret = wait_var_event_killable(&sb->s_writers.frozen,
1917 sb->s_writers.frozen != old);
1918 down_write(&sb->s_umount);
1919 } while (ret == 0 &&
1920 sb->s_writers.frozen != SB_UNFROZEN &&
1921 sb->s_writers.frozen != SB_FREEZE_COMPLETE);
1922
1923 return ret;
1924}
1925
7366f8b6
CB
1926#define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
1927#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST)
1928
1929static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
1930{
1931 WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1932 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1933
1934 if (who & FREEZE_HOLDER_KERNEL)
1935 ++sb->s_writers.freeze_kcount;
1936 if (who & FREEZE_HOLDER_USERSPACE)
1937 ++sb->s_writers.freeze_ucount;
1938 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1939}
1940
1941static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
1942{
1943 WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1944 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1945
1946 if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
1947 --sb->s_writers.freeze_kcount;
1948 if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
1949 --sb->s_writers.freeze_ucount;
1950 return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
1951}
1952
1953static inline bool may_freeze(struct super_block *sb, enum freeze_holder who)
1954{
1955 WARN_ON_ONCE((who & ~FREEZE_FLAGS));
1956 WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);
1957
1958 if (who & FREEZE_HOLDER_KERNEL)
1959 return (who & FREEZE_MAY_NEST) ||
1960 sb->s_writers.freeze_kcount == 0;
1961 if (who & FREEZE_HOLDER_USERSPACE)
1962 return (who & FREEZE_MAY_NEST) ||
1963 sb->s_writers.freeze_ucount == 0;
1964 return false;
1965}
1966
18e9e510 1967/**
7000d3c4
RD
1968 * freeze_super - lock the filesystem and force it into a consistent state
1969 * @sb: the super to lock
880b9577 1970 * @who: context that wants to freeze
18e9e510
JB
1971 *
1972 * Syncs the super to make sure the filesystem is consistent and calls the fs's
880b9577 1973 * freeze_fs. Subsequent calls to this without first thawing the fs may return
18e9e510 1974 * -EBUSY.
5accdf82 1975 *
880b9577
DW
1976 * @who should be:
1977 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
1978 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
7366f8b6 1979 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
880b9577
DW
1980 *
1981 * The @who argument distinguishes between the kernel and userspace trying to
1982 * freeze the filesystem. Although there cannot be multiple kernel freezes or
1983 * multiple userspace freezes in effect at any given time, the kernel and
1984 * userspace can both hold a filesystem frozen. The filesystem remains frozen
1985 * until there are no kernel or userspace freezes in effect.
1986 *
7366f8b6
CB
1987 * A filesystem may hold multiple devices and thus a filesystem may be
1988 * frozen through the block layer via multiple block devices. In this
1989 * case the request is marked as being allowed to nest by passing
1990 * FREEZE_MAY_NEST. The filesystem remains frozen until all block
1991 * devices are unfrozen. If multiple freezes are attempted without
1992 * FREEZE_MAY_NEST, -EBUSY will be returned.
1993 *
5accdf82
JK
1994 * During this function, sb->s_writers.frozen goes through these values:
1995 *
1996 * SB_UNFROZEN: File system is normal, all writes progress as usual.
1997 *
1998 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
1999 * writes should be blocked, though page faults are still allowed. We wait for
2000 * all writes to complete and then proceed to the next stage.
2001 *
2002 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
2003 * but internal fs threads can still modify the filesystem (although they
2004 * should not dirty new pages or inodes), writeback can run etc. After waiting
2005 * for all running page faults we sync the filesystem which will clean all
2006 * dirty pages and inodes (no new dirty pages or inodes can be created when
2007 * sync is running).
2008 *
2009 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
2010 * modification are blocked (e.g. XFS preallocation truncation on inode
2011 * reclaim). This is usually implemented by blocking new transactions for
2012 * filesystems that have them and need this additional guard. After all
2013 * internal writers are finished we call ->freeze_fs() to finish filesystem
2014 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
2015 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
2016 *
2017 * sb->s_writers.frozen is protected by sb->s_umount.
7366f8b6
CB
2018 *
2019 * Return: If the freeze was successful zero is returned. If the freeze
2020 * failed a negative error code is returned.
18e9e510 2021 */
880b9577 2022int freeze_super(struct super_block *sb, enum freeze_holder who)
18e9e510
JB
2023{
2024 int ret;
2025
f0cd9880
CB
2026 if (!super_lock_excl(sb)) {
2027 WARN_ON_ONCE("Dying superblock while freezing!");
2028 return -EINVAL;
2029 }
18e9e510 2030 atomic_inc(&sb->s_active);
051178c3 2031
59ba4fdd 2032retry:
880b9577 2033 if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
7366f8b6
CB
2034 if (may_freeze(sb, who))
2035 ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
2036 else
2037 ret = -EBUSY;
2038 /* All freezers share a single active reference. */
2039 deactivate_locked_super(sb);
2040 return ret;
880b9577
DW
2041 }
2042
5accdf82 2043 if (sb->s_writers.frozen != SB_UNFROZEN) {
59ba4fdd
DW
2044 ret = wait_for_partially_frozen(sb);
2045 if (ret) {
2046 deactivate_locked_super(sb);
2047 return ret;
2048 }
2049
2050 goto retry;
18e9e510
JB
2051 }
2052
bc98a42c 2053 if (sb_rdonly(sb)) {
5accdf82 2054 /* Nothing to do really... */
7366f8b6 2055 WARN_ON_ONCE(freeze_inc(sb, who) > 1);
5accdf82 2056 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
59ba4fdd 2057 wake_up_var(&sb->s_writers.frozen);
0ed33598 2058 super_unlock_excl(sb);
18e9e510
JB
2059 return 0;
2060 }
2061
5accdf82 2062 sb->s_writers.frozen = SB_FREEZE_WRITE;
5accdf82 2063 /* Release s_umount to preserve sb_start_write -> s_umount ordering */
0ed33598 2064 super_unlock_excl(sb);
5accdf82 2065 sb_wait_write(sb, SB_FREEZE_WRITE);
63513f85 2066 __super_lock_excl(sb);
5accdf82
JK
2067
2068 /* Now we go and block page faults... */
5accdf82 2069 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
5accdf82
JK
2070 sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
2071
2072 /* All writers are done so after syncing there won't be dirty data */
2719c716
DW
2073 ret = sync_filesystem(sb);
2074 if (ret) {
2075 sb->s_writers.frozen = SB_UNFROZEN;
2076 sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
59ba4fdd 2077 wake_up_var(&sb->s_writers.frozen);
2719c716
DW
2078 deactivate_locked_super(sb);
2079 return ret;
2080 }
18e9e510 2081
5accdf82
JK
2082 /* Now wait for internal filesystem counter */
2083 sb->s_writers.frozen = SB_FREEZE_FS;
5accdf82 2084 sb_wait_write(sb, SB_FREEZE_FS);
18e9e510 2085
18e9e510
JB
2086 if (sb->s_op->freeze_fs) {
2087 ret = sb->s_op->freeze_fs(sb);
2088 if (ret) {
2089			printk(KERN_ERR
2090				"VFS: Filesystem freeze failed\n");
5accdf82 2091 sb->s_writers.frozen = SB_UNFROZEN;
2719c716 2092 sb_freeze_unlock(sb, SB_FREEZE_FS);
59ba4fdd 2093 wake_up_var(&sb->s_writers.frozen);
18e9e510
JB
2094 deactivate_locked_super(sb);
2095 return ret;
2096 }
2097 }
5accdf82 2098 /*
89f39af1
ON
2099 * For debugging purposes so that fs can warn if it sees write activity
2100 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
5accdf82 2101 */
7366f8b6 2102 WARN_ON_ONCE(freeze_inc(sb, who) > 1);
5accdf82 2103 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
59ba4fdd 2104 wake_up_var(&sb->s_writers.frozen);
f1a96220 2105 lockdep_sb_freeze_release(sb);
0ed33598 2106 super_unlock_excl(sb);
18e9e510
JB
2107 return 0;
2108}
2109EXPORT_SYMBOL(freeze_super);
2110
880b9577
DW
2111/*
2112 * Undoes the effect of a freeze_super() call. If the filesystem is
2113 * frozen both by userspace and the kernel, a thaw call from either source
2114 * removes that state without releasing the other state or unlocking the
2115 * filesystem.
2116 */
2117static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
18e9e510 2118{
24c372d5 2119 int error = -EINVAL;
18e9e510 2120
24c372d5
CH
2121 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
2122 goto out_unlock;
880b9577 2123
24c372d5 2124 /*
7366f8b6
CB
2125 * All freezers share a single active reference.
2126 * So just unlock in case there are any left.
24c372d5 2127 */
7366f8b6
CB
2128 if (freeze_dec(sb, who))
2129 goto out_unlock;
18e9e510 2130
bc98a42c 2131 if (sb_rdonly(sb)) {
8129ed29 2132 sb->s_writers.frozen = SB_UNFROZEN;
59ba4fdd 2133 wake_up_var(&sb->s_writers.frozen);
24c372d5 2134 goto out_deactivate;
8129ed29 2135 }
18e9e510 2136
f1a96220
ON
2137 lockdep_sb_freeze_acquire(sb);
2138
18e9e510
JB
2139 if (sb->s_op->unfreeze_fs) {
2140 error = sb->s_op->unfreeze_fs(sb);
2141 if (error) {
7366f8b6
CB
2142 pr_err("VFS: Filesystem thaw failed\n");
2143 freeze_inc(sb, who);
f1a96220 2144 lockdep_sb_freeze_release(sb);
24c372d5 2145 goto out_unlock;
18e9e510
JB
2146 }
2147 }
2148
5accdf82 2149 sb->s_writers.frozen = SB_UNFROZEN;
59ba4fdd 2150 wake_up_var(&sb->s_writers.frozen);
2719c716 2151 sb_freeze_unlock(sb, SB_FREEZE_FS);
24c372d5 2152out_deactivate:
18e9e510 2153 deactivate_locked_super(sb);
18e9e510 2154 return 0;
24c372d5
CH
2155
2156out_unlock:
2157 super_unlock_excl(sb);
2158 return error;
18e9e510 2159}
08fdc8a0 2160
961f3c89
MCC
2161/**
2162 * thaw_super -- unlock filesystem
2163 * @sb: the super to thaw
880b9577
DW
2164 * @who: context that wants to thaw
2165 *
2166 * Unlocks the filesystem and marks it writeable again after freeze_super()
2167 * if there are no remaining freezes on the filesystem.
961f3c89 2168 *
880b9577
DW
2169 * @who should be:
2170 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
2171 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
7366f8b6
CB
2172 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
2173 *
2174 * A filesystem may hold multiple devices and thus a filesystem may
2175 * have been frozen through the block layer via multiple block devices.
2176 * The filesystem remains frozen until all block devices are unfrozen.
961f3c89 2177 */
880b9577 2178int thaw_super(struct super_block *sb, enum freeze_holder who)
08fdc8a0 2179{
f0cd9880
CB
2180 if (!super_lock_excl(sb)) {
2181 WARN_ON_ONCE("Dying superblock while thawing!");
2182 return -EINVAL;
2183 }
880b9577 2184 return thaw_super_locked(sb, who);
08fdc8a0 2185}
18e9e510 2186EXPORT_SYMBOL(thaw_super);
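/*
 * Illustrative sketch, not part of this file: a kernel subsystem
 * quiescing a filesystem around some operation via the freeze/thaw
 * API above.  A caller that must tolerate an already frozen
 * filesystem would OR in FREEZE_MAY_NEST.  example_run_frozen() is
 * hypothetical.
 */
static int example_run_frozen(struct super_block *sb,
			      int (*op)(struct super_block *))
{
	int err = freeze_super(sb, FREEZE_HOLDER_KERNEL);

	if (err)
		return err;
	err = op(sb);
	thaw_super(sb, FREEZE_HOLDER_KERNEL);
	return err;
}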
439bc39b
CH
2187
2188/*
2189 * Create a workqueue for deferred direct IO completions. We allocate the
2190 * workqueue when it's first needed. This avoids creating the workqueue for
2191 * filesystems that don't need it and also allows us to create the workqueue
2192 * late enough so that we can include s_id in the name of the workqueue.
2193 */
2194int sb_init_dio_done_wq(struct super_block *sb)
2195{
2196 struct workqueue_struct *old;
2197 struct workqueue_struct *wq = alloc_workqueue("dio/%s",
2198 WQ_MEM_RECLAIM, 0,
2199 sb->s_id);
2200 if (!wq)
2201 return -ENOMEM;
2202 /*
2203 * This has to be atomic as more DIOs can race to create the workqueue
2204 */
2205 old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
2206 /* Someone created workqueue before us? Free ours... */
2207 if (old)
2208 destroy_workqueue(wq);
2209 return 0;
2210}
389a4a4a 2211EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
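/*
 * Illustrative sketch, not part of this file: callers create the
 * per-superblock workqueue from process context before the IO is
 * submitted (it cannot be allocated from the completion context), and
 * the completion handler then simply defers work to it.  example_*
 * names are hypothetical.
 */
static int example_prepare_deferred_dio(struct super_block *sb)
{
	/* called before submitting a DIO that may need deferred completion */
	return sb_init_dio_done_wq(sb);
}

static void example_dio_end_io(struct super_block *sb, struct work_struct *work)
{
	/* completion side: hand the heavy lifting to the sb workqueue */
	queue_work(sb->s_dio_done_wq, work);
}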