// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who,
			     const void *freeze_owner);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for that to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: Returns true if SB_BORN was set, with s_umount held.
 *         Returns false if SB_DYING was set, without s_umount held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;

	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}
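
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * walker that found @sb on a list takes a temporary reference under
 * sb_lock before waiting in super_lock():
 *
 *	spin_lock(&sb_lock);
 *	sb->s_count++;
 *	spin_unlock(&sb_lock);
 *	if (super_lock_shared(sb)) {
 *		work_on_sb(sb);		(hypothetical helper)
 *		super_unlock_shared(sb);
 *	}
 *	put_super(sb);
 */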

/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by the user
	 * seeing SB_BORN set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long fs_objects = 0;
	long total_objects;
	long freed = 0;
	long dentries;
	long inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
					     destroy_work);
	fsnotify_sb_free(s);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	for (int i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);

	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer to the new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees the superblock if there are no
 * references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell the fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;

	if (atomic_dec_and_test(&s->s_active)) {
		shrinker_free(s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that the superblock is *not*
 * locked by the caller. If we are going to drop the final active reference,
 * the lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * ->kill_sb() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
	bool locked;

	sb->s_count++;
	spin_unlock(&sb_lock);
	locked = super_lock_excl(sb);
	if (locked) {
		if (atomic_inc_not_zero(&sb->s_active)) {
			put_super(sb);
			return true;
		}
		super_unlock_excl(sb);
	}
	wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
	put_super(sb);
	return false;
}

/*
 * super_trylock_shared - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown.  This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and the
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read(). There are a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}
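
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * path that must never block on ->s_umount, such as the shrinker above,
 * uses the trylock variant and simply backs off on failure:
 *
 *	if (!super_trylock_shared(sb))
 *		return SHRINK_STOP;
 *	...touch superblock state...
 *	super_unlock_shared(sb);
 */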

/**
 * retire_super - prevents superblock from being reused
 * @sb: superblock to retire
 *
 * The function marks the superblock as ignored by the superblock test,
 * which prevents it from being reused for any new mounts. If the superblock
 * has a private bdi, it also unregisters it, but doesn't reduce the
 * refcount of the superblock to prevent potential races. The refcount is
 * reduced by generic_shutdown_super(). The function cannot be called
 * concurrently with generic_shutdown_super(). It is safe to call the
 * function multiple times, subsequent calls have no effect.
 *
 * The marker will affect the re-use only for block-device-based
 * superblocks. Other superblocks will still get marked if this function
 * is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects. Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount(sb);

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL,
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc: Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

	/*
	 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
	 * not set, as the filesystem is likely unprepared to handle it.
	 * This can happen when fsconfig() is called from init_user_ns with
	 * an fs_fd opened in another user namespace.
	 */
	if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
		errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
		return ERR_PTR(-EPERM);
	}

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
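
/*
 * Illustrative sketch (hypothetical filesystem, names made up): a
 * ->get_tree() implementation built on sget_fc() supplies a @test that
 * compares whatever identifies "the same" superblock and a @set that does
 * basic initialisation, mirroring vfs_get_super() below:
 *
 *	sb = sget_fc(fc, examplefs_test, examplefs_set);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	if (!sb->s_root) {
 *		err = examplefs_fill_super(sb, fc);
 *		if (err) {
 *			deactivate_locked_super(sb);
 *			return err;
 *		}
 *		sb->s_flags |= SB_ACTIVE;
 *	}
 *	fc->root = dget(sb->s_root);
 */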

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			 int (*test)(struct super_block *, void *),
			 int (*set)(struct super_block *, void *),
			 int flags,
			 void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

enum super_iter_flags_t {
	SUPER_ITER_EXCL		= (1U << 0),
	SUPER_ITER_UNLOCKED	= (1U << 1),
	SUPER_ITER_REVERSE	= (1U << 2),
};

static inline struct super_block *first_super(enum super_iter_flags_t flags)
{
	if (flags & SUPER_ITER_REVERSE)
		return list_last_entry(&super_blocks, struct super_block, s_list);
	return list_first_entry(&super_blocks, struct super_block, s_list);
}

static inline struct super_block *next_super(struct super_block *sb,
					     enum super_iter_flags_t flags)
{
	if (flags & SUPER_ITER_REVERSE)
		return list_prev_entry(sb, s_list);
	return list_next_entry(sb, s_list);
}

static void __iterate_supers(void (*f)(struct super_block *, void *), void *arg,
			     enum super_iter_flags_t flags)
{
	struct super_block *sb, *p = NULL;
	bool excl = flags & SUPER_ITER_EXCL;

	guard(spinlock)(&sb_lock);

	for (sb = first_super(flags);
	     !list_entry_is_head(sb, &super_blocks, s_list);
	     sb = next_super(sb, flags)) {
		if (super_flags(sb, SB_DYING))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		if (flags & SUPER_ITER_UNLOCKED) {
			f(sb, arg);
		} else if (super_lock(sb, excl)) {
			f(sb, arg);
			super_unlock(sb, excl);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
}

void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	__iterate_supers(f, arg, 0);
}
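
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * sync-style path visits every live superblock with a callback that runs
 * under s_umount held shared:
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!sb_rdonly(sb))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 */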

/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
			 void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool locked;

		if (super_flags(sb, SB_DYING))
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool locked;

		if (sb->s_dev != dev)
			continue;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock(sb, excl);
		if (locked)
			return sb;

		spin_lock(&sb_lock);
		__put_super(sb);
		break;
	}
	spin_unlock(&sb_lock);
	return NULL;
}
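
/*
 * Illustrative sketch (hypothetical caller): a lookup by device number
 * pairs user_get_super() with drop_super{_exclusive}(), which drops both
 * the lock and the temporary reference:
 *
 *	sb = user_get_super(dev, false);
 *	if (sb) {
 *		...inspect sb under s_umount held shared...
 *		drop_super(sb);
 *	}
 */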

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/*
	 * If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb, void *unused)
{
	if (sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback, NULL,
			 SUPER_ITER_EXCL | SUPER_ITER_REVERSE);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb, void *unused)
{
	if (IS_ENABLED(CONFIG_BLOCK))
		while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
			pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
	thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE, NULL);
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback, NULL, SUPER_ITER_EXCL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static inline bool get_active_super(struct super_block *sb)
{
	bool active = false;

	if (super_lock_excl(sb)) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	return active;
}

static const char *filesystems_freeze_ptr = "filesystems_freeze";

static void filesystems_freeze_callback(struct super_block *sb, void *unused)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->freeze_super)
		sb->s_op->freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				       filesystems_freeze_ptr);
	else
		freeze_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			     filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_freeze(void)
{
	__iterate_supers(filesystems_freeze_callback, NULL,
			 SUPER_ITER_UNLOCKED | SUPER_ITER_REVERSE);
}

static void filesystems_thaw_callback(struct super_block *sb, void *unused)
{
	if (!sb->s_op->freeze_fs && !sb->s_op->freeze_super)
		return;

	if (!get_active_super(sb))
		return;

	if (sb->s_op->thaw_super)
		sb->s_op->thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
				     filesystems_freeze_ptr);
	else
		thaw_super(sb, FREEZE_EXCL | FREEZE_HOLDER_KERNEL,
			   filesystems_freeze_ptr);

	deactivate_super(sb);
}

void filesystems_thaw(void)
{
	__iterate_supers(filesystems_thaw_callback, NULL, SUPER_ITER_UNLOCKED);
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			      GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;

	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
			 int (*test)(struct super_block *, struct fs_context *),
			 int (*fill_super)(struct super_block *sb,
					   struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		   int (*fill_super)(struct super_block *sb,
				     struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		    int (*fill_super)(struct super_block *sb,
				      struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		   int (*fill_super)(struct super_block *sb,
				     struct fs_context *fc),
		   void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
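
/*
 * Illustrative sketch (hypothetical filesystem, names made up): a
 * filesystem that wants one superblock per key object, e.g. per
 * namespace, passes that object as the key:
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_keyed(fc, examplefs_fill_super,
 *				      examplefs_key_of(fc));
 *	}
 *
 * Two mounts with the same key then share a superblock via
 * test_keyed_super() above.
 */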

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

#ifdef CONFIG_BLOCK
/*
 * Lock the superblock that is holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool locked;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);
	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);

	mutex_unlock(&bdev->bd_holder_lock);

	locked = super_lock(sb, excl);

	/*
	 * If the superblock wasn't already SB_DYING then we hold
	 * s_umount and can safely drop our temporary reference.
	 */
	put_super(sb);

	if (!locked)
		return NULL;

	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock(sb, excl);
		return NULL;
	}

	return sb;
}

static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	if (!surprise)
		sync_filesystem(sb);
	shrink_dcache_sb(sb);
	evict_inodes(sb);
	if (sb->s_op->shutdown)
		sb->s_op->shutdown(sb);

	super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	sync_filesystem(sb);
	super_unlock_shared(sb);
}

static struct super_block *get_bdev_super(struct block_device *bdev)
{
	bool active = false;
	struct super_block *sb;

	sb = bdev_super_lock(bdev, true);
	if (sb) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	if (!active)
		return NULL;
	return sb;
}

/**
 * fs_bdev_freeze - freeze owning filesystem of block device
 * @bdev: block device
 *
 * Freeze the filesystem that owns this block device if it is still
 * active.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
static int fs_bdev_freeze(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	else
		error = freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	if (!error)
		error = sync_blockdev(bdev);
	deactivate_super(sb);
	return error;
}

/**
 * fs_bdev_thaw - thaw owning filesystem of block device
 * @bdev: block device
 *
 * Thaw the filesystem that owns this block device.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the thaw was successful zero is returned. If the thaw
 *         failed a negative error code is returned. If this function
 *         returns zero it doesn't mean that the filesystem is unfrozen
 *         as it may have been frozen multiple times (kernel may hold a
 *         freeze or might be frozen from other block devices).
 */
static int fs_bdev_thaw(struct block_device *bdev)
{
	struct super_block *sb;
	int error;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	/*
	 * The block device may have been frozen before it was claimed by a
	 * filesystem. Concurrently another process might try to mount that
	 * frozen block device and has temporarily claimed the block device for
	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
	 * mounter is already about to abort mounting because they still saw an
	 * elevated bdev->bd_fsfreeze_count so get_bdev_super() will return
	 * NULL in that case.
	 */
	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	else
		error = thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE, NULL);
	deactivate_super(sb);
	return error;
}
| 1583 | |
| 1584 | const struct blk_holder_ops fs_holder_ops = { |
| 1585 | .mark_dead = fs_bdev_mark_dead, |
| 1586 | .sync = fs_bdev_sync, |
| 1587 | .freeze = fs_bdev_freeze, |
| 1588 | .thaw = fs_bdev_thaw, |
| 1589 | }; |
| 1590 | EXPORT_SYMBOL_GPL(fs_holder_ops); |
| 1591 | |
| 1592 | int setup_bdev_super(struct super_block *sb, int sb_flags, |
| 1593 | struct fs_context *fc) |
| 1594 | { |
| 1595 | blk_mode_t mode = sb_open_mode(sb_flags); |
| 1596 | struct file *bdev_file; |
| 1597 | struct block_device *bdev; |
| 1598 | |
| 1599 | bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops); |
| 1600 | if (IS_ERR(bdev_file)) { |
| 1601 | if (fc) |
| 1602 | errorf(fc, "%s: Can't open blockdev", fc->source); |
| 1603 | return PTR_ERR(bdev_file); |
| 1604 | } |
| 1605 | bdev = file_bdev(bdev_file); |
| 1606 | |
| 1607 | /* |
| 1608 | * This really should be in bdev_file_open_by_dev(), but right now we can't
| 1609 | * due to legacy issues that require us to allow opening a block device node
| 1610 | * writable from userspace even for a read-only block device.
| 1611 | */ |
| 1612 | if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) { |
| 1613 | bdev_fput(bdev_file); |
| 1614 | return -EACCES; |
| 1615 | } |
| 1616 | |
| 1617 | /* |
| 1618 | * It is enough to check that the bdev was not frozen before we set
| 1619 | * s_bdev, as freezing will wait until SB_BORN is set.
| 1620 | */ |
| 1621 | if (atomic_read(&bdev->bd_fsfreeze_count) > 0) { |
| 1622 | if (fc) |
| 1623 | warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev); |
| 1624 | bdev_fput(bdev_file); |
| 1625 | return -EBUSY; |
| 1626 | } |
| 1627 | spin_lock(&sb_lock); |
| 1628 | sb->s_bdev_file = bdev_file; |
| 1629 | sb->s_bdev = bdev; |
| 1630 | sb->s_bdi = bdi_get(bdev->bd_disk->bdi); |
| 1631 | if (bdev_stable_writes(bdev)) |
| 1632 | sb->s_iflags |= SB_I_STABLE_WRITES; |
| 1633 | spin_unlock(&sb_lock); |
| 1634 | |
| 1635 | snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev); |
| 1636 | shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name, |
| 1637 | sb->s_id); |
| 1638 | sb_set_blocksize(sb, block_size(bdev)); |
| 1639 | return 0; |
| 1640 | } |
| 1641 | EXPORT_SYMBOL_GPL(setup_bdev_super); |
| 1642 | |
| 1643 | /** |
| 1644 | * get_tree_bdev_flags - Get a superblock based on a single block device |
| 1645 | * @fc: The filesystem context holding the parameters |
| 1646 | * @fill_super: Helper to initialise a new superblock |
| 1647 | * @flags: GET_TREE_BDEV_* flags |
| 1648 | */ |
| 1649 | int get_tree_bdev_flags(struct fs_context *fc, |
| 1650 | int (*fill_super)(struct super_block *sb, |
| 1651 | struct fs_context *fc), unsigned int flags) |
| 1652 | { |
| 1653 | struct super_block *s; |
| 1654 | int error = 0; |
| 1655 | dev_t dev; |
| 1656 | |
| 1657 | if (!fc->source) |
| 1658 | return invalf(fc, "No source specified"); |
| 1659 | |
| 1660 | error = lookup_bdev(fc->source, &dev); |
| 1661 | if (error) { |
| 1662 | if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP)) |
| 1663 | errorf(fc, "%s: Can't lookup blockdev", fc->source); |
| 1664 | return error; |
| 1665 | } |
| 1666 | fc->sb_flags |= SB_NOSEC; |
| 1667 | s = sget_dev(fc, dev); |
| 1668 | if (IS_ERR(s)) |
| 1669 | return PTR_ERR(s); |
| 1670 | |
| 1671 | if (s->s_root) { |
| 1672 | /* Don't summarily change the RO/RW state. */ |
| 1673 | if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) { |
| 1674 | warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev); |
| 1675 | deactivate_locked_super(s); |
| 1676 | return -EBUSY; |
| 1677 | } |
| 1678 | } else { |
| 1679 | error = setup_bdev_super(s, fc->sb_flags, fc); |
| 1680 | if (!error) |
| 1681 | error = fill_super(s, fc); |
| 1682 | if (error) { |
| 1683 | deactivate_locked_super(s); |
| 1684 | return error; |
| 1685 | } |
| 1686 | s->s_flags |= SB_ACTIVE; |
| 1687 | } |
| 1688 | |
| 1689 | BUG_ON(fc->root); |
| 1690 | fc->root = dget(s->s_root); |
| 1691 | return 0; |
| 1692 | } |
| 1693 | EXPORT_SYMBOL_GPL(get_tree_bdev_flags); |
| 1694 | |
| 1695 | /** |
| 1696 | * get_tree_bdev - Get a superblock based on a single block device |
| 1697 | * @fc: The filesystem context holding the parameters |
| 1698 | * @fill_super: Helper to initialise a new superblock |
| 1699 | */ |
| 1700 | int get_tree_bdev(struct fs_context *fc, |
| 1701 | int (*fill_super)(struct super_block *, |
| 1702 | struct fs_context *)) |
| 1703 | { |
| 1704 | return get_tree_bdev_flags(fc, fill_super, 0); |
| 1705 | } |
| 1706 | EXPORT_SYMBOL(get_tree_bdev); |
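| |
| | /*
| | * Example (hedged sketch, illustration only): a block-based filesystem's
| | * ->get_tree() typically just wraps get_tree_bdev() with its own
| | * fill_super helper; the "examplefs" names are hypothetical.
| | */
| | #if 0
| | static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc);
| |
| | static int examplefs_get_tree(struct fs_context *fc)
| | {
| | return get_tree_bdev(fc, examplefs_fill_super);
| | }
| |
| | static const struct fs_context_operations examplefs_context_ops = {
| | .get_tree = examplefs_get_tree,
| | };
| | #endif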
| 1707 | |
| 1708 | static int test_bdev_super(struct super_block *s, void *data) |
| 1709 | { |
| 1710 | return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data; |
| 1711 | } |
| 1712 | |
| 1713 | struct dentry *mount_bdev(struct file_system_type *fs_type, |
| 1714 | int flags, const char *dev_name, void *data, |
| 1715 | int (*fill_super)(struct super_block *, void *, int)) |
| 1716 | { |
| 1717 | struct super_block *s; |
| 1718 | int error; |
| 1719 | dev_t dev; |
| 1720 | |
| 1721 | error = lookup_bdev(dev_name, &dev); |
| 1722 | if (error) |
| 1723 | return ERR_PTR(error); |
| 1724 | |
| 1725 | flags |= SB_NOSEC; |
| 1726 | s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev); |
| 1727 | if (IS_ERR(s)) |
| 1728 | return ERR_CAST(s); |
| 1729 | |
| 1730 | if (s->s_root) { |
| 1731 | if ((flags ^ s->s_flags) & SB_RDONLY) { |
| 1732 | deactivate_locked_super(s); |
| 1733 | return ERR_PTR(-EBUSY); |
| 1734 | } |
| 1735 | } else { |
| 1736 | error = setup_bdev_super(s, flags, NULL); |
| 1737 | if (!error) |
| 1738 | error = fill_super(s, data, flags & SB_SILENT ? 1 : 0); |
| 1739 | if (error) { |
| 1740 | deactivate_locked_super(s); |
| 1741 | return ERR_PTR(error); |
| 1742 | } |
| 1743 | |
| 1744 | s->s_flags |= SB_ACTIVE; |
| 1745 | } |
| 1746 | |
| 1747 | return dget(s->s_root); |
| 1748 | } |
| 1749 | EXPORT_SYMBOL(mount_bdev); |
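| |
| | /*
| | * Example (hedged sketch, illustration only): legacy filesystems that
| | * still implement ->mount() wrap mount_bdev() like this; the "examplefs"
| | * names are hypothetical.
| | */
| | #if 0
| | static int examplefs_legacy_fill_super(struct super_block *sb, void *data,
| | int silent);
| |
| | static struct dentry *examplefs_mount(struct file_system_type *fs_type,
| | int flags, const char *dev_name, void *data)
| | {
| | return mount_bdev(fs_type, flags, dev_name, data,
| | examplefs_legacy_fill_super);
| | }
| | #endif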
| 1750 | |
| 1751 | void kill_block_super(struct super_block *sb) |
| 1752 | { |
| 1753 | struct block_device *bdev = sb->s_bdev; |
| 1754 | |
| 1755 | generic_shutdown_super(sb); |
| 1756 | if (bdev) { |
| 1757 | sync_blockdev(bdev); |
| 1758 | bdev_fput(sb->s_bdev_file); |
| 1759 | } |
| 1760 | }
| 1762 | EXPORT_SYMBOL(kill_block_super);
| 1763 | #endif |
| 1764 | |
| 1765 | struct dentry *mount_nodev(struct file_system_type *fs_type, |
| 1766 | int flags, void *data, |
| 1767 | int (*fill_super)(struct super_block *, void *, int)) |
| 1768 | { |
| 1769 | int error; |
| 1770 | struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL); |
| 1771 | |
| 1772 | if (IS_ERR(s)) |
| 1773 | return ERR_CAST(s); |
| 1774 | |
| 1775 | error = fill_super(s, data, flags & SB_SILENT ? 1 : 0); |
| 1776 | if (error) { |
| 1777 | deactivate_locked_super(s); |
| 1778 | return ERR_PTR(error); |
| 1779 | } |
| 1780 | s->s_flags |= SB_ACTIVE; |
| 1781 | return dget(s->s_root); |
| 1782 | } |
| 1783 | EXPORT_SYMBOL(mount_nodev); |
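| |
| | /*
| | * Example (hedged sketch, illustration only): a virtual filesystem with
| | * no backing device ignores dev_name and hands its fill_super helper to
| | * mount_nodev(); names are hypothetical.
| | */
| | #if 0
| | static struct dentry *examplefs_nodev_mount(struct file_system_type *fs_type,
| | int flags, const char *dev_name, void *data)
| | {
| | return mount_nodev(fs_type, flags, data, examplefs_legacy_fill_super);
| | }
| | #endif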
| 1784 | |
| 1785 | /** |
| 1786 | * vfs_get_tree - Get the mountable root |
| 1787 | * @fc: The superblock configuration context. |
| 1788 | * |
| 1789 | * The filesystem is invoked to get or create a superblock which can then later |
| 1790 | * be used for mounting. The filesystem places a pointer to the root to be |
| 1791 | * used for mounting in @fc->root. |
| 1792 | */ |
| 1793 | int vfs_get_tree(struct fs_context *fc) |
| 1794 | { |
| 1795 | struct super_block *sb; |
| 1796 | int error; |
| 1797 | |
| 1798 | if (fc->root) |
| 1799 | return -EBUSY; |
| 1800 | |
| 1801 | /* Get the mountable root in fc->root, with a ref on the root and a ref |
| 1802 | * on the superblock. |
| 1803 | */ |
| 1804 | error = fc->ops->get_tree(fc); |
| 1805 | if (error < 0) |
| 1806 | return error; |
| 1807 | |
| 1808 | if (!fc->root) { |
| 1809 | pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n", |
| 1810 | fc->fs_type->name, error); |
| 1811 | /* We don't know what the locking state of the superblock is - |
| 1812 | * if there is a superblock. |
| 1813 | */ |
| 1814 | BUG(); |
| 1815 | } |
| 1816 | |
| 1817 | sb = fc->root->d_sb; |
| 1818 | WARN_ON(!sb->s_bdi); |
| 1819 | |
| 1820 | /* |
| 1821 | * super_wake() contains a memory barrier which also takes care of
| 1822 | * ordering for super_cache_count(). We place it before setting |
| 1823 | * SB_BORN as the data dependency between the two functions is |
| 1824 | * the superblock structure contents that we just set up, not |
| 1825 | * the SB_BORN flag. |
| 1826 | */ |
| 1827 | super_wake(sb, SB_BORN); |
| 1828 | |
| 1829 | error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL); |
| 1830 | if (unlikely(error)) { |
| 1831 | fc_drop_locked(fc); |
| 1832 | return error; |
| 1833 | } |
| 1834 | |
| 1835 | /* |
| 1836 | * Filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
| 1837 | * but s_maxbytes was an unsigned long long for many releases. Throw
| 1838 | * this warning for a little while to try to catch filesystems that
| 1839 | * violate this rule.
| 1840 | */ |
| 1841 | WARN(sb->s_maxbytes < 0, "%s set sb->s_maxbytes to negative value (%lld)\n",
| 1842 | fc->fs_type->name, sb->s_maxbytes);
| 1843 | |
| 1844 | return 0; |
| 1845 | } |
| 1846 | EXPORT_SYMBOL(vfs_get_tree); |
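| |
| | /*
| | * Example (hedged sketch of the usual call sequence, error handling
| | * trimmed; @type, @sb_flags and @data come from the caller): the mount
| | * path builds an fs_context, asks the filesystem for its tree via
| | * vfs_get_tree() and then turns the result into a vfsmount.
| | */
| | #if 0
| | struct fs_context *fc;
| | struct vfsmount *mnt = NULL;
| | int err;
| |
| | fc = fs_context_for_mount(type, sb_flags);
| | if (IS_ERR(fc))
| | return ERR_CAST(fc);
| | err = parse_monolithic_mount_data(fc, data);
| | if (!err)
| | err = vfs_get_tree(fc); /* sets fc->root on success */
| | if (!err)
| | mnt = vfs_create_mount(fc);
| | put_fs_context(fc);
| | #endif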
| 1847 | |
| 1848 | /* |
| 1849 | * Setup private BDI for given superblock. It gets automatically cleaned up |
| 1850 | * in generic_shutdown_super(). |
| 1851 | */ |
| 1852 | int super_setup_bdi_name(struct super_block *sb, char *fmt, ...) |
| 1853 | { |
| 1854 | struct backing_dev_info *bdi; |
| 1855 | int err; |
| 1856 | va_list args; |
| 1857 | |
| 1858 | bdi = bdi_alloc(NUMA_NO_NODE); |
| 1859 | if (!bdi) |
| 1860 | return -ENOMEM; |
| 1861 | |
| 1862 | va_start(args, fmt); |
| 1863 | err = bdi_register_va(bdi, fmt, args); |
| 1864 | va_end(args); |
| 1865 | if (err) { |
| 1866 | bdi_put(bdi); |
| 1867 | return err; |
| 1868 | } |
| 1869 | WARN_ON(sb->s_bdi != &noop_backing_dev_info); |
| 1870 | sb->s_bdi = bdi; |
| 1871 | sb->s_iflags |= SB_I_PERSB_BDI; |
| 1872 | |
| 1873 | return 0; |
| 1874 | } |
| 1875 | EXPORT_SYMBOL(super_setup_bdi_name); |
| 1876 | |
| 1877 | /* |
| 1878 | * Setup private BDI for given superblock. It gets automatically cleaned up
| 1879 | * in generic_shutdown_super(). |
| 1880 | */ |
| 1881 | int super_setup_bdi(struct super_block *sb) |
| 1882 | { |
| 1883 | static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); |
| 1884 | |
| 1885 | return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name, |
| 1886 | atomic_long_inc_return(&bdi_seq)); |
| 1887 | } |
| 1888 | EXPORT_SYMBOL(super_setup_bdi); |
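| |
| | /*
| | * Example (hedged sketch, illustration only): a filesystem without a
| | * backing block device typically sets up its private BDI early in its
| | * fill_super helper; the "examplefs" name is hypothetical.
| | */
| | #if 0
| | static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
| | {
| | int err;
| |
| | /* Registers "examplefs-<seq>" and replaces noop_backing_dev_info. */
| | err = super_setup_bdi(sb);
| | if (err)
| | return err;
| | /* ... the rest of the superblock setup ... */
| | return 0;
| | }
| | #endif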
| 1889 | |
| 1890 | /** |
| 1891 | * sb_wait_write - wait until all writers to given file system finish |
| 1892 | * @sb: the super for which we wait |
| 1893 | * @level: type of writers we wait for (normal vs page fault) |
| 1894 | * |
| 1895 | * This function waits until there are no writers of the given type to the
| 1896 | * given file system.
| 1897 | */ |
| 1898 | static void sb_wait_write(struct super_block *sb, int level) |
| 1899 | { |
| 1900 | percpu_down_write(sb->s_writers.rw_sem + level - 1);
| 1901 | } |
| 1902 | |
| 1903 | /* |
| 1904 | * We are going to return to userspace and forget about these locks; the
| 1905 | * ownership goes to the caller of thaw_super(), which does the unlocking.
| 1906 | */ |
| 1907 | static void lockdep_sb_freeze_release(struct super_block *sb) |
| 1908 | { |
| 1909 | int level; |
| 1910 | |
| 1911 | for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--) |
| 1912 | percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_); |
| 1913 | } |
| 1914 | |
| 1915 | /* |
| 1916 | * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb). |
| 1917 | */ |
| 1918 | static void lockdep_sb_freeze_acquire(struct super_block *sb) |
| 1919 | { |
| 1920 | int level; |
| 1921 | |
| 1922 | for (level = 0; level < SB_FREEZE_LEVELS; ++level) |
| 1923 | percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_); |
| 1924 | } |
| 1925 | |
| 1926 | static void sb_freeze_unlock(struct super_block *sb, int level) |
| 1927 | { |
| 1928 | for (level--; level >= 0; level--) |
| 1929 | percpu_up_write(sb->s_writers.rw_sem + level); |
| 1930 | } |
| 1931 | |
| 1932 | static int wait_for_partially_frozen(struct super_block *sb) |
| 1933 | { |
| 1934 | int ret = 0; |
| 1935 | |
| 1936 | do { |
| 1937 | unsigned short old = sb->s_writers.frozen; |
| 1938 | |
| 1939 | up_write(&sb->s_umount); |
| 1940 | ret = wait_var_event_killable(&sb->s_writers.frozen, |
| 1941 | sb->s_writers.frozen != old); |
| 1942 | down_write(&sb->s_umount); |
| 1943 | } while (ret == 0 && |
| 1944 | sb->s_writers.frozen != SB_UNFROZEN && |
| 1945 | sb->s_writers.frozen != SB_FREEZE_COMPLETE); |
| 1946 | |
| 1947 | return ret; |
| 1948 | } |
| 1949 | |
| 1950 | #define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE) |
| 1951 | #define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST | FREEZE_EXCL) |
| 1952 | |
| 1953 | static inline int freeze_inc(struct super_block *sb, enum freeze_holder who) |
| 1954 | { |
| 1955 | WARN_ON_ONCE((who & ~FREEZE_FLAGS)); |
| 1956 | WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1); |
| 1957 | |
| 1958 | if (who & FREEZE_HOLDER_KERNEL) |
| 1959 | ++sb->s_writers.freeze_kcount; |
| 1960 | if (who & FREEZE_HOLDER_USERSPACE) |
| 1961 | ++sb->s_writers.freeze_ucount; |
| 1962 | return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount; |
| 1963 | } |
| 1964 | |
| 1965 | static inline int freeze_dec(struct super_block *sb, enum freeze_holder who) |
| 1966 | { |
| 1967 | WARN_ON_ONCE((who & ~FREEZE_FLAGS)); |
| 1968 | WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1); |
| 1969 | |
| 1970 | if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount) |
| 1971 | --sb->s_writers.freeze_kcount; |
| 1972 | if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount) |
| 1973 | --sb->s_writers.freeze_ucount; |
| 1974 | return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount; |
| 1975 | } |
| 1976 | |
| 1977 | static inline bool may_freeze(struct super_block *sb, enum freeze_holder who, |
| 1978 | const void *freeze_owner) |
| 1979 | { |
| 1980 | lockdep_assert_held(&sb->s_umount); |
| 1981 | |
| 1982 | WARN_ON_ONCE((who & ~FREEZE_FLAGS)); |
| 1983 | WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1); |
| 1984 | |
| 1985 | if (who & FREEZE_EXCL) { |
| 1986 | if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL))) |
| 1987 | return false; |
| 1988 | if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL))) |
| 1989 | return false; |
| 1990 | if (WARN_ON_ONCE(!freeze_owner)) |
| 1991 | return false; |
| 1992 | /* This freeze already has a specific owner. */ |
| 1993 | if (sb->s_writers.freeze_owner) |
| 1994 | return false; |
| 1995 | /* |
| 1996 | * This is already frozen multiple times so we're just |
| 1997 | * going to take a reference count and mark the freeze as |
| 1998 | * being owned by the caller. |
| 1999 | */ |
| 2000 | if (sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) |
| 2001 | sb->s_writers.freeze_owner = freeze_owner; |
| 2002 | return true; |
| 2003 | } |
| 2004 | |
| 2005 | if (who & FREEZE_HOLDER_KERNEL) |
| 2006 | return (who & FREEZE_MAY_NEST) || |
| 2007 | sb->s_writers.freeze_kcount == 0; |
| 2008 | if (who & FREEZE_HOLDER_USERSPACE) |
| 2009 | return (who & FREEZE_MAY_NEST) || |
| 2010 | sb->s_writers.freeze_ucount == 0; |
| 2011 | return false; |
| 2012 | } |
| 2013 | |
| 2014 | static inline bool may_unfreeze(struct super_block *sb, enum freeze_holder who, |
| 2015 | const void *freeze_owner) |
| 2016 | { |
| 2017 | lockdep_assert_held(&sb->s_umount); |
| 2018 | |
| 2019 | WARN_ON_ONCE((who & ~FREEZE_FLAGS)); |
| 2020 | WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1); |
| 2021 | |
| 2022 | if (who & FREEZE_EXCL) { |
| 2023 | if (WARN_ON_ONCE(!(who & FREEZE_HOLDER_KERNEL))) |
| 2024 | return false; |
| 2025 | if (WARN_ON_ONCE(who & ~(FREEZE_EXCL | FREEZE_HOLDER_KERNEL))) |
| 2026 | return false; |
| 2027 | if (WARN_ON_ONCE(!freeze_owner)) |
| 2028 | return false; |
| 2029 | if (WARN_ON_ONCE(sb->s_writers.freeze_kcount == 0)) |
| 2030 | return false; |
| 2031 | /* This isn't exclusively frozen. */ |
| 2032 | if (!sb->s_writers.freeze_owner) |
| 2033 | return false; |
| 2034 | /* This isn't exclusively frozen by us. */ |
| 2035 | if (sb->s_writers.freeze_owner != freeze_owner) |
| 2036 | return false; |
| 2037 | /* |
| 2038 | * This is still frozen multiple times so we're just |
| 2039 | * going to drop our reference count and undo our |
| 2040 | * exclusive freeze. |
| 2041 | */ |
| 2042 | if ((sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount) > 1) |
| 2043 | sb->s_writers.freeze_owner = NULL; |
| 2044 | return true; |
| 2045 | } |
| 2046 | |
| 2047 | if (who & FREEZE_HOLDER_KERNEL) { |
| 2048 | /* |
| 2049 | * Someone's trying to steal the reference belonging to |
| 2050 | * @sb->s_writers.freeze_owner. |
| 2051 | */ |
| 2052 | if (sb->s_writers.freeze_kcount == 1 && |
| 2053 | sb->s_writers.freeze_owner) |
| 2054 | return false; |
| 2055 | return sb->s_writers.freeze_kcount > 0; |
| 2056 | } |
| 2057 | |
| 2058 | if (who & FREEZE_HOLDER_USERSPACE) |
| 2059 | return sb->s_writers.freeze_ucount > 0; |
| 2060 | |
| 2061 | return false; |
| 2062 | } |
| 2063 | |
| 2064 | /** |
| 2065 | * freeze_super - lock the filesystem and force it into a consistent state |
| 2066 | * @sb: the super to lock |
| 2067 | * @who: context that wants to freeze |
| 2068 | * @freeze_owner: owner of the freeze |
| 2069 | * |
| 2070 | * Syncs the super to make sure the filesystem is consistent and calls the fs's |
| 2071 | * freeze_fs. Subsequent calls to this without first thawing the fs may return |
| 2072 | * -EBUSY. |
| 2073 | * |
| 2074 | * @who should be: |
| 2075 | * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs; |
| 2076 | * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs;
| 2077 | * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed. |
| 2078 | * |
| 2079 | * The @who argument distinguishes between the kernel and userspace trying to |
| 2080 | * freeze the filesystem. Although there cannot be multiple kernel freezes or |
| 2081 | * multiple userspace freezes in effect at any given time, the kernel and |
| 2082 | * userspace can both hold a filesystem frozen. The filesystem remains frozen |
| 2083 | * until there are no kernel or userspace freezes in effect. |
| 2084 | * |
| 2085 | * A filesystem may hold multiple devices and thus may be
| 2086 | * frozen through the block layer via multiple block devices. In this |
| 2087 | * case the request is marked as being allowed to nest by passing |
| 2088 | * FREEZE_MAY_NEST. The filesystem remains frozen until all block |
| 2089 | * devices are unfrozen. If multiple freezes are attempted without
| 2090 | * FREEZE_MAY_NEST, -EBUSY will be returned.
| 2091 | * |
| 2092 | * During this function, sb->s_writers.frozen goes through these values: |
| 2093 | * |
| 2094 | * SB_UNFROZEN: File system is normal, all writes progress as usual. |
| 2095 | * |
| 2096 | * SB_FREEZE_WRITE: The file system is in the process of being frozen. New |
| 2097 | * writes should be blocked, though page faults are still allowed. We wait for |
| 2098 | * all writes to complete and then proceed to the next stage. |
| 2099 | * |
| 2100 | * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked |
| 2101 | * but internal fs threads can still modify the filesystem (although they |
| 2102 | * should not dirty new pages or inodes), writeback can run etc. After waiting |
| 2103 | * for all running page faults we sync the filesystem which will clean all |
| 2104 | * dirty pages and inodes (no new dirty pages or inodes can be created when |
| 2105 | * sync is running). |
| 2106 | * |
| 2107 | * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs |
| 2108 | * modification are blocked (e.g. XFS preallocation truncation on inode |
| 2109 | * reclaim). This is usually implemented by blocking new transactions for |
| 2110 | * filesystems that have them and need this additional guard. After all |
| 2111 | * internal writers are finished we call ->freeze_fs() to finish filesystem |
| 2112 | * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is |
| 2113 | * mostly auxiliary for filesystems to verify they do not modify frozen fs. |
| 2114 | * |
| 2115 | * sb->s_writers.frozen is protected by sb->s_umount. |
| 2116 | * |
| 2117 | * Return: If the freeze was successful, zero is returned. If the freeze
| 2118 | * failed, a negative error code is returned.
| 2119 | */ |
| 2120 | int freeze_super(struct super_block *sb, enum freeze_holder who, const void *freeze_owner) |
| 2121 | { |
| 2122 | int ret; |
| 2123 | |
| 2124 | if (!super_lock_excl(sb)) { |
| 2125 | WARN_ON_ONCE("Dying superblock while freezing!"); |
| 2126 | return -EINVAL; |
| 2127 | } |
| 2128 | atomic_inc(&sb->s_active); |
| 2129 | |
| 2130 | retry: |
| 2131 | if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) { |
| 2132 | if (may_freeze(sb, who, freeze_owner)) |
| 2133 | ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1); |
| 2134 | else |
| 2135 | ret = -EBUSY; |
| 2136 | /* All freezers share a single active reference. */ |
| 2137 | deactivate_locked_super(sb); |
| 2138 | return ret; |
| 2139 | } |
| 2140 | |
| 2141 | if (sb->s_writers.frozen != SB_UNFROZEN) { |
| 2142 | ret = wait_for_partially_frozen(sb); |
| 2143 | if (ret) { |
| 2144 | deactivate_locked_super(sb); |
| 2145 | return ret; |
| 2146 | } |
| 2147 | |
| 2148 | goto retry; |
| 2149 | } |
| 2150 | |
| 2151 | if (sb_rdonly(sb)) { |
| 2152 | /* Nothing to do really... */ |
| 2153 | WARN_ON_ONCE(freeze_inc(sb, who) > 1); |
| 2154 | sb->s_writers.freeze_owner = freeze_owner; |
| 2155 | sb->s_writers.frozen = SB_FREEZE_COMPLETE; |
| 2156 | wake_up_var(&sb->s_writers.frozen); |
| 2157 | super_unlock_excl(sb); |
| 2158 | return 0; |
| 2159 | } |
| 2160 | |
| 2161 | sb->s_writers.frozen = SB_FREEZE_WRITE; |
| 2162 | /* Release s_umount to preserve sb_start_write -> s_umount ordering */ |
| 2163 | super_unlock_excl(sb); |
| 2164 | sb_wait_write(sb, SB_FREEZE_WRITE); |
| 2165 | __super_lock_excl(sb); |
| 2166 | |
| 2167 | /* Now we go and block page faults... */ |
| 2168 | sb->s_writers.frozen = SB_FREEZE_PAGEFAULT; |
| 2169 | sb_wait_write(sb, SB_FREEZE_PAGEFAULT); |
| 2170 | |
| 2171 | /* All writers are done so after syncing there won't be dirty data */ |
| 2172 | ret = sync_filesystem(sb); |
| 2173 | if (ret) { |
| 2174 | sb->s_writers.frozen = SB_UNFROZEN; |
| 2175 | sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT); |
| 2176 | wake_up_var(&sb->s_writers.frozen); |
| 2177 | deactivate_locked_super(sb); |
| 2178 | return ret; |
| 2179 | } |
| 2180 | |
| 2181 | /* Now wait for internal filesystem counter */ |
| 2182 | sb->s_writers.frozen = SB_FREEZE_FS; |
| 2183 | sb_wait_write(sb, SB_FREEZE_FS); |
| 2184 | |
| 2185 | if (sb->s_op->freeze_fs) { |
| 2186 | ret = sb->s_op->freeze_fs(sb); |
| 2187 | if (ret) { |
| 2188 | pr_err("VFS: Filesystem freeze failed\n");
| 2190 | sb->s_writers.frozen = SB_UNFROZEN; |
| 2191 | sb_freeze_unlock(sb, SB_FREEZE_FS); |
| 2192 | wake_up_var(&sb->s_writers.frozen); |
| 2193 | deactivate_locked_super(sb); |
| 2194 | return ret; |
| 2195 | } |
| 2196 | } |
| 2197 | /* |
| 2198 | * For debugging purposes so that the fs can warn if it sees write activity
| 2199 | * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super(). |
| 2200 | */ |
| 2201 | WARN_ON_ONCE(freeze_inc(sb, who) > 1); |
| 2202 | sb->s_writers.freeze_owner = freeze_owner; |
| 2203 | sb->s_writers.frozen = SB_FREEZE_COMPLETE; |
| 2204 | wake_up_var(&sb->s_writers.frozen); |
| 2205 | lockdep_sb_freeze_release(sb); |
| 2206 | super_unlock_excl(sb); |
| 2207 | return 0; |
| 2208 | } |
| 2209 | EXPORT_SYMBOL(freeze_super); |
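| |
| | /*
| | * Example (hedged sketch; do_maintenance() is hypothetical): an
| | * in-kernel user freezing a filesystem around some maintenance work.
| | * A matching thaw_super() with the same holder flags undoes the freeze.
| | */
| | #if 0
| | error = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
| | if (!error) {
| | do_maintenance(sb);
| | error = thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
| | }
| | #endif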
| 2210 | |
| 2211 | /* |
| 2212 | * Undoes the effect of a freeze_super() call. If the filesystem is
| 2213 | * frozen both by userspace and the kernel, a thaw call from either source |
| 2214 | * removes that state without releasing the other state or unlocking the |
| 2215 | * filesystem. |
| 2216 | */ |
| 2217 | static int thaw_super_locked(struct super_block *sb, enum freeze_holder who, |
| 2218 | const void *freeze_owner) |
| 2219 | { |
| 2220 | int error = -EINVAL; |
| 2221 | |
| 2222 | if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) |
| 2223 | goto out_unlock; |
| 2224 | |
| 2225 | if (!may_unfreeze(sb, who, freeze_owner)) |
| 2226 | goto out_unlock; |
| 2227 | |
| 2228 | /* |
| 2229 | * All freezers share a single active reference,
| 2230 | * so just unlock if any freezes remain.
| 2231 | */ |
| 2232 | if (freeze_dec(sb, who)) |
| 2233 | goto out_unlock; |
| 2234 | |
| 2235 | if (sb_rdonly(sb)) { |
| 2236 | sb->s_writers.frozen = SB_UNFROZEN; |
| 2237 | sb->s_writers.freeze_owner = NULL; |
| 2238 | wake_up_var(&sb->s_writers.frozen); |
| 2239 | goto out_deactivate; |
| 2240 | } |
| 2241 | |
| 2242 | lockdep_sb_freeze_acquire(sb); |
| 2243 | |
| 2244 | if (sb->s_op->unfreeze_fs) { |
| 2245 | error = sb->s_op->unfreeze_fs(sb); |
| 2246 | if (error) { |
| 2247 | pr_err("VFS: Filesystem thaw failed\n"); |
| 2248 | freeze_inc(sb, who); |
| 2249 | lockdep_sb_freeze_release(sb); |
| 2250 | goto out_unlock; |
| 2251 | } |
| 2252 | } |
| 2253 | |
| 2254 | sb->s_writers.frozen = SB_UNFROZEN; |
| 2255 | sb->s_writers.freeze_owner = NULL; |
| 2256 | wake_up_var(&sb->s_writers.frozen); |
| 2257 | sb_freeze_unlock(sb, SB_FREEZE_FS); |
| 2258 | out_deactivate: |
| 2259 | deactivate_locked_super(sb); |
| 2260 | return 0; |
| 2261 | |
| 2262 | out_unlock: |
| 2263 | super_unlock_excl(sb); |
| 2264 | return error; |
| 2265 | } |
| 2266 | |
| 2267 | /** |
| 2268 | * thaw_super -- unlock filesystem |
| 2269 | * @sb: the super to thaw |
| 2270 | * @who: context that wants to thaw
| 2271 | * @freeze_owner: owner of the freeze |
| 2272 | * |
| 2273 | * Unlocks the filesystem and marks it writeable again after freeze_super() |
| 2274 | * if there are no remaining freezes on the filesystem. |
| 2275 | * |
| 2276 | * @who should be: |
| 2277 | * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs; |
| 2278 | * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs;
| 2279 | * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
| 2280 | * |
| 2281 | * A filesystem may hold multiple devices and thus may
| 2282 | * have been frozen through the block layer via multiple block devices. |
| 2283 | * The filesystem remains frozen until all block devices are unfrozen. |
| 2284 | */ |
| 2285 | int thaw_super(struct super_block *sb, enum freeze_holder who, |
| 2286 | const void *freeze_owner) |
| 2287 | { |
| 2288 | if (!super_lock_excl(sb)) { |
| 2289 | WARN_ON_ONCE("Dying superblock while thawing!"); |
| 2290 | return -EINVAL; |
| 2291 | } |
| 2292 | return thaw_super_locked(sb, who, freeze_owner); |
| 2293 | } |
| 2294 | EXPORT_SYMBOL(thaw_super); |
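| |
| | /*
| | * Example (hedged sketch): roughly how the FITHAW ioctl path ends up
| | * here, mirroring the FIFREEZE-side freeze_super() call with the
| | * userspace holder flag.
| | */
| | #if 0
| | error = thaw_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
| | #endif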
| 2295 | |
| 2296 | /* |
| 2297 | * Create workqueue for deferred direct IO completions. We allocate the |
| 2298 | * workqueue when it's first needed. This avoids creating the workqueue for
| 2299 | * filesystems that don't need it and also allows us to create the workqueue
| 2300 | * late enough that we can include s_id in the name of the workqueue.
| 2301 | */ |
| 2302 | int sb_init_dio_done_wq(struct super_block *sb) |
| 2303 | { |
| 2304 | struct workqueue_struct *old; |
| 2305 | struct workqueue_struct *wq = alloc_workqueue("dio/%s", |
| 2306 | WQ_MEM_RECLAIM, 0, |
| 2307 | sb->s_id); |
| 2308 | if (!wq) |
| 2309 | return -ENOMEM; |
| 2310 | /* |
| 2311 | * This has to be atomic as multiple DIOs can race to create the workqueue.
| 2312 | */ |
| 2313 | old = cmpxchg(&sb->s_dio_done_wq, NULL, wq); |
| 2314 | /* Someone created workqueue before us? Free ours... */ |
| 2315 | if (old) |
| 2316 | destroy_workqueue(wq); |
| 2317 | return 0; |
| 2318 | } |
| 2319 | EXPORT_SYMBOL_GPL(sb_init_dio_done_wq); |
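| |
| | /*
| | * Example (hedged sketch): callers create the workqueue lazily the
| | * first time a deferred DIO completion is needed, mirroring the pattern
| | * used by the direct IO code.
| | */
| | #if 0
| | if (!inode->i_sb->s_dio_done_wq) {
| | error = sb_init_dio_done_wq(inode->i_sb);
| | if (error)
| | return error;
| | }
| | #endif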