// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"

/*
 * Write new AG headers to disk. These are non-transactional, but they
 * need to be written and completed prior to the growfs transaction
 * being logged.
 * To do this, we use a delayed write buffer list and wait for
 * submission and IO completion of the list as a whole. This allows the
 * IO subsystem to merge all the AG headers in a single AG into a single
 * IO and hide most of the latency of the IO from us.
 *
 * This also means that if we get an error whilst building the buffer
 * list to write, we can cancel the entire list without having written
 * anything.
 */
static int
xfs_resizefs_init_new_ags(
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_agnumber_t		oagcount,
	xfs_agnumber_t		nagcount,
	xfs_rfsblock_t		delta,
	struct xfs_perag	*last_pag,
	bool			*lastag_extended)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
	int			error;

	*lastag_extended = false;

	INIT_LIST_HEAD(&id->buffer_list);
	for (id->agno = nagcount - 1;
	     id->agno >= oagcount;
	     id->agno--, delta -= id->agsize) {

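		/*
		 * Only the highest-numbered new AG can be a runt: size it
		 * from whatever blocks remain past the start of the AG.
		 * All other new AGs are full size.
		 */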
		if (id->agno == nagcount - 1)
			id->agsize = nb - (id->agno *
					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			id->agsize = mp->m_sb.sb_agblocks;

		error = xfs_ag_init_headers(mp, id);
		if (error) {
			xfs_buf_delwri_cancel(&id->buffer_list);
			return error;
		}
	}

	error = xfs_buf_delwri_submit(&id->buffer_list);
	if (error)
		return error;

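	/*
	 * Any delta left over after sizing the new AGs belongs to the old
	 * last AG, which was previously undersized and must now be
	 * extended to its new length.
	 */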
	if (delta) {
		*lastag_extended = true;
		error = xfs_ag_extend_space(last_pag, tp, delta);
	}
	return error;
}

/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_data	*in)		/* growfs data input struct */
{
	struct xfs_buf		*bp;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_div, nb_mod;
	int64_t			delta;
	bool			lastag_extended = false;
	xfs_agnumber_t		oagcount;
	struct xfs_trans	*tp;
	struct aghdr_init_data	id = {};
	struct xfs_perag	*last_pag;

	nb = in->newblocks;
	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
	if (error)
		return error;

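	/*
	 * Probe the device by reading the would-be last sector of the
	 * grown filesystem so we fail up front if the device is too small.
	 */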
	if (nb > mp->m_sb.sb_dblocks) {
		error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
		if (error)
			return error;
		xfs_buf_relse(bp);
	}

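	/*
	 * Work out the new AG count. A trailing partial AG is kept as a
	 * runt only if it holds at least XFS_MIN_AG_BLOCKS; otherwise the
	 * size request is rounded down to a whole number of AGs.
	 */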
	nb_div = nb;
	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
	if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
		nb_div++;
	else if (nb_mod)
		nb = nb_div * mp->m_sb.sb_agblocks;

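	/* Clamp to the maximum supported AG count, trimming the size to match. */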
	if (nb_div > XFS_MAX_AGNUMBER + 1) {
		nb_div = XFS_MAX_AGNUMBER + 1;
		nb = nb_div * mp->m_sb.sb_agblocks;
	}
	nagcount = nb_div;
	delta = nb - mp->m_sb.sb_dblocks;
	/*
	 * Reject filesystems with a single AG because they are not
	 * supported, and reject a shrink operation that would cause a
	 * filesystem to become unsupported.
	 */
	if (delta < 0 && nagcount < 2)
		return -EINVAL;

	/* No work to do */
	if (delta == 0)
		return 0;

	oagcount = mp->m_sb.sb_agcount;
	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, nb, &nagimax);
		if (error)
			return error;
	} else if (nagcount < oagcount) {
		/* TODO: removing whole AGs during a shrink is not yet supported */
		return -EINVAL;
	}

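	/*
	 * Grow needs only a small fixed block reservation and may dip into
	 * the reserve pool (XFS_TRANS_RESERVE) because it is about to add
	 * space. Shrink instead reserves all of the blocks being removed
	 * so they cannot be handed out again while the transaction runs.
	 */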
	if (delta > 0)
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
				XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	else
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0,
				0, &tp);
	if (error)
		return error;

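	/*
	 * The last existing AG is the only current AG whose size can
	 * change: on grow it may be filled out to full size, on shrink it
	 * gives up the blocks being removed.
	 */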
	last_pag = xfs_perag_get(mp, oagcount - 1);
	if (delta > 0) {
		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
				delta, last_pag, &lastag_extended);
	} else {
		xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");

		error = xfs_ag_shrink_space(last_pag, &tp, -delta);
	}
	xfs_perag_put(last_pag);
	if (error)
		goto out_trans_cancel;

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (delta)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
	if (id.nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);

	/*
	 * Sync sb counters now to reflect the updated values. This is
	 * particularly important for shrink because the write verifier
	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
	 */
	if (xfs_has_lazysbcount(mp))
		xfs_log_sb(tp);

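	/*
	 * Commit synchronously so the size change is durable on disk
	 * before we report success and update the live mount state.
	 */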
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	if (delta > 0) {
		/*
		 * If we expanded the last AG, free the per-AG reservation
		 * so we can reinitialize it with the new size.
		 */
		if (lastag_extended) {
			struct xfs_perag	*pag;

			pag = xfs_perag_get(mp, id.agno);
			error = xfs_ag_resv_free(pag);
			xfs_perag_put(pag);
			if (error)
				return error;
		}
		/*
		 * Reserve AG metadata blocks. ENOSPC here does not mean there
		 * was a growfs failure, just that there still isn't space for
		 * new user data after the grow has been run.
		 */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error == -ENOSPC)
			error = 0;
	}
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

static int
xfs_growfs_log_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_log	*in)		/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard: we'd need new interfaces to sync the log
	 * first and hold off all activity while moving it. We could have a
	 * shorter or longer log in the same space, or transform an internal
	 * log into an external one, or vice versa.
	 */
	return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
	struct xfs_mount	*mp,
	__u32			imaxpct)
{
	struct xfs_trans	*tp;
	int			dpct;
	int			error;

	if (imaxpct > 100)
		return -EINVAL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

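	/*
	 * The superblock field is logged as a signed delta, so pass the
	 * change from the current percentage rather than the new value.
	 */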
	dpct = imaxpct - mp->m_sb.sb_imax_pct;
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Protected versions of the growfs functions; these acquire and release the
 * mount's grow lock and are exported through the ioctls XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */
int
xfs_growfs_data(
	struct xfs_mount	*mp,
	struct xfs_growfs_data	*in)
{
	int			error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;

	/* update imaxpct separately from the physical grow of the filesystem */
	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
		error = xfs_growfs_imaxpct(mp, in->imaxpct);
		if (error)
			goto out_error;
	}

	if (in->newblocks != mp->m_sb.sb_dblocks) {
		error = xfs_growfs_data_private(mp, in);
		if (error)
			goto out_error;
	}

	/* Recompute the maximum inode count to reflect the post-growfs state */
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
	} else {
		M_IGEO(mp)->maxicount = 0;
	}

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

out_error:
	/*
	 * Increment the generation unconditionally; the error could be from
	 * updating the secondary superblocks, in which case the new size
	 * is already live.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	struct xfs_mount	*mp,
	struct xfs_growfs_log	*in)
{
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The resulting reservation
 * is tracked in mp->m_resblks and mp->m_resblks_avail.
 */
int
xfs_reserve_blocks(
	struct xfs_mount	*mp,
	uint64_t		request)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	int64_t			free;
	int			error = 0;

	/*
	 * With per-cpu counters, this becomes an interesting problem. We
	 * need to work out if we are freeing or allocating blocks first,
	 * then we can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 *
	 * The code below estimates how many blocks it can request from
	 * fdblocks to stash in the reserve pool. This is a classic TOCTOU
	 * race since fdblocks updates are not always coordinated via
	 * m_sb_lock. Set the reserve size even if there's not enough free
	 * space to fill it because mod_fdblocks will refill an undersized
	 * reserve when it can.
	 */
	free = percpu_counter_sum(&mp->m_fdblocks) -
						xfs_fdblocks_unavailable(mp);
	delta = request - mp->m_resblks;
	mp->m_resblks = request;
	if (delta > 0 && free > 0) {
		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC. Don't set the reserved flag
		 * here - we don't want to reserve the extra reserve blocks
		 * from the reserve.
		 *
		 * The desired reserve size can change after we drop the lock.
		 * Use mod_fdblocks to put the space into the reserve or into
		 * fdblocks as appropriate.
		 */
		fdblks_delta = min(free, delta);
		spin_unlock(&mp->m_sb_lock);
		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
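		/*
		 * Adding the blocks straight back via xfs_mod_fdblocks()
		 * lets the counter code divert them into the now-undersized
		 * reserve pool, topping m_resblks_avail up towards
		 * m_resblks.
		 */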
		if (!error)
			xfs_mod_fdblocks(mp, fdblks_delta, 0);
		spin_lock(&mp->m_sb_lock);
	}
out:
	spin_unlock(&mp->m_sb_lock);
	return error;
}

int
xfs_fs_goingdown(
	struct xfs_mount	*mp,
	uint32_t		inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
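		/*
		 * Freeze the block device to flush dirty data and quiesce
		 * the filesystem, shut down while frozen, then thaw, so the
		 * shutdown happens from a clean, stable state.
		 */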
		if (!freeze_bdev(mp->m_super->s_bdev)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(mp->m_super->s_bdev);
		}
		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 *
 * The shutdown state change is atomic, so only the first caller to set the
 * shutdown flag actually processes the shutdown. This means we shut down the
 * log exactly once, and we don't spam the logs when multiple concurrent
 * shutdowns race to set the shutdown flags.
 */
void
xfs_do_force_shutdown(
	struct xfs_mount *mp,
	uint32_t	flags,
	char		*fname,
	int		lnnum)
{
	int		tag;
	const char	*why;

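	/*
	 * Only the first caller to set the shutdown flag performs the
	 * shutdown; everyone else waits for the log shutdown to complete
	 * before returning.
	 */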
	if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
		xlog_shutdown_wait(mp->m_log);
		return;
	}
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	if (flags & SHUTDOWN_FORCE_UMOUNT)
		xfs_alert(mp, "User initiated shutdown received.");

	if (xlog_force_shutdown(mp->m_log, flags)) {
		tag = XFS_PTAG_SHUTDOWN_LOGERROR;
		why = "Log I/O Error";
	} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of in-memory data";
	} else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of on-disk metadata";
	} else if (flags & SHUTDOWN_DEVICE_REMOVED) {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Block device removal";
	} else {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Metadata I/O Error";
	}

	trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);

	xfs_alert_tag(mp, tag,
	"%s (0x%x) detected at %pS (%s:%d). Shutting down filesystem.",
		why, flags, __return_address, fname, lnnum);
	xfs_alert(mp,
		"Please unmount the filesystem and rectify the problem(s)");
	if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
		xfs_stack_trace();
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	mp->m_finobt_nores = false;
	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_init(pag, NULL);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
int
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_free(pag);
		if (err2 && !error)
			error = err2;
	}

	if (error)
		xfs_warn(mp,
	"Error %d freeing per-AG metadata reserve pool.", error);

	return error;
}