xfs: refactor xfs_verifier_error and xfs_buf_ioerror
fs/xfs/libxfs/xfs_alloc.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag_resv.h"

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

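/*
 * Illustrative sketch, not part of the kernel source: the first block
 * available for preallocation sits one block past the last
 * statically-placed btree root, so it moves outward as each optional
 * feature (finobt, rmapbt, reflink) adds another root block.  The
 * helper name, the plain int flags and the assumption that the roots
 * are laid out consecutively after the inobt root are illustration-only
 * simplifications of the XFS_*_BLOCK() macros used above.
 */
static unsigned int
example_first_prealloc_block(
	unsigned int	inobt_root,	/* block of the always-present inobt root */
	int		has_finobt,
	int		has_rmapbt,
	int		has_reflink)
{
	unsigned int	last_root = inobt_root;

	if (has_finobt)
		last_root++;		/* finobt root follows */
	if (has_rmapbt)
		last_root++;		/* rmapbt root follows */
	if (has_reflink)
		last_root++;		/* refcount root follows */
	return last_root + 1;		/* first block past the last root */
}
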
/*
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * allocated for a delayed extent, for example, a data block in a certain AG is
 * allocated but there is no additional block for the additional bmap btree
 * block due to a split of the bmap btree of the file. The result of this may
 * lead to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated. To get around this, we explicitly set
 * aside a few blocks which will not be reserved in delayed allocation.
 *
 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
 * potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}

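/*
 * Worked example (standalone sketch, not kernel code): with
 * XFS_ALLOC_AGFL_RESERVE equal to the 4 freelist blocks described
 * above, each AG sets aside 4 + 4 = 8 blocks, so a 16-AG filesystem
 * hides 128 blocks from delayed allocation.  The helper name is an
 * assumption for illustration only.
 */
static unsigned int
example_set_aside(
	unsigned int	agcount,	/* sb_agcount */
	unsigned int	agfl_reserve)	/* XFS_ALLOC_AGFL_RESERVE */
{
	return agcount * (agfl_reserve + 4);	/* 16 * (4 + 4) == 128 */
}
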
/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata. These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry. The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;		/* finobt root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;		/* rmap root block */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}

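/*
 * Standalone sketch of the arithmetic above, not kernel code: the four
 * sector-sized AG headers are converted to whole filesystem blocks,
 * then the AGFL reserve and one block per btree root are subtracted
 * from the AG size.  The helper name and its parameters are assumptions
 * for illustration only.
 */
static unsigned int
example_ag_max_usable(
	unsigned int	agblocks,	/* blocks per AG */
	unsigned int	header_blocks,	/* fs blocks holding the 4 sector headers */
	unsigned int	agfl_reserve,	/* XFS_ALLOC_AGFL_RESERVE */
	int		has_finobt,
	int		has_rmapbt,
	int		has_reflink)
{
	unsigned int	blocks = header_blocks + agfl_reserve + 3;

	if (has_finobt)
		blocks++;		/* finobt root */
	if (has_rmapbt)
		blocks++;		/* rmap root */
	if (has_reflink)
		blocks++;		/* refcount root */
	return agblocks - blocks;
}
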
fe033cc8
CH
135/*
136 * Lookup the record equal to [bno, len] in the btree given by cur.
137 */
138STATIC int /* error */
139xfs_alloc_lookup_eq(
140 struct xfs_btree_cur *cur, /* btree cursor */
141 xfs_agblock_t bno, /* starting block of extent */
142 xfs_extlen_t len, /* length of extent */
143 int *stat) /* success/failure */
144{
145 cur->bc_rec.a.ar_startblock = bno;
146 cur->bc_rec.a.ar_blockcount = len;
147 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
148}
149
150/*
151 * Lookup the first record greater than or equal to [bno, len]
152 * in the btree given by cur.
153 */
a66d6363 154int /* error */
fe033cc8
CH
155xfs_alloc_lookup_ge(
156 struct xfs_btree_cur *cur, /* btree cursor */
157 xfs_agblock_t bno, /* starting block of extent */
158 xfs_extlen_t len, /* length of extent */
159 int *stat) /* success/failure */
160{
161 cur->bc_rec.a.ar_startblock = bno;
162 cur->bc_rec.a.ar_blockcount = len;
163 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
164}
165
166/*
167 * Lookup the first record less than or equal to [bno, len]
168 * in the btree given by cur.
169 */
0d5a75e9 170static int /* error */
fe033cc8
CH
171xfs_alloc_lookup_le(
172 struct xfs_btree_cur *cur, /* btree cursor */
173 xfs_agblock_t bno, /* starting block of extent */
174 xfs_extlen_t len, /* length of extent */
175 int *stat) /* success/failure */
176{
177 cur->bc_rec.a.ar_startblock = bno;
178 cur->bc_rec.a.ar_blockcount = len;
179 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
180}
181
278d0ca1
CH
182/*
183 * Update the record referred to by cur to the value given
184 * by [bno, len].
185 * This either works (return 0) or gets an EFSCORRUPTED error.
186 */
187STATIC int /* error */
188xfs_alloc_update(
189 struct xfs_btree_cur *cur, /* btree cursor */
190 xfs_agblock_t bno, /* starting block of extent */
191 xfs_extlen_t len) /* length of extent */
192{
193 union xfs_btree_rec rec;
194
195 rec.alloc.ar_startblock = cpu_to_be32(bno);
196 rec.alloc.ar_blockcount = cpu_to_be32(len);
197 return xfs_btree_update(cur, &rec);
198}
fe033cc8 199
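/*
 * Standalone sketch, not kernel code: free-space records are stored
 * big-endian on disk, which is what the cpu_to_be32() calls in
 * xfs_alloc_update() do.  This shows the resulting byte layout for a
 * record {startblock 17, blockcount 5}; the struct and helper names are
 * assumptions for illustration only.
 */
struct example_alloc_rec {
	unsigned char	bytes[8];	/* ar_startblock then ar_blockcount, both be32 */
};

static void
example_pack_rec(
	struct example_alloc_rec	*rec,
	unsigned int			bno,
	unsigned int			len)
{
	int	i;

	for (i = 0; i < 4; i++) {
		rec->bytes[i] = (bno >> (24 - 8 * i)) & 0xff;
		rec->bytes[4 + i] = (len >> (24 - 8 * i)) & 0xff;
	}
	/* bno 17, len 5  ->  00 00 00 11  00 00 00 05 */
}
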
8cc938fe
CH
200/*
201 * Get the data from the pointed-to record.
202 */
a46db608 203int /* error */
8cc938fe
CH
204xfs_alloc_get_rec(
205 struct xfs_btree_cur *cur, /* btree cursor */
206 xfs_agblock_t *bno, /* output: starting block of extent */
207 xfs_extlen_t *len, /* output: length of extent */
208 int *stat) /* output: success/failure */
209{
210 union xfs_btree_rec *rec;
211 int error;
212
213 error = xfs_btree_get_rec(cur, &rec, stat);
214 if (!error && *stat == 1) {
215 *bno = be32_to_cpu(rec->alloc.ar_startblock);
216 *len = be32_to_cpu(rec->alloc.ar_blockcount);
217 }
218 return error;
219}
220
1da177e4
LT
221/*
222 * Compute aligned version of the found extent.
223 * Takes alignment and min length into account.
224 */
ebf55872 225STATIC bool
1da177e4 226xfs_alloc_compute_aligned(
86fa8af6 227 xfs_alloc_arg_t *args, /* allocation argument structure */
1da177e4
LT
228 xfs_agblock_t foundbno, /* starting block in found extent */
229 xfs_extlen_t foundlen, /* length in found extent */
1da177e4 230 xfs_agblock_t *resbno, /* result block number */
ebf55872
CH
231 xfs_extlen_t *reslen, /* result length */
232 unsigned *busy_gen)
1da177e4 233{
ebf55872
CH
234 xfs_agblock_t bno = foundbno;
235 xfs_extlen_t len = foundlen;
bfe46d4e 236 xfs_extlen_t diff;
ebf55872 237 bool busy;
1da177e4 238
e26f0501 239 /* Trim busy sections out of found extent */
ebf55872 240 busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
e26f0501 241
bfe46d4e
BF
242 /*
243 * If we have a largish extent that happens to start before min_agbno,
244 * see if we can shift it into range...
245 */
246 if (bno < args->min_agbno && bno + len > args->min_agbno) {
247 diff = args->min_agbno - bno;
248 if (len > diff) {
249 bno += diff;
250 len -= diff;
251 }
252 }
253
e26f0501
CH
254 if (args->alignment > 1 && len >= args->minlen) {
255 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
bfe46d4e
BF
256
257 diff = aligned_bno - bno;
e26f0501
CH
258
259 *resbno = aligned_bno;
260 *reslen = diff >= len ? 0 : len - diff;
1da177e4 261 } else {
e26f0501
CH
262 *resbno = bno;
263 *reslen = len;
1da177e4 264 }
ebf55872
CH
265
266 return busy;
1da177e4
LT
267}
268
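/*
 * Standalone sketch of the alignment step above, not kernel code: round
 * the (busy-trimmed) extent start up to the requested alignment and
 * shrink the usable length by however many blocks that skipped.  The
 * helper name is an assumption; roundup() is open-coded to keep the
 * sketch self-contained.
 */
static void
example_align_extent(
	unsigned int	bno,		/* found extent start */
	unsigned int	len,		/* found extent length */
	unsigned int	alignment,	/* required alignment, > 1 */
	unsigned int	*resbno,
	unsigned int	*reslen)
{
	unsigned int	aligned = ((bno + alignment - 1) / alignment) * alignment;
	unsigned int	diff = aligned - bno;

	*resbno = aligned;
	*reslen = diff >= len ? 0 : len - diff;
	/* e.g. bno 10, len 8, alignment 4  ->  resbno 12, reslen 6 */
}
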
269/*
270 * Compute best start block and diff for "near" allocations.
271 * freelen >= wantlen already checked by caller.
272 */
273STATIC xfs_extlen_t /* difference value (absolute) */
274xfs_alloc_compute_diff(
275 xfs_agblock_t wantbno, /* target starting block */
276 xfs_extlen_t wantlen, /* target length */
277 xfs_extlen_t alignment, /* target alignment */
292378ed 278 int datatype, /* are we allocating data? */
1da177e4
LT
279 xfs_agblock_t freebno, /* freespace's starting block */
280 xfs_extlen_t freelen, /* freespace's length */
281 xfs_agblock_t *newbnop) /* result: best start block from free */
282{
283 xfs_agblock_t freeend; /* end of freespace extent */
284 xfs_agblock_t newbno1; /* return block number */
285 xfs_agblock_t newbno2; /* other new block number */
286 xfs_extlen_t newlen1=0; /* length with newbno1 */
287 xfs_extlen_t newlen2=0; /* length with newbno2 */
288 xfs_agblock_t wantend; /* end of target extent */
292378ed 289 bool userdata = xfs_alloc_is_userdata(datatype);
1da177e4
LT
290
291 ASSERT(freelen >= wantlen);
292 freeend = freebno + freelen;
293 wantend = wantbno + wantlen;
211d022c
JK
294 /*
295 * We want to allocate from the start of a free extent if it is past
296 * the desired block or if we are allocating user data and the free
297 * extent is before desired block. The second case is there to allow
298 * for contiguous allocation from the remaining free space if the file
299 * grows in the short term.
300 */
301 if (freebno >= wantbno || (userdata && freeend < wantend)) {
1da177e4
LT
302 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
303 newbno1 = NULLAGBLOCK;
304 } else if (freeend >= wantend && alignment > 1) {
305 newbno1 = roundup(wantbno, alignment);
306 newbno2 = newbno1 - alignment;
307 if (newbno1 >= freeend)
308 newbno1 = NULLAGBLOCK;
309 else
310 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
311 if (newbno2 < freebno)
312 newbno2 = NULLAGBLOCK;
313 else
314 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
315 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
316 if (newlen1 < newlen2 ||
317 (newlen1 == newlen2 &&
318 XFS_ABSDIFF(newbno1, wantbno) >
319 XFS_ABSDIFF(newbno2, wantbno)))
320 newbno1 = newbno2;
321 } else if (newbno2 != NULLAGBLOCK)
322 newbno1 = newbno2;
323 } else if (freeend >= wantend) {
324 newbno1 = wantbno;
325 } else if (alignment > 1) {
326 newbno1 = roundup(freeend - wantlen, alignment);
327 if (newbno1 > freeend - wantlen &&
328 newbno1 - alignment >= freebno)
329 newbno1 -= alignment;
330 else if (newbno1 >= freeend)
331 newbno1 = NULLAGBLOCK;
332 } else
333 newbno1 = freeend - wantlen;
334 *newbnop = newbno1;
335 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
336}
337
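/*
 * Standalone sketch, not kernel code, of the first case handled above:
 * the free extent starts at or after the wanted block (or, for user
 * data, lies entirely before it), so the candidate start is the free
 * extent's start rounded up to the alignment and the returned "diff"
 * is its distance from the wanted block.  Names are assumptions for
 * illustration only.
 */
static unsigned int			/* absolute distance from wantbno */
example_diff_after_want(
	unsigned int	wantbno,	/* target starting block */
	unsigned int	freebno,	/* free extent start */
	unsigned int	alignment,
	unsigned int	*newbno)	/* result: candidate start block */
{
	unsigned int	cand = ((freebno + alignment - 1) / alignment) * alignment;

	*newbno = cand;
	return cand >= wantbno ? cand - wantbno : wantbno - cand;
	/* e.g. wantbno 100, freebno 103, alignment 8  ->  newbno 104, diff 4 */
}
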
338/*
339 * Fix up the length, based on mod and prod.
340 * len should be k * prod + mod for some k.
341 * If len is too small it is returned unchanged.
342 * If len hits maxlen it is left alone.
343 */
344STATIC void
345xfs_alloc_fix_len(
346 xfs_alloc_arg_t *args) /* allocation argument structure */
347{
348 xfs_extlen_t k;
349 xfs_extlen_t rlen;
350
351 ASSERT(args->mod < args->prod);
352 rlen = args->len;
353 ASSERT(rlen >= args->minlen);
354 ASSERT(rlen <= args->maxlen);
355 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
356 (args->mod == 0 && rlen < args->prod))
357 return;
358 k = rlen % args->prod;
359 if (k == args->mod)
360 return;
30265117
JK
361 if (k > args->mod)
362 rlen = rlen - (k - args->mod);
363 else
364 rlen = rlen - args->prod + (args->mod - k);
3790a8cd 365 /* casts to (int) catch length underflows */
30265117
JK
366 if ((int)rlen < (int)args->minlen)
367 return;
368 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
369 ASSERT(rlen % args->prod == args->mod);
54fee133
CH
370 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
371 rlen + args->minleft);
1da177e4
LT
372 args->len = rlen;
373}
374
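/*
 * Standalone sketch of the trim above, not kernel code: shrink rlen so
 * that rlen % prod == mod, never growing it.  The helper name is an
 * assumption; as in xfs_alloc_fix_len(), the caller must still reject
 * results that fall below minlen.
 */
static unsigned int
example_fix_len(
	unsigned int	rlen,
	unsigned int	prod,	/* > 1 */
	unsigned int	mod)	/* < prod */
{
	unsigned int	k = rlen % prod;

	if (k == mod)
		return rlen;
	if (k > mod)
		return rlen - (k - mod);	/* e.g. 103, prod 8, mod 3 -> 99 */
	return rlen - prod + (mod - k);		/* e.g.  98, prod 8, mod 3 -> 91 */
}
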
1da177e4
LT
375/*
376 * Update the two btrees, logically removing from freespace the extent
377 * starting at rbno, rlen blocks. The extent is contained within the
378 * actual (current) free extent fbno for flen blocks.
379 * Flags are passed in indicating whether the cursors are set to the
380 * relevant records.
381 */
382STATIC int /* error code */
383xfs_alloc_fixup_trees(
384 xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */
385 xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */
386 xfs_agblock_t fbno, /* starting block of free extent */
387 xfs_extlen_t flen, /* length of free extent */
388 xfs_agblock_t rbno, /* starting block of returned extent */
389 xfs_extlen_t rlen, /* length of returned extent */
390 int flags) /* flags, XFSA_FIXUP_... */
391{
392 int error; /* error code */
393 int i; /* operation results */
394 xfs_agblock_t nfbno1; /* first new free startblock */
395 xfs_agblock_t nfbno2; /* second new free startblock */
396 xfs_extlen_t nflen1=0; /* first new free length */
397 xfs_extlen_t nflen2=0; /* second new free length */
5fb5aeee
ES
398 struct xfs_mount *mp;
399
400 mp = cnt_cur->bc_mp;
1da177e4
LT
401
402 /*
403 * Look up the record in the by-size tree if necessary.
404 */
405 if (flags & XFSA_FIXUP_CNT_OK) {
406#ifdef DEBUG
407 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
408 return error;
5fb5aeee 409 XFS_WANT_CORRUPTED_RETURN(mp,
1da177e4
LT
410 i == 1 && nfbno1 == fbno && nflen1 == flen);
411#endif
412 } else {
413 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
414 return error;
5fb5aeee 415 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
1da177e4
LT
416 }
417 /*
418 * Look up the record in the by-block tree if necessary.
419 */
420 if (flags & XFSA_FIXUP_BNO_OK) {
421#ifdef DEBUG
422 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
423 return error;
5fb5aeee 424 XFS_WANT_CORRUPTED_RETURN(mp,
1da177e4
LT
425 i == 1 && nfbno1 == fbno && nflen1 == flen);
426#endif
427 } else {
428 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
429 return error;
5fb5aeee 430 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
1da177e4 431 }
7cc95a82 432
1da177e4 433#ifdef DEBUG
7cc95a82
CH
434 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
435 struct xfs_btree_block *bnoblock;
436 struct xfs_btree_block *cntblock;
437
438 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
439 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
1da177e4 440
5fb5aeee 441 XFS_WANT_CORRUPTED_RETURN(mp,
7cc95a82 442 bnoblock->bb_numrecs == cntblock->bb_numrecs);
1da177e4
LT
443 }
444#endif
7cc95a82 445
1da177e4
LT
446 /*
447 * Deal with all four cases: the allocated record is contained
448 * within the freespace record, so we can have new freespace
449 * at either (or both) end, or no freespace remaining.
450 */
451 if (rbno == fbno && rlen == flen)
452 nfbno1 = nfbno2 = NULLAGBLOCK;
453 else if (rbno == fbno) {
454 nfbno1 = rbno + rlen;
455 nflen1 = flen - rlen;
456 nfbno2 = NULLAGBLOCK;
457 } else if (rbno + rlen == fbno + flen) {
458 nfbno1 = fbno;
459 nflen1 = flen - rlen;
460 nfbno2 = NULLAGBLOCK;
461 } else {
462 nfbno1 = fbno;
463 nflen1 = rbno - fbno;
464 nfbno2 = rbno + rlen;
465 nflen2 = (fbno + flen) - nfbno2;
466 }
467 /*
468 * Delete the entry from the by-size btree.
469 */
91cca5df 470 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 471 return error;
5fb5aeee 472 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
1da177e4
LT
473 /*
474 * Add new by-size btree entry(s).
475 */
476 if (nfbno1 != NULLAGBLOCK) {
477 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
478 return error;
5fb5aeee 479 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
4b22a571 480 if ((error = xfs_btree_insert(cnt_cur, &i)))
1da177e4 481 return error;
5fb5aeee 482 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
1da177e4
LT
483 }
484 if (nfbno2 != NULLAGBLOCK) {
485 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
486 return error;
5fb5aeee 487 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
4b22a571 488 if ((error = xfs_btree_insert(cnt_cur, &i)))
1da177e4 489 return error;
5fb5aeee 490 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
1da177e4
LT
491 }
492 /*
493 * Fix up the by-block btree entry(s).
494 */
495 if (nfbno1 == NULLAGBLOCK) {
496 /*
497 * No remaining freespace, just delete the by-block tree entry.
498 */
91cca5df 499 if ((error = xfs_btree_delete(bno_cur, &i)))
1da177e4 500 return error;
5fb5aeee 501 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
1da177e4
LT
502 } else {
503 /*
504 * Update the by-block entry to start later|be shorter.
505 */
506 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
507 return error;
508 }
509 if (nfbno2 != NULLAGBLOCK) {
510 /*
511 * 2 resulting free entries, need to add one.
512 */
513 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
514 return error;
5fb5aeee 515 XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
4b22a571 516 if ((error = xfs_btree_insert(bno_cur, &i)))
1da177e4 517 return error;
5fb5aeee 518 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
1da177e4
LT
519 }
520 return 0;
521}
522
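/*
 * Standalone sketch, not kernel code, of the four cases handled above:
 * carving the allocated range [rbno, rbno+rlen) out of the free extent
 * [fbno, fbno+flen) leaves zero, one or two remaining free pieces,
 * which is what drives the btree deletes and inserts.  EXAMPLE_NULL,
 * the struct and the helper name are assumptions for illustration only.
 */
#define EXAMPLE_NULL	((unsigned int)-1)

struct example_split {
	unsigned int	bno1, len1;	/* first remaining piece, if any */
	unsigned int	bno2, len2;	/* second remaining piece, if any */
};

static void
example_split_free(
	unsigned int		fbno,
	unsigned int		flen,
	unsigned int		rbno,
	unsigned int		rlen,
	struct example_split	*s)
{
	s->bno2 = EXAMPLE_NULL;
	s->len2 = 0;
	if (rbno == fbno && rlen == flen) {		/* exact match, nothing left */
		s->bno1 = EXAMPLE_NULL;
		s->len1 = 0;
	} else if (rbno == fbno) {			/* left edge, tail remains */
		s->bno1 = rbno + rlen;
		s->len1 = flen - rlen;
	} else if (rbno + rlen == fbno + flen) {	/* right edge, head remains */
		s->bno1 = fbno;
		s->len1 = flen - rlen;
	} else {					/* middle, two pieces remain */
		s->bno1 = fbno;
		s->len1 = rbno - fbno;
		s->bno2 = rbno + rlen;
		s->len2 = (fbno + flen) - s->bno2;
	}
}
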
77c95bba 523static bool
612cfbfe 524xfs_agfl_verify(
bb80c6d7
DC
525 struct xfs_buf *bp)
526{
bb80c6d7
DC
527 struct xfs_mount *mp = bp->b_target->bt_mount;
528 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
bb80c6d7
DC
529 int i;
530
ce748eaa 531 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
77c95bba
CH
532 return false;
533 if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
534 return false;
535 /*
536 * during growfs operations, the perag is not fully initialised,
537 * so we can't use it for any useful checking. growfs ensures we can't
538 * use it by using uncached buffers that don't have the perag attached
539 * so we can detect and avoid this problem.
540 */
541 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
542 return false;
543
bb80c6d7 544 for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
77c95bba 545 if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
bb80c6d7 546 be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
77c95bba 547 return false;
bb80c6d7 548 }
a45086e2
BF
549
550 return xfs_log_check_lsn(mp,
551 be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn));
77c95bba
CH
552}
553
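/*
 * Standalone sketch, not kernel code, of the per-slot check in the loop
 * above: an AGFL slot is acceptable if it holds either the "empty"
 * sentinel or a block number inside the AG.  The helper name and the
 * open-coded sentinel value are assumptions for illustration only.
 */
static int
example_agfl_slot_ok(
	unsigned int	bno,		/* value read from agfl_bno[i] */
	unsigned int	agblocks)	/* sb_agblocks */
{
	unsigned int	null_agblock = (unsigned int)-1;	/* stands in for NULLAGBLOCK */

	return bno == null_agblock || bno < agblocks;
}
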
554static void
555xfs_agfl_read_verify(
556 struct xfs_buf *bp)
557{
558 struct xfs_mount *mp = bp->b_target->bt_mount;
77c95bba
CH
559
560 /*
561 * There is no verification of non-crc AGFLs because mkfs does not
562 * initialise the AGFL to zero or NULL. Hence the only valid part of the
563 * AGFL is what the AGF says is active. We can't get to the AGF, so we
564 * can't verify just those entries are valid.
565 */
566 if (!xfs_sb_version_hascrc(&mp->m_sb))
567 return;
568
ce5028cf 569 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
31ca03c9 570 xfs_verifier_error(bp, -EFSBADCRC);
ce5028cf 571 else if (!xfs_agfl_verify(bp))
31ca03c9 572 xfs_verifier_error(bp, -EFSCORRUPTED);
612cfbfe
DC
573}
574
1813dd64 575static void
612cfbfe
DC
576xfs_agfl_write_verify(
577 struct xfs_buf *bp)
578{
77c95bba
CH
579 struct xfs_mount *mp = bp->b_target->bt_mount;
580 struct xfs_buf_log_item *bip = bp->b_fspriv;
612cfbfe 581
77c95bba
CH
582 /* no verification of non-crc AGFLs */
583 if (!xfs_sb_version_hascrc(&mp->m_sb))
584 return;
585
586 if (!xfs_agfl_verify(bp)) {
31ca03c9 587 xfs_verifier_error(bp, -EFSCORRUPTED);
77c95bba
CH
588 return;
589 }
590
591 if (bip)
592 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
593
f1dbcd7e 594 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
bb80c6d7
DC
595}
596
1813dd64 597const struct xfs_buf_ops xfs_agfl_buf_ops = {
233135b7 598 .name = "xfs_agfl",
1813dd64
DC
599 .verify_read = xfs_agfl_read_verify,
600 .verify_write = xfs_agfl_write_verify,
601};
602
1da177e4
LT
603/*
604 * Read in the allocation group free block array.
605 */
26788097 606int /* error */
1da177e4
LT
607xfs_alloc_read_agfl(
608 xfs_mount_t *mp, /* mount point structure */
609 xfs_trans_t *tp, /* transaction pointer */
610 xfs_agnumber_t agno, /* allocation group number */
611 xfs_buf_t **bpp) /* buffer for the ag free block array */
612{
613 xfs_buf_t *bp; /* return value */
614 int error;
615
616 ASSERT(agno != NULLAGNUMBER);
617 error = xfs_trans_read_buf(
618 mp, tp, mp->m_ddev_targp,
619 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
1813dd64 620 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
1da177e4
LT
621 if (error)
622 return error;
38f23232 623 xfs_buf_set_ref(bp, XFS_AGFL_REF);
1da177e4
LT
624 *bpp = bp;
625 return 0;
626}
627
ecb6928f
CH
628STATIC int
629xfs_alloc_update_counters(
630 struct xfs_trans *tp,
631 struct xfs_perag *pag,
632 struct xfs_buf *agbp,
633 long len)
634{
635 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
636
637 pag->pagf_freeblks += len;
638 be32_add_cpu(&agf->agf_freeblks, len);
639
640 xfs_trans_agblocks_delta(tp, len);
641 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
642 be32_to_cpu(agf->agf_length)))
2451337d 643 return -EFSCORRUPTED;
ecb6928f
CH
644
645 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
646 return 0;
647}
648
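/*
 * Standalone sketch, not kernel code, of the sanity check above: apply
 * a signed block-count delta to the AG free-space counter and treat a
 * result larger than the AG itself as corruption.  The helper name and
 * the -1 error convention are assumptions for illustration only.
 */
static int
example_update_freeblks(
	unsigned int	*freeblks,	/* agf_freeblks / pagf_freeblks */
	long		len,		/* +len on free, -len on allocation */
	unsigned int	agf_length)
{
	*freeblks += len;
	if (*freeblks > agf_length)
		return -1;		/* more free space than blocks: corrupt */
	return 0;
}
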
1da177e4
LT
649/*
650 * Allocation group level functions.
651 */
652
653/*
654 * Allocate a variable extent in the allocation group agno.
655 * Type and bno are used to determine where in the allocation group the
656 * extent will start.
657 * Extent's length (returned in *len) will be between minlen and maxlen,
658 * and of the form k * prod + mod unless there's nothing that large.
659 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
660 */
661STATIC int /* error */
662xfs_alloc_ag_vextent(
663 xfs_alloc_arg_t *args) /* argument structure for allocation */
664{
665 int error=0;
1da177e4
LT
666
667 ASSERT(args->minlen > 0);
668 ASSERT(args->maxlen > 0);
669 ASSERT(args->minlen <= args->maxlen);
670 ASSERT(args->mod < args->prod);
671 ASSERT(args->alignment > 0);
3fd129b6 672
1da177e4
LT
673 /*
674 * Branch to correct routine based on the type.
675 */
676 args->wasfromfl = 0;
677 switch (args->type) {
678 case XFS_ALLOCTYPE_THIS_AG:
679 error = xfs_alloc_ag_vextent_size(args);
680 break;
681 case XFS_ALLOCTYPE_NEAR_BNO:
682 error = xfs_alloc_ag_vextent_near(args);
683 break;
684 case XFS_ALLOCTYPE_THIS_BNO:
685 error = xfs_alloc_ag_vextent_exact(args);
686 break;
687 default:
688 ASSERT(0);
689 /* NOTREACHED */
690 }
ecb6928f
CH
691
692 if (error || args->agbno == NULLAGBLOCK)
1da177e4 693 return error;
ecb6928f
CH
694
695 ASSERT(args->len >= args->minlen);
696 ASSERT(args->len <= args->maxlen);
3fd129b6 697 ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
ecb6928f
CH
698 ASSERT(args->agbno % args->alignment == 0);
699
673930c3 700 /* if not file data, insert new block into the reverse map btree */
33df3a9c 701 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
673930c3
DW
702 error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
703 args->agbno, args->len, &args->oinfo);
704 if (error)
705 return error;
706 }
707
ecb6928f
CH
708 if (!args->wasfromfl) {
709 error = xfs_alloc_update_counters(args->tp, args->pag,
710 args->agbp,
711 -((long)(args->len)));
712 if (error)
713 return error;
714
4ecbfe63 715 ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
e26f0501 716 args->agbno, args->len));
1da177e4 717 }
ecb6928f 718
3fd129b6 719 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
ecb6928f 720
ff6d6af2
BD
721 XFS_STATS_INC(args->mp, xs_allocx);
722 XFS_STATS_ADD(args->mp, xs_allocb, args->len);
ecb6928f 723 return error;
1da177e4
LT
724}
725
726/*
727 * Allocate a variable extent at exactly agno/bno.
728 * Extent's length (returned in *len) will be between minlen and maxlen,
729 * and of the form k * prod + mod unless there's nothing that large.
730 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
731 */
732STATIC int /* error */
733xfs_alloc_ag_vextent_exact(
734 xfs_alloc_arg_t *args) /* allocation argument structure */
735{
736 xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
737 xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
1da177e4
LT
738 int error;
739 xfs_agblock_t fbno; /* start block of found extent */
1da177e4 740 xfs_extlen_t flen; /* length of found extent */
ebf55872
CH
741 xfs_agblock_t tbno; /* start block of busy extent */
742 xfs_extlen_t tlen; /* length of busy extent */
743 xfs_agblock_t tend; /* end block of busy extent */
1da177e4 744 int i; /* success/failure of operation */
ebf55872 745 unsigned busy_gen;
1da177e4
LT
746
747 ASSERT(args->alignment == 1);
9f9baab3 748
1da177e4
LT
749 /*
750 * Allocate/initialize a cursor for the by-number freespace btree.
751 */
561f7d17 752 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
9f9baab3
CH
753 args->agno, XFS_BTNUM_BNO);
754
1da177e4
LT
755 /*
756 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
757 * Look for the closest free block <= bno, it must contain bno
758 * if any free block does.
759 */
9f9baab3
CH
760 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
761 if (error)
1da177e4 762 goto error0;
9f9baab3
CH
763 if (!i)
764 goto not_found;
765
1da177e4
LT
766 /*
767 * Grab the freespace record.
768 */
9f9baab3
CH
769 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
770 if (error)
1da177e4 771 goto error0;
c29aad41 772 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1da177e4 773 ASSERT(fbno <= args->agbno);
9f9baab3 774
1da177e4 775 /*
e26f0501 776 * Check for overlapping busy extents.
1da177e4 777 */
ebf55872
CH
778 tbno = fbno;
779 tlen = flen;
780 xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
e26f0501
CH
781
782 /*
783 * Give up if the start of the extent is busy, or the freespace isn't
784 * long enough for the minimum request.
785 */
786 if (tbno > args->agbno)
787 goto not_found;
788 if (tlen < args->minlen)
789 goto not_found;
790 tend = tbno + tlen;
791 if (tend < args->agbno + args->minlen)
9f9baab3
CH
792 goto not_found;
793
1da177e4
LT
794 /*
795 * End of extent will be smaller of the freespace end and the
796 * maximal requested end.
9f9baab3 797 *
1da177e4
LT
798 * Fix the length according to mod and prod if given.
799 */
81463b1c
CS
800 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
801 - args->agbno;
1da177e4 802 xfs_alloc_fix_len(args);
81463b1c 803 ASSERT(args->agbno + args->len <= tend);
9f9baab3 804
1da177e4 805 /*
81463b1c 806 * We are allocating agbno for args->len
1da177e4
LT
807 * Allocate/initialize a cursor for the by-size btree.
808 */
561f7d17
CH
809 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
810 args->agno, XFS_BTNUM_CNT);
1da177e4 811 ASSERT(args->agbno + args->len <=
16259e7d 812 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
9f9baab3
CH
813 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
814 args->len, XFSA_FIXUP_BNO_OK);
815 if (error) {
1da177e4
LT
816 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
817 goto error0;
818 }
9f9baab3 819
1da177e4
LT
820 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
821 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
0b1b213f 822
1da177e4 823 args->wasfromfl = 0;
9f9baab3
CH
824 trace_xfs_alloc_exact_done(args);
825 return 0;
826
827not_found:
828 /* Didn't find it, return null. */
829 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
830 args->agbno = NULLAGBLOCK;
831 trace_xfs_alloc_exact_notfound(args);
1da177e4
LT
832 return 0;
833
834error0:
835 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
0b1b213f 836 trace_xfs_alloc_exact_error(args);
1da177e4
LT
837 return error;
838}
839
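/*
 * Standalone sketch, not kernel code, of the containment test above: an
 * exact-bno allocation can only proceed if the unbusy portion
 * [tbno, tbno+tlen) of the found free extent still covers the requested
 * range [agbno, agbno+minlen).  The helper name is an assumption for
 * illustration only.
 */
static int
example_exact_fits(
	unsigned int	agbno,		/* requested start */
	unsigned int	minlen,		/* minimum acceptable length */
	unsigned int	tbno,		/* unbusy extent start */
	unsigned int	tlen)		/* unbusy extent length */
{
	if (tbno > agbno)			/* start of the range is busy */
		return 0;
	if (tlen < minlen)			/* not enough unbusy space at all */
		return 0;
	if (tbno + tlen < agbno + minlen)	/* unbusy space ends too early */
		return 0;
	return 1;
}
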
489a150f
CH
840/*
841 * Search the btree in a given direction via the search cursor and compare
842 * the records found against the good extent we've already found.
843 */
844STATIC int
845xfs_alloc_find_best_extent(
846 struct xfs_alloc_arg *args, /* allocation argument structure */
847 struct xfs_btree_cur **gcur, /* good cursor */
848 struct xfs_btree_cur **scur, /* searching cursor */
849 xfs_agblock_t gdiff, /* difference for search comparison */
850 xfs_agblock_t *sbno, /* extent found by search */
e26f0501
CH
851 xfs_extlen_t *slen, /* extent length */
852 xfs_agblock_t *sbnoa, /* aligned extent found by search */
853 xfs_extlen_t *slena, /* aligned extent length */
489a150f
CH
854 int dir) /* 0 = search right, 1 = search left */
855{
489a150f
CH
856 xfs_agblock_t new;
857 xfs_agblock_t sdiff;
858 int error;
859 int i;
ebf55872 860 unsigned busy_gen;
489a150f
CH
861
862 /* The good extent is perfect, no need to search. */
863 if (!gdiff)
864 goto out_use_good;
865
866 /*
867 * Look until we find a better one, run out of space or run off the end.
868 */
869 do {
870 error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
871 if (error)
872 goto error0;
c29aad41 873 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
ebf55872
CH
874 xfs_alloc_compute_aligned(args, *sbno, *slen,
875 sbnoa, slena, &busy_gen);
489a150f
CH
876
877 /*
878 * The good extent is closer than this one.
879 */
880 if (!dir) {
bfe46d4e
BF
881 if (*sbnoa > args->max_agbno)
882 goto out_use_good;
e26f0501 883 if (*sbnoa >= args->agbno + gdiff)
489a150f
CH
884 goto out_use_good;
885 } else {
bfe46d4e
BF
886 if (*sbnoa < args->min_agbno)
887 goto out_use_good;
e26f0501 888 if (*sbnoa <= args->agbno - gdiff)
489a150f
CH
889 goto out_use_good;
890 }
891
892 /*
893 * Same distance, compare length and pick the best.
894 */
895 if (*slena >= args->minlen) {
896 args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
897 xfs_alloc_fix_len(args);
898
899 sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
211d022c 900 args->alignment,
292378ed 901 args->datatype, *sbnoa,
e26f0501 902 *slena, &new);
489a150f
CH
903
904 /*
905 * Choose closer size and invalidate other cursor.
906 */
907 if (sdiff < gdiff)
908 goto out_use_search;
909 goto out_use_good;
910 }
911
912 if (!dir)
913 error = xfs_btree_increment(*scur, 0, &i);
914 else
915 error = xfs_btree_decrement(*scur, 0, &i);
916 if (error)
917 goto error0;
918 } while (i);
919
920out_use_good:
921 xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
922 *scur = NULL;
923 return 0;
924
925out_use_search:
926 xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
927 *gcur = NULL;
928 return 0;
929
930error0:
931 /* caller invalidates cursors */
932 return error;
933}
934
1da177e4
LT
935/*
936 * Allocate a variable extent near bno in the allocation group agno.
937 * Extent's length (returned in len) will be between minlen and maxlen,
938 * and of the form k * prod + mod unless there's nothing that large.
939 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
940 */
941STATIC int /* error */
942xfs_alloc_ag_vextent_near(
943 xfs_alloc_arg_t *args) /* allocation argument structure */
944{
945 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
946 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
947 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
1da177e4
LT
948 xfs_agblock_t gtbno; /* start bno of right side entry */
949 xfs_agblock_t gtbnoa; /* aligned ... */
950 xfs_extlen_t gtdiff; /* difference to right side entry */
951 xfs_extlen_t gtlen; /* length of right side entry */
e26f0501 952 xfs_extlen_t gtlena; /* aligned ... */
1da177e4
LT
953 xfs_agblock_t gtnew; /* useful start bno of right side */
954 int error; /* error code */
955 int i; /* result code, temporary */
956 int j; /* result code, temporary */
957 xfs_agblock_t ltbno; /* start bno of left side entry */
958 xfs_agblock_t ltbnoa; /* aligned ... */
959 xfs_extlen_t ltdiff; /* difference to left side entry */
1da177e4 960 xfs_extlen_t ltlen; /* length of left side entry */
e26f0501 961 xfs_extlen_t ltlena; /* aligned ... */
1da177e4
LT
962 xfs_agblock_t ltnew; /* useful start bno of left side */
963 xfs_extlen_t rlen; /* length of returned extent */
ebf55872
CH
964 bool busy;
965 unsigned busy_gen;
63d20d6e 966#ifdef DEBUG
1da177e4
LT
967 /*
968 * Randomly don't execute the first algorithm.
969 */
970 int dofirst; /* set to do first algorithm */
971
ecb3403d 972 dofirst = prandom_u32() & 1;
1da177e4 973#endif
e26f0501 974
bfe46d4e
BF
 975	/* handle uninitialized agbno range so caller doesn't have to */
976 if (!args->min_agbno && !args->max_agbno)
977 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
978 ASSERT(args->min_agbno <= args->max_agbno);
979
980 /* clamp agbno to the range if it's outside */
981 if (args->agbno < args->min_agbno)
982 args->agbno = args->min_agbno;
983 if (args->agbno > args->max_agbno)
984 args->agbno = args->max_agbno;
985
e26f0501
CH
986restart:
987 bno_cur_lt = NULL;
988 bno_cur_gt = NULL;
989 ltlen = 0;
990 gtlena = 0;
991 ltlena = 0;
ebf55872 992 busy = false;
e26f0501 993
1da177e4
LT
994 /*
995 * Get a cursor for the by-size btree.
996 */
561f7d17
CH
997 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
998 args->agno, XFS_BTNUM_CNT);
e26f0501 999
1da177e4
LT
1000 /*
1001 * See if there are any free extents as big as maxlen.
1002 */
1003 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
1004 goto error0;
1005 /*
1006 * If none, then pick up the last entry in the tree unless the
1007 * tree is empty.
1008 */
1009 if (!i) {
1010 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
1011 &ltlen, &i)))
1012 goto error0;
1013 if (i == 0 || ltlen == 0) {
1014 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
e26f0501 1015 trace_xfs_alloc_near_noentry(args);
1da177e4
LT
1016 return 0;
1017 }
1018 ASSERT(i == 1);
1019 }
1020 args->wasfromfl = 0;
e26f0501 1021
1da177e4
LT
1022 /*
1023 * First algorithm.
1024 * If the requested extent is large wrt the freespaces available
1025 * in this a.g., then the cursor will be pointing to a btree entry
1026 * near the right edge of the tree. If it's in the last btree leaf
1027 * block, then we just examine all the entries in that block
1028 * that are big enough, and pick the best one.
1029 * This is written as a while loop so we can break out of it,
1030 * but we never loop back to the top.
1031 */
1032 while (xfs_btree_islastblock(cnt_cur, 0)) {
1033 xfs_extlen_t bdiff;
1034 int besti=0;
1035 xfs_extlen_t blen=0;
1036 xfs_agblock_t bnew=0;
1037
63d20d6e
DC
1038#ifdef DEBUG
1039 if (dofirst)
1da177e4
LT
1040 break;
1041#endif
1042 /*
1043 * Start from the entry that lookup found, sequence through
1044 * all larger free blocks. If we're actually pointing at a
1045 * record smaller than maxlen, go to the start of this block,
1046 * and skip all those smaller than minlen.
1047 */
1048 if (ltlen || args->alignment > 1) {
1049 cnt_cur->bc_ptrs[0] = 1;
1050 do {
1051 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
1052 &ltlen, &i)))
1053 goto error0;
c29aad41 1054 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1da177e4
LT
1055 if (ltlen >= args->minlen)
1056 break;
637aa50f 1057 if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
1da177e4
LT
1058 goto error0;
1059 } while (i);
1060 ASSERT(ltlen >= args->minlen);
1061 if (!i)
1062 break;
1063 }
1064 i = cnt_cur->bc_ptrs[0];
1065 for (j = 1, blen = 0, bdiff = 0;
1066 !error && j && (blen < args->maxlen || bdiff > 0);
637aa50f 1067 error = xfs_btree_increment(cnt_cur, 0, &j)) {
1da177e4
LT
1068 /*
1069 * For each entry, decide if it's better than
1070 * the previous best entry.
1071 */
1072 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1073 goto error0;
c29aad41 1074 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
ebf55872
CH
1075 busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
1076 &ltbnoa, &ltlena, &busy_gen);
e6430037 1077 if (ltlena < args->minlen)
1da177e4 1078 continue;
bfe46d4e
BF
1079 if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
1080 continue;
1da177e4
LT
1081 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1082 xfs_alloc_fix_len(args);
1083 ASSERT(args->len >= args->minlen);
1084 if (args->len < blen)
1085 continue;
1086 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
292378ed 1087 args->alignment, args->datatype, ltbnoa,
211d022c 1088 ltlena, &ltnew);
1da177e4
LT
1089 if (ltnew != NULLAGBLOCK &&
1090 (args->len > blen || ltdiff < bdiff)) {
1091 bdiff = ltdiff;
1092 bnew = ltnew;
1093 blen = args->len;
1094 besti = cnt_cur->bc_ptrs[0];
1095 }
1096 }
1097 /*
1098 * It didn't work. We COULD be in a case where
1099 * there's a good record somewhere, so try again.
1100 */
1101 if (blen == 0)
1102 break;
1103 /*
1104 * Point at the best entry, and retrieve it again.
1105 */
1106 cnt_cur->bc_ptrs[0] = besti;
1107 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1108 goto error0;
c29aad41 1109 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
73523a2e 1110 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1da177e4 1111 args->len = blen;
54fee133 1112
1da177e4
LT
1113 /*
1114 * We are allocating starting at bnew for blen blocks.
1115 */
1116 args->agbno = bnew;
1117 ASSERT(bnew >= ltbno);
73523a2e 1118 ASSERT(bnew + blen <= ltbno + ltlen);
1da177e4
LT
1119 /*
1120 * Set up a cursor for the by-bno tree.
1121 */
561f7d17
CH
1122 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
1123 args->agbp, args->agno, XFS_BTNUM_BNO);
1da177e4
LT
1124 /*
1125 * Fix up the btree entries.
1126 */
1127 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
1128 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
1129 goto error0;
1130 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1131 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
0b1b213f
CH
1132
1133 trace_xfs_alloc_near_first(args);
1da177e4
LT
1134 return 0;
1135 }
1136 /*
1137 * Second algorithm.
1138 * Search in the by-bno tree to the left and to the right
1139 * simultaneously, until in each case we find a space big enough,
1140 * or run into the edge of the tree. When we run into the edge,
1141 * we deallocate that cursor.
1142 * If both searches succeed, we compare the two spaces and pick
1143 * the better one.
1144 * With alignment, it's possible for both to fail; the upper
1145 * level algorithm that picks allocation groups for allocations
1146 * is not supposed to do this.
1147 */
1148 /*
1149 * Allocate and initialize the cursor for the leftward search.
1150 */
561f7d17
CH
1151 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1152 args->agno, XFS_BTNUM_BNO);
1da177e4
LT
1153 /*
1154 * Lookup <= bno to find the leftward search's starting point.
1155 */
1156 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
1157 goto error0;
1158 if (!i) {
1159 /*
1160 * Didn't find anything; use this cursor for the rightward
1161 * search.
1162 */
1163 bno_cur_gt = bno_cur_lt;
1164 bno_cur_lt = NULL;
1165 }
1166 /*
1167 * Found something. Duplicate the cursor for the rightward search.
1168 */
1169 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
1170 goto error0;
1171 /*
1172 * Increment the cursor, so we will point at the entry just right
1173 * of the leftward entry if any, or to the leftmost entry.
1174 */
637aa50f 1175 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1da177e4
LT
1176 goto error0;
1177 if (!i) {
1178 /*
1179 * It failed, there are no rightward entries.
1180 */
1181 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
1182 bno_cur_gt = NULL;
1183 }
1184 /*
1185 * Loop going left with the leftward cursor, right with the
1186 * rightward cursor, until either both directions give up or
1187 * we find an entry at least as big as minlen.
1188 */
1189 do {
1190 if (bno_cur_lt) {
1191 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1192 goto error0;
c29aad41 1193 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
ebf55872
CH
1194 busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
1195 &ltbnoa, &ltlena, &busy_gen);
bfe46d4e 1196 if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
1da177e4 1197 break;
8df4da4a 1198 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1da177e4 1199 goto error0;
bfe46d4e 1200 if (!i || ltbnoa < args->min_agbno) {
1da177e4
LT
1201 xfs_btree_del_cursor(bno_cur_lt,
1202 XFS_BTREE_NOERROR);
1203 bno_cur_lt = NULL;
1204 }
1205 }
1206 if (bno_cur_gt) {
1207 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1208 goto error0;
c29aad41 1209 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
ebf55872
CH
1210 busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
1211 &gtbnoa, &gtlena, &busy_gen);
bfe46d4e 1212 if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
1da177e4 1213 break;
637aa50f 1214 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1da177e4 1215 goto error0;
bfe46d4e 1216 if (!i || gtbnoa > args->max_agbno) {
1da177e4
LT
1217 xfs_btree_del_cursor(bno_cur_gt,
1218 XFS_BTREE_NOERROR);
1219 bno_cur_gt = NULL;
1220 }
1221 }
1222 } while (bno_cur_lt || bno_cur_gt);
489a150f 1223
1da177e4
LT
1224 /*
1225 * Got both cursors still active, need to find better entry.
1226 */
1227 if (bno_cur_lt && bno_cur_gt) {
1da177e4
LT
1228 if (ltlena >= args->minlen) {
1229 /*
489a150f 1230 * Left side is good, look for a right side entry.
1da177e4
LT
1231 */
1232 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1233 xfs_alloc_fix_len(args);
489a150f 1234 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
292378ed 1235 args->alignment, args->datatype, ltbnoa,
211d022c 1236 ltlena, &ltnew);
489a150f
CH
1237
1238 error = xfs_alloc_find_best_extent(args,
1239 &bno_cur_lt, &bno_cur_gt,
e26f0501
CH
1240 ltdiff, &gtbno, &gtlen,
1241 &gtbnoa, &gtlena,
489a150f
CH
1242 0 /* search right */);
1243 } else {
1244 ASSERT(gtlena >= args->minlen);
1245
1da177e4 1246 /*
489a150f 1247 * Right side is good, look for a left side entry.
1da177e4
LT
1248 */
1249 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1250 xfs_alloc_fix_len(args);
489a150f 1251 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
292378ed 1252 args->alignment, args->datatype, gtbnoa,
211d022c 1253 gtlena, &gtnew);
489a150f
CH
1254
1255 error = xfs_alloc_find_best_extent(args,
1256 &bno_cur_gt, &bno_cur_lt,
e26f0501
CH
1257 gtdiff, &ltbno, &ltlen,
1258 &ltbnoa, &ltlena,
489a150f 1259 1 /* search left */);
1da177e4 1260 }
489a150f
CH
1261
1262 if (error)
1263 goto error0;
1da177e4 1264 }
489a150f 1265
1da177e4
LT
1266 /*
1267 * If we couldn't get anything, give up.
1268 */
1269 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
e3a746f5
DC
1270 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1271
ebf55872 1272 if (busy) {
e26f0501 1273 trace_xfs_alloc_near_busy(args);
ebf55872 1274 xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
e26f0501
CH
1275 goto restart;
1276 }
0b1b213f 1277 trace_xfs_alloc_size_neither(args);
1da177e4
LT
1278 args->agbno = NULLAGBLOCK;
1279 return 0;
1280 }
489a150f 1281
1da177e4
LT
1282 /*
1283 * At this point we have selected a freespace entry, either to the
1284 * left or to the right. If it's on the right, copy all the
1285 * useful variables to the "left" set so we only have one
1286 * copy of this code.
1287 */
1288 if (bno_cur_gt) {
1289 bno_cur_lt = bno_cur_gt;
1290 bno_cur_gt = NULL;
1291 ltbno = gtbno;
1292 ltbnoa = gtbnoa;
1293 ltlen = gtlen;
1294 ltlena = gtlena;
1295 j = 1;
1296 } else
1297 j = 0;
489a150f 1298
1da177e4
LT
1299 /*
1300 * Fix up the length and compute the useful address.
1301 */
1da177e4
LT
1302 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1303 xfs_alloc_fix_len(args);
1da177e4 1304 rlen = args->len;
e26f0501 1305 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
292378ed 1306 args->datatype, ltbnoa, ltlena, &ltnew);
1da177e4 1307 ASSERT(ltnew >= ltbno);
e26f0501 1308 ASSERT(ltnew + rlen <= ltbnoa + ltlena);
16259e7d 1309 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
bfe46d4e 1310 ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
1da177e4 1311 args->agbno = ltnew;
e26f0501 1312
1da177e4
LT
1313 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1314 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1315 goto error0;
0b1b213f
CH
1316
1317 if (j)
1318 trace_xfs_alloc_near_greater(args);
1319 else
1320 trace_xfs_alloc_near_lesser(args);
1321
1da177e4
LT
1322 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1323 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1324 return 0;
1325
1326 error0:
0b1b213f 1327 trace_xfs_alloc_near_error(args);
1da177e4
LT
1328 if (cnt_cur != NULL)
1329 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1330 if (bno_cur_lt != NULL)
1331 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1332 if (bno_cur_gt != NULL)
1333 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1334 return error;
1335}
1336
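/*
 * Standalone sketch, not kernel code, of the second algorithm above:
 * walk left and right from the target block through a bno-sorted list
 * of free extents until each direction finds one at least minlen long
 * or runs out, then keep the closer candidate.  The array
 * representation, the names and the simple distance metric are
 * assumptions; the real code walks two by-bno btree cursors and also
 * accounts for alignment and busy trimming.
 */
struct example_free {
	unsigned int	bno;
	unsigned int	len;
};

static int				/* index of chosen extent, or -1 */
example_near_scan(
	const struct example_free	*ext,	/* sorted by bno */
	int				nr,
	unsigned int			agbno,	/* target block */
	unsigned int			minlen)
{
	int	lt = -1, gt = -1, i;

	for (i = nr - 1; i >= 0; i--)		/* nearest suitable on the left */
		if (ext[i].bno <= agbno && ext[i].len >= minlen) {
			lt = i;
			break;
		}
	for (i = 0; i < nr; i++)		/* nearest suitable on the right */
		if (ext[i].bno > agbno && ext[i].len >= minlen) {
			gt = i;
			break;
		}
	if (lt < 0)
		return gt;
	if (gt < 0)
		return lt;
	return (agbno - ext[lt].bno <= ext[gt].bno - agbno) ? lt : gt;
}
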
1337/*
1338 * Allocate a variable extent anywhere in the allocation group agno.
1339 * Extent's length (returned in len) will be between minlen and maxlen,
1340 * and of the form k * prod + mod unless there's nothing that large.
1341 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1342 */
1343STATIC int /* error */
1344xfs_alloc_ag_vextent_size(
1345 xfs_alloc_arg_t *args) /* allocation argument structure */
1346{
1347 xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
1348 xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
1349 int error; /* error result */
1350 xfs_agblock_t fbno; /* start of found freespace */
1351 xfs_extlen_t flen; /* length of found freespace */
1da177e4
LT
1352 int i; /* temp status variable */
1353 xfs_agblock_t rbno; /* returned block number */
1354 xfs_extlen_t rlen; /* length of returned extent */
ebf55872
CH
1355 bool busy;
1356 unsigned busy_gen;
1da177e4 1357
e26f0501 1358restart:
1da177e4
LT
1359 /*
1360 * Allocate and initialize a cursor for the by-size btree.
1361 */
561f7d17
CH
1362 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1363 args->agno, XFS_BTNUM_CNT);
1da177e4 1364 bno_cur = NULL;
ebf55872 1365 busy = false;
e26f0501 1366
1da177e4
LT
1367 /*
1368 * Look for an entry >= maxlen+alignment-1 blocks.
1369 */
1370 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1371 args->maxlen + args->alignment - 1, &i)))
1372 goto error0;
e26f0501 1373
1da177e4 1374 /*
ebf55872
CH
1375 * If none then we have to settle for a smaller extent. In the case that
1376 * there are no large extents, this will return the last entry in the
1377 * tree unless the tree is empty. In the case that there are only busy
1378 * large extents, this will return the largest small extent unless there
e26f0501 1379 * are no smaller extents available.
1da177e4 1380 */
ebf55872 1381 if (!i) {
e26f0501
CH
1382 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1383 &fbno, &flen, &i);
1384 if (error)
1da177e4
LT
1385 goto error0;
1386 if (i == 0 || flen == 0) {
1387 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
0b1b213f 1388 trace_xfs_alloc_size_noentry(args);
1da177e4
LT
1389 return 0;
1390 }
1391 ASSERT(i == 1);
ebf55872
CH
1392 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1393 &rlen, &busy_gen);
e26f0501
CH
1394 } else {
1395 /*
1396 * Search for a non-busy extent that is large enough.
e26f0501
CH
1397 */
1398 for (;;) {
1399 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1400 if (error)
1401 goto error0;
c29aad41 1402 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
e26f0501 1403
ebf55872
CH
1404 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1405 &rbno, &rlen, &busy_gen);
e26f0501
CH
1406
1407 if (rlen >= args->maxlen)
1408 break;
1409
1410 error = xfs_btree_increment(cnt_cur, 0, &i);
1411 if (error)
1412 goto error0;
1413 if (i == 0) {
1414 /*
1415 * Our only valid extents must have been busy.
1416 * Make it unbusy by forcing the log out and
ebf55872 1417 * retrying.
e26f0501
CH
1418 */
1419 xfs_btree_del_cursor(cnt_cur,
1420 XFS_BTREE_NOERROR);
1421 trace_xfs_alloc_size_busy(args);
ebf55872
CH
1422 xfs_extent_busy_flush(args->mp,
1423 args->pag, busy_gen);
e26f0501
CH
1424 goto restart;
1425 }
1426 }
1da177e4 1427 }
e26f0501 1428
1da177e4
LT
1429 /*
1430 * In the first case above, we got the last entry in the
1431 * by-size btree. Now we check to see if the space hits maxlen
1432 * once aligned; if not, we search left for something better.
1433 * This can't happen in the second case above.
1434 */
1da177e4 1435 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
c29aad41 1436 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1da177e4
LT
1437 (rlen <= flen && rbno + rlen <= fbno + flen), error0);
1438 if (rlen < args->maxlen) {
1439 xfs_agblock_t bestfbno;
1440 xfs_extlen_t bestflen;
1441 xfs_agblock_t bestrbno;
1442 xfs_extlen_t bestrlen;
1443
1444 bestrlen = rlen;
1445 bestrbno = rbno;
1446 bestflen = flen;
1447 bestfbno = fbno;
1448 for (;;) {
8df4da4a 1449 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1da177e4
LT
1450 goto error0;
1451 if (i == 0)
1452 break;
1453 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1454 &i)))
1455 goto error0;
c29aad41 1456 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1da177e4
LT
1457 if (flen < bestrlen)
1458 break;
ebf55872
CH
1459 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1460 &rbno, &rlen, &busy_gen);
1da177e4 1461 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
c29aad41 1462 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1da177e4
LT
1463 (rlen <= flen && rbno + rlen <= fbno + flen),
1464 error0);
1465 if (rlen > bestrlen) {
1466 bestrlen = rlen;
1467 bestrbno = rbno;
1468 bestflen = flen;
1469 bestfbno = fbno;
1470 if (rlen == args->maxlen)
1471 break;
1472 }
1473 }
1474 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1475 &i)))
1476 goto error0;
c29aad41 1477 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1da177e4
LT
1478 rlen = bestrlen;
1479 rbno = bestrbno;
1480 flen = bestflen;
1481 fbno = bestfbno;
1482 }
1483 args->wasfromfl = 0;
1484 /*
1485 * Fix up the length.
1486 */
1487 args->len = rlen;
e26f0501 1488 if (rlen < args->minlen) {
ebf55872 1489 if (busy) {
e26f0501
CH
1490 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1491 trace_xfs_alloc_size_busy(args);
ebf55872 1492 xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
e26f0501
CH
1493 goto restart;
1494 }
1495 goto out_nominleft;
1da177e4 1496 }
e26f0501
CH
1497 xfs_alloc_fix_len(args);
1498
1da177e4 1499 rlen = args->len;
c29aad41 1500 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
1da177e4
LT
1501 /*
1502 * Allocate and initialize a cursor for the by-block tree.
1503 */
561f7d17
CH
1504 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1505 args->agno, XFS_BTNUM_BNO);
1da177e4
LT
1506 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1507 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1508 goto error0;
1509 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1510 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1511 cnt_cur = bno_cur = NULL;
1512 args->len = rlen;
1513 args->agbno = rbno;
c29aad41 1514 XFS_WANT_CORRUPTED_GOTO(args->mp,
1da177e4 1515 args->agbno + args->len <=
16259e7d 1516 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1da177e4 1517 error0);
0b1b213f 1518 trace_xfs_alloc_size_done(args);
1da177e4
LT
1519 return 0;
1520
1521error0:
0b1b213f 1522 trace_xfs_alloc_size_error(args);
1da177e4
LT
1523 if (cnt_cur)
1524 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1525 if (bno_cur)
1526 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1527 return error;
e26f0501
CH
1528
1529out_nominleft:
1530 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1531 trace_xfs_alloc_size_nominleft(args);
1532 args->agbno = NULLAGBLOCK;
1533 return 0;
1da177e4
LT
1534}
1535
1536/*
1537 * Deal with the case where only small freespaces remain.
1538 * Either return the contents of the last freespace record,
1539 * or allocate space from the freelist if there is nothing in the tree.
1540 */
1541STATIC int /* error */
1542xfs_alloc_ag_vextent_small(
1543 xfs_alloc_arg_t *args, /* allocation argument structure */
1544 xfs_btree_cur_t *ccur, /* by-size cursor */
1545 xfs_agblock_t *fbnop, /* result block number */
1546 xfs_extlen_t *flenp, /* result length */
1547 int *stat) /* status: 0-freelist, 1-normal/none */
1548{
a03f1a66 1549 struct xfs_owner_info oinfo;
3fd129b6 1550 struct xfs_perag *pag;
1da177e4
LT
1551 int error;
1552 xfs_agblock_t fbno;
1553 xfs_extlen_t flen;
1da177e4
LT
1554 int i;
1555
8df4da4a 1556 if ((error = xfs_btree_decrement(ccur, 0, &i)))
1da177e4
LT
1557 goto error0;
1558 if (i) {
1559 if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1560 goto error0;
c29aad41 1561 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1da177e4
LT
1562 }
1563 /*
1564 * Nothing in the btree, try the freelist. Make sure
1565 * to respect minleft even when pulling from the
1566 * freelist.
1567 */
3fd129b6
DW
1568 else if (args->minlen == 1 && args->alignment == 1 &&
1569 args->resv != XFS_AG_RESV_AGFL &&
16259e7d
CH
1570 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1571 > args->minleft)) {
92821e2b
DC
1572 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1573 if (error)
1da177e4
LT
1574 goto error0;
1575 if (fbno != NULLAGBLOCK) {
4ecbfe63 1576 xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
292378ed 1577 xfs_alloc_allow_busy_reuse(args->datatype));
97d3ac75 1578
292378ed 1579 if (xfs_alloc_is_userdata(args->datatype)) {
1da177e4
LT
1580 xfs_buf_t *bp;
1581
1582 bp = xfs_btree_get_bufs(args->mp, args->tp,
1583 args->agno, fbno, 0);
93e8befc
ES
1584 if (!bp) {
1585 error = -EFSCORRUPTED;
1586 goto error0;
1587 }
1da177e4
LT
1588 xfs_trans_binval(args->tp, bp);
1589 }
1590 args->len = 1;
1591 args->agbno = fbno;
c29aad41 1592 XFS_WANT_CORRUPTED_GOTO(args->mp,
1da177e4 1593 args->agbno + args->len <=
16259e7d 1594 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1da177e4
LT
1595 error0);
1596 args->wasfromfl = 1;
0b1b213f 1597 trace_xfs_alloc_small_freelist(args);
a03f1a66
DW
1598
1599 /*
1600 * If we're feeding an AGFL block to something that
1601 * doesn't live in the free space, we need to clear
3fd129b6
DW
1602 * out the OWN_AG rmap and add the block back to
1603 * the AGFL per-AG reservation.
a03f1a66
DW
1604 */
1605 xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
1606 error = xfs_rmap_free(args->tp, args->agbp, args->agno,
1607 fbno, 1, &oinfo);
1608 if (error)
1609 goto error0;
3fd129b6
DW
1610 pag = xfs_perag_get(args->mp, args->agno);
1611 xfs_ag_resv_free_extent(pag, XFS_AG_RESV_AGFL,
1612 args->tp, 1);
1613 xfs_perag_put(pag);
a03f1a66 1614
1da177e4
LT
1615 *stat = 0;
1616 return 0;
1617 }
1618 /*
1619 * Nothing in the freelist.
1620 */
1621 else
1622 flen = 0;
1623 }
1624 /*
1625 * Can't allocate from the freelist for some reason.
1626 */
d432c80e
NS
1627 else {
1628 fbno = NULLAGBLOCK;
1da177e4 1629 flen = 0;
d432c80e 1630 }
1da177e4
LT
1631 /*
1632 * Can't do the allocation, give up.
1633 */
1634 if (flen < args->minlen) {
1635 args->agbno = NULLAGBLOCK;
0b1b213f 1636 trace_xfs_alloc_small_notenough(args);
1da177e4
LT
1637 flen = 0;
1638 }
1639 *fbnop = fbno;
1640 *flenp = flen;
1641 *stat = 1;
0b1b213f 1642 trace_xfs_alloc_small_done(args);
1da177e4
LT
1643 return 0;
1644
1645error0:
0b1b213f 1646 trace_xfs_alloc_small_error(args);
1da177e4
LT
1647 return error;
1648}
1649
1650/*
1651 * Free the extent starting at agno/bno for length.
1652 */
340785cc 1653STATIC int
1da177e4 1654xfs_free_ag_extent(
340785cc
DW
1655 xfs_trans_t *tp,
1656 xfs_buf_t *agbp,
1657 xfs_agnumber_t agno,
1658 xfs_agblock_t bno,
1659 xfs_extlen_t len,
1660 struct xfs_owner_info *oinfo,
3fd129b6 1661 enum xfs_ag_resv_type type)
1da177e4
LT
1662{
1663 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1664 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1665 int error; /* error return value */
1da177e4
LT
1666 xfs_agblock_t gtbno; /* start of right neighbor block */
1667 xfs_extlen_t gtlen; /* length of right neighbor block */
1668 int haveleft; /* have a left neighbor block */
1669 int haveright; /* have a right neighbor block */
1670 int i; /* temp, result code */
1671 xfs_agblock_t ltbno; /* start of left neighbor block */
1672 xfs_extlen_t ltlen; /* length of left neighbor block */
1673 xfs_mount_t *mp; /* mount point struct for filesystem */
1674 xfs_agblock_t nbno; /* new starting block of freespace */
1675 xfs_extlen_t nlen; /* new length of freespace */
ecb6928f 1676 xfs_perag_t *pag; /* per allocation group data */
1da177e4 1677
673930c3 1678 bno_cur = cnt_cur = NULL;
1da177e4 1679 mp = tp->t_mountp;
673930c3 1680
33df3a9c 1681 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
673930c3
DW
1682 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1683 if (error)
1684 goto error0;
1685 }
1686
1da177e4
LT
1687 /*
1688 * Allocate and initialize a cursor for the by-block btree.
1689 */
561f7d17 1690 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1da177e4
LT
1691 /*
1692 * Look for a neighboring block on the left (lower block numbers)
1693 * that is contiguous with this space.
1694 */
1695 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1696 goto error0;
1697 if (haveleft) {
1698 /*
1699 * There is a block to our left.
1700 */
1701 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1702 goto error0;
c29aad41 1703 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1704 /*
1705 * It's not contiguous, though.
1706 */
1707 if (ltbno + ltlen < bno)
1708 haveleft = 0;
1709 else {
1710 /*
1711 * If this failure happens the request to free this
1712 * space was invalid, it's (partly) already free.
1713 * Very bad.
1714 */
1715 			XFS_WANT_CORRUPTED_GOTO(mp,
1716 				ltbno + ltlen <= bno, error0);
1717 		}
1718 }
1719 /*
1720 * Look for a neighboring block on the right (higher block numbers)
1721 * that is contiguous with this space.
1722 */
637aa50f 1723 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1da177e4
LT
1724 goto error0;
1725 if (haveright) {
1726 /*
1727 * There is a block to our right.
1728 */
1729 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1730 goto error0;
c29aad41 1731 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1732 /*
1733 * It's not contiguous, though.
1734 */
1735 if (bno + len < gtbno)
1736 haveright = 0;
1737 else {
1738 /*
1739 * If this failure happens the request to free this
1740 * space was invalid, it's (partly) already free.
1741 * Very bad.
1742 */
c29aad41 1743 XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1da177e4
LT
1744 }
1745 }
1746 /*
1747 * Now allocate and initialize a cursor for the by-size tree.
1748 */
561f7d17 1749 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1da177e4
LT
1750 /*
1751 * Have both left and right contiguous neighbors.
1752 * Merge all three into a single free block.
1753 */
1754 if (haveleft && haveright) {
1755 /*
1756 * Delete the old by-size entry on the left.
1757 */
1758 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1759 goto error0;
c29aad41 1760 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
91cca5df 1761 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 1762 goto error0;
c29aad41 1763 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1764 /*
1765 * Delete the old by-size entry on the right.
1766 */
1767 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1768 goto error0;
c29aad41 1769 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
91cca5df 1770 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 1771 goto error0;
c29aad41 1772 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1773 /*
1774 * Delete the old by-block entry for the right block.
1775 */
91cca5df 1776 if ((error = xfs_btree_delete(bno_cur, &i)))
1da177e4 1777 goto error0;
c29aad41 1778 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1779 /*
1780 * Move the by-block cursor back to the left neighbor.
1781 */
8df4da4a 1782 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1da177e4 1783 goto error0;
c29aad41 1784 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1785#ifdef DEBUG
1786 /*
1787 * Check that this is the right record: delete didn't
1788 * mangle the cursor.
1789 */
1790 {
1791 xfs_agblock_t xxbno;
1792 xfs_extlen_t xxlen;
1793
1794 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1795 &i)))
1796 goto error0;
1797 			XFS_WANT_CORRUPTED_GOTO(mp,
1798 				i == 1 && xxbno == ltbno && xxlen == ltlen,
1799 				error0);
1800 }
1801#endif
1802 /*
1803 * Update remaining by-block entry to the new, joined block.
1804 */
1805 nbno = ltbno;
1806 nlen = len + ltlen + gtlen;
1807 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1808 goto error0;
1809 }
1810 /*
1811 * Have only a left contiguous neighbor.
1812 * Merge it together with the new freespace.
1813 */
1814 else if (haveleft) {
1815 /*
1816 * Delete the old by-size entry on the left.
1817 */
1818 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1819 goto error0;
c29aad41 1820 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
91cca5df 1821 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 1822 goto error0;
c29aad41 1823 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1824 /*
1825 * Back up the by-block cursor to the left neighbor, and
1826 * update its length.
1827 */
8df4da4a 1828 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1da177e4 1829 goto error0;
c29aad41 1830 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1831 nbno = ltbno;
1832 nlen = len + ltlen;
1833 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1834 goto error0;
1835 }
1836 /*
1837 * Have only a right contiguous neighbor.
1838 * Merge it together with the new freespace.
1839 */
1840 else if (haveright) {
1841 /*
1842 * Delete the old by-size entry on the right.
1843 */
1844 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1845 goto error0;
c29aad41 1846 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
91cca5df 1847 if ((error = xfs_btree_delete(cnt_cur, &i)))
1da177e4 1848 goto error0;
c29aad41 1849 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1850 /*
1851 * Update the starting block and length of the right
1852 * neighbor in the by-block tree.
1853 */
1854 nbno = bno;
1855 nlen = len + gtlen;
1856 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1857 goto error0;
1858 }
1859 /*
1860 * No contiguous neighbors.
1861 * Insert the new freespace into the by-block tree.
1862 */
1863 else {
1864 nbno = bno;
1865 nlen = len;
4b22a571 1866 if ((error = xfs_btree_insert(bno_cur, &i)))
1da177e4 1867 goto error0;
c29aad41 1868 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1869 }
1870 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1871 bno_cur = NULL;
1872 /*
1873 * In all cases we need to insert the new freespace in the by-size tree.
1874 */
1875 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1876 goto error0;
c29aad41 1877 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
4b22a571 1878 if ((error = xfs_btree_insert(cnt_cur, &i)))
1da177e4 1879 goto error0;
c29aad41 1880 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1da177e4
LT
1881 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1882 cnt_cur = NULL;
ecb6928f 1883
1da177e4
LT
1884 /*
1885 * Update the freespace totals in the ag and superblock.
1886 */
ecb6928f
CH
1887 pag = xfs_perag_get(mp, agno);
1888 error = xfs_alloc_update_counters(tp, pag, agbp, len);
3fd129b6 1889 xfs_ag_resv_free_extent(pag, type, tp, len);
ecb6928f
CH
1890 xfs_perag_put(pag);
1891 if (error)
1892 goto error0;
1893
ff6d6af2
BD
1894 XFS_STATS_INC(mp, xs_freex);
1895 XFS_STATS_ADD(mp, xs_freeb, len);
0b1b213f 1896
3fd129b6
DW
1897 trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
1898 haveleft, haveright);
1da177e4 1899
1da177e4
LT
1900 return 0;
1901
1902 error0:
3fd129b6
DW
1903 trace_xfs_free_extent(mp, agno, bno, len, type == XFS_AG_RESV_AGFL,
1904 -1, -1);
1da177e4
LT
1905 if (bno_cur)
1906 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1907 if (cnt_cur)
1908 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1909 return error;
1910}
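/*
 * Editor's note: xfs_free_ag_extent() coalesces the freed range with a
 * physically contiguous record on either side before reinserting one
 * combined record into both free space btrees.  A minimal sketch of
 * just that merge arithmetic, without the btree cursors (illustrative
 * only; names are the editor's):
 */
static inline void free_extent_merge_sketch(
	unsigned int	bno,		/* start of the freed extent */
	unsigned int	len,		/* length of the freed extent */
	int		haveleft,	/* left neighbour record exists */
	unsigned int	ltbno,
	unsigned int	ltlen,
	int		haveright,	/* right neighbour record exists */
	unsigned int	gtbno,
	unsigned int	gtlen,
	unsigned int	*nbno,		/* start of the merged record */
	unsigned int	*nlen)		/* length of the merged record */
{
	/* A left neighbour only merges if it ends exactly at bno. */
	if (haveleft && ltbno + ltlen != bno)
		haveleft = 0;
	/* A right neighbour only merges if it starts exactly at bno + len. */
	if (haveright && bno + len != gtbno)
		haveright = 0;

	*nbno = haveleft ? ltbno : bno;
	*nlen = len + (haveleft ? ltlen : 0) + (haveright ? gtlen : 0);
}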
1911
1912/*
1913 * Visible (exported) allocation/free functions.
1914 * Some of these are used just by xfs_alloc_btree.c and this file.
1915 */
1916
1917/*
1918 * Compute and fill in value of m_ag_maxlevels.
1919 */
1920void
1921xfs_alloc_compute_maxlevels(
1922 xfs_mount_t *mp) /* file system mount structure */
1923{
19b54ee6
DW
1924 mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
1925 (mp->m_sb.sb_agblocks + 1) / 2);
1da177e4
LT
1926}
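/*
 * Editor's note: the worst case for the free space btrees is alternating
 * single-block free extents, i.e. at most (sb_agblocks + 1) / 2 records
 * per AG.  xfs_btree_compute_maxlevels() then sizes the tree for that
 * record count roughly as in the sketch below, where leaf_minrecs and
 * node_minrecs are the minimum records per leaf/node block (illustrative
 * only; names are the editor's):
 */
static inline unsigned int btree_maxlevels_sketch(
	unsigned int		leaf_minrecs,
	unsigned int		node_minrecs,
	unsigned long long	maxrecords)
{
	unsigned long long	blocks;
	unsigned int		level;

	/* Leaf blocks needed if every leaf holds only its minimum. */
	blocks = (maxrecords + leaf_minrecs - 1) / leaf_minrecs;
	/* Add node levels until a single root block covers everything. */
	for (level = 1; blocks > 1; level++)
		blocks = (blocks + node_minrecs - 1) / node_minrecs;
	return level;
}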
1927
6cc87645 1928/*
3fd129b6
DW
1929 * Find the length of the longest extent in an AG. The 'need' parameter
1930 * specifies how much space we're going to need for the AGFL and the
1931 * 'reserved' parameter tells us how many blocks in this AG are reserved for
1932 * other callers.
6cc87645
DC
1933 */
1934xfs_extlen_t
1935xfs_alloc_longest_free_extent(
1936 struct xfs_mount *mp,
50adbcb4 1937 struct xfs_perag *pag,
3fd129b6
DW
1938 xfs_extlen_t need,
1939 xfs_extlen_t reserved)
6cc87645 1940{
50adbcb4 1941 xfs_extlen_t delta = 0;
6cc87645 1942
3fd129b6
DW
1943 /*
1944 * If the AGFL needs a recharge, we'll have to subtract that from the
1945 * longest extent.
1946 */
6cc87645
DC
1947 if (need > pag->pagf_flcount)
1948 delta = need - pag->pagf_flcount;
1949
3fd129b6
DW
1950 /*
1951 * If we cannot maintain others' reservations with space from the
1952 * not-longest freesp extents, we'll have to subtract /that/ from
1953 * the longest extent too.
1954 */
1955 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
1956 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
1957
1958 /*
1959 * If the longest extent is long enough to satisfy all the
1960 * reservations and AGFL rules in place, we can return this extent.
1961 */
6cc87645
DC
1962 if (pag->pagf_longest > delta)
1963 return pag->pagf_longest - delta;
3fd129b6
DW
1964
1965 /* Otherwise, let the caller try for 1 block if there's space. */
6cc87645
DC
1966 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1967}
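/*
 * Editor's note: the value returned above is the on-disk longest free
 * extent minus whatever an AGFL refill and the other per-AG reservations
 * may have to carve out of it.  The same arithmetic as a self-contained
 * sketch (illustrative only; names are the editor's):
 */
static inline unsigned int longest_free_sketch(
	unsigned int	longest,	/* pag->pagf_longest */
	unsigned int	freeblks,	/* pag->pagf_freeblks */
	unsigned int	flcount,	/* pag->pagf_flcount */
	unsigned int	need,		/* blocks needed by the AGFL */
	unsigned int	reserved)	/* blocks reserved for others */
{
	unsigned int	delta = 0;

	/* An AGFL refill is taken out of the longest extent first. */
	if (need > flcount)
		delta = need - flcount;
	/* So is any reservation the shorter extents cannot cover. */
	if (freeblks - longest < reserved)
		delta += reserved - (freeblks - longest);

	if (longest > delta)
		return longest - delta;
	/* Otherwise the caller may still try for a single block. */
	return (flcount > 0 || longest > 0) ? 1 : 0;
}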
1968
496817b4
DC
1969unsigned int
1970xfs_alloc_min_freelist(
1971 struct xfs_mount *mp,
1972 struct xfs_perag *pag)
1973{
1974 unsigned int min_free;
1975
1976 /* space needed by-bno freespace btree */
1977 min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
1978 mp->m_ag_maxlevels);
1979 /* space needed by-size freespace btree */
1980 min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
1981 mp->m_ag_maxlevels);
52548852
DW
1982 /* space needed reverse mapping used space btree */
1983 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1984 min_free += min_t(unsigned int,
1985 pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
1986 mp->m_rmap_maxlevels);
496817b4
DC
1987
1988 return min_free;
1989}
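/*
 * Editor's note: the minimum freelist length is the worst case number of
 * new blocks a single allocation can demand from each free space btree:
 * a full split needs one block per level plus a new root, capped at the
 * maximum possible tree height.  Worked example as a sketch: bnobt and
 * cntbt both two levels high plus a three-level rmapbt give
 * 3 + 3 + 4 = 10 freelist blocks, assuming the caps do not apply
 * (illustrative only; names are the editor's):
 */
static inline unsigned int min_freelist_sketch(
	unsigned int	bno_levels,
	unsigned int	cnt_levels,
	unsigned int	rmap_levels,	/* 0 when there is no rmap btree */
	unsigned int	ag_maxlevels,
	unsigned int	rmap_maxlevels)
{
	unsigned int	min_free;

	min_free  = bno_levels + 1 < ag_maxlevels ?
			bno_levels + 1 : ag_maxlevels;
	min_free += cnt_levels + 1 < ag_maxlevels ?
			cnt_levels + 1 : ag_maxlevels;
	if (rmap_levels)
		min_free += rmap_levels + 1 < rmap_maxlevels ?
				rmap_levels + 1 : rmap_maxlevels;
	return min_free;
}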
1990
72d55285
DC
1991/*
1992 * Check if the operation we are fixing up the freelist for should go ahead or
1993 * not. If we are freeing blocks, we always allow it, otherwise the allocation
1994 * is dependent on whether the size and shape of free space available will
1995 * permit the requested allocation to take place.
1996 */
1997static bool
1998xfs_alloc_space_available(
1999 struct xfs_alloc_arg *args,
2000 xfs_extlen_t min_free,
2001 int flags)
2002{
2003 struct xfs_perag *pag = args->pag;
12ef8301 2004 xfs_extlen_t alloc_len, longest;
3fd129b6 2005 xfs_extlen_t reservation; /* blocks that are still reserved */
72d55285
DC
2006 int available;
2007
2008 if (flags & XFS_ALLOC_FLAG_FREEING)
2009 return true;
2010
3fd129b6
DW
2011 reservation = xfs_ag_resv_needed(pag, args->resv);
2012
72d55285 2013 /* do we have enough contiguous free space for the allocation? */
12ef8301 2014 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
3fd129b6
DW
2015 longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
2016 reservation);
12ef8301 2017 if (longest < alloc_len)
72d55285
DC
2018 return false;
2019
3fd129b6 2020 /* do we have enough free space remaining for the allocation? */
72d55285 2021 available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
54fee133 2022 reservation - min_free - args->minleft);
12ef8301 2023 if (available < (int)max(args->total, alloc_len))
72d55285
DC
2024 return false;
2025
54fee133
CH
2026 /*
2027 * Clamp maxlen to the amount of free space available for the actual
2028 * extent allocation.
2029 */
2030 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2031 args->maxlen = available;
2032 ASSERT(args->maxlen > 0);
2033 ASSERT(args->maxlen >= args->minlen);
2034 }
2035
72d55285
DC
2036 return true;
2037}
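/*
 * Editor's note: the check above asks two questions - is any single free
 * extent long enough for the worst-case aligned request, and does the AG
 * keep enough total space once the freelist, reservations and minleft
 * are paid for.  A simplified sketch of the same test, ignoring the
 * maxlen clamping (illustrative only; names are the editor's):
 */
static inline int space_available_sketch(
	unsigned int	longest,	/* usable longest free extent */
	unsigned int	freeblks,
	unsigned int	flcount,
	unsigned int	reservation,
	unsigned int	min_free,
	unsigned int	minlen,
	unsigned int	alignment,
	unsigned int	minalignslop,
	unsigned int	minleft,
	unsigned int	total)
{
	unsigned int	alloc_len = minlen + (alignment - 1) + minalignslop;
	long long	available = (long long)freeblks + flcount -
				    reservation - min_free - minleft;
	long long	wanted = total > alloc_len ? total : alloc_len;

	if (longest < alloc_len)
		return 0;	/* no single extent is big enough */
	if (available < wanted)
		return 0;	/* too little space left in the AG overall */
	return 1;
}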
2038
1da177e4
LT
2039/*
2040 * Decide whether to use this allocation group for this allocation.
2041 * If so, fix up the btree freelist's size.
2042 */
2e9101da 2043int /* error */
1da177e4 2044xfs_alloc_fix_freelist(
396503fc
DC
2045 struct xfs_alloc_arg *args, /* allocation argument structure */
2046 int flags) /* XFS_ALLOC_FLAG_... */
1da177e4 2047{
396503fc
DC
2048 struct xfs_mount *mp = args->mp;
2049 struct xfs_perag *pag = args->pag;
2050 struct xfs_trans *tp = args->tp;
2051 struct xfs_buf *agbp = NULL;
2052 struct xfs_buf *agflbp = NULL;
2053 struct xfs_alloc_arg targs; /* local allocation arguments */
2054 xfs_agblock_t bno; /* freelist block */
2055 xfs_extlen_t need; /* total blocks needed in freelist */
c184f855 2056 int error = 0;
396503fc 2057
1da177e4 2058 if (!pag->pagf_init) {
396503fc
DC
2059 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2060 if (error)
2061 goto out_no_agbp;
1da177e4 2062 if (!pag->pagf_init) {
0e1edbd9
NS
2063 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2064 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
396503fc 2065 goto out_agbp_relse;
1da177e4 2066 }
396503fc 2067 }
1da177e4 2068
0e1edbd9 2069 /*
396503fc
DC
2070  * If this is a metadata-preferred pag and we are allocating user data,
2071  * try somewhere else unless we are being asked to try harder at this
2072  * point.
1da177e4 2073 */
292378ed 2074 if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
0e1edbd9
NS
2075 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2076 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
396503fc 2077 goto out_agbp_relse;
1da177e4
LT
2078 }
2079
496817b4 2080 need = xfs_alloc_min_freelist(mp, pag);
54fee133
CH
2081 if (!xfs_alloc_space_available(args, need, flags |
2082 XFS_ALLOC_FLAG_CHECK))
396503fc 2083 goto out_agbp_relse;
0e1edbd9 2084
1da177e4
LT
2085 /*
2086 * Get the a.g. freespace buffer.
2087 * Can fail if we're not blocking on locks, and it's held.
2088 */
396503fc
DC
2089 if (!agbp) {
2090 error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2091 if (error)
2092 goto out_no_agbp;
2093 if (!agbp) {
0e1edbd9
NS
2094 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2095 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
396503fc 2096 goto out_no_agbp;
0e1edbd9 2097 }
1da177e4 2098 }
50adbcb4 2099
50adbcb4 2100 	/* If there isn't enough total space or a large enough single extent, reject it. */
496817b4 2101 need = xfs_alloc_min_freelist(mp, pag);
396503fc
DC
2102 if (!xfs_alloc_space_available(args, need, flags))
2103 goto out_agbp_relse;
72d55285 2104
1da177e4
LT
2105 	/*
2106 	 * Make the freelist shorter if it's too long.
2107 	 *
2108 	 * Note that from this point onwards, we will always release the agf and
2109 	 * agfl buffers on error. This handles the case where we error out and
2110 	 * the buffers are clean or may not have been joined to the transaction
2111 	 * and hence need to be released manually. If they have been joined to
2112 	 * the transaction, then xfs_trans_brelse() will handle them
2113 	 * appropriately based on the recursion count and dirty state of the
2114 	 * buffer.
2115 	 *
2116 	 * XXX (dgc): When we have lots of free space, does this buy us
2117 	 * anything other than extra overhead when we need to put more blocks
2118 	 * back on the free list? Maybe we should only do this when space is
2119 	 * getting low or the AGFL is more than half full?
2120 	 *
2121 	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2122 	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2123 	 * updating the rmapbt. Both flags are used in xfs_repair while we're
2124 	 * rebuilding the rmapbt, and neither are used by the kernel. They're
2125 	 * both required to ensure that rmaps are correctly recorded for the
2126 	 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2127 	 * repair/rmap.c in xfsprogs for details.
2128 	 */
04f13060
DW
2129 memset(&targs, 0, sizeof(targs));
2130 if (flags & XFS_ALLOC_FLAG_NORMAP)
2131 xfs_rmap_skip_owner_update(&targs.oinfo);
2132 else
2133 xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
2134 while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
50adbcb4 2135 struct xfs_buf *bp;
1da177e4 2136
92821e2b
DC
2137 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2138 if (error)
396503fc 2139 goto out_agbp_relse;
340785cc 2140 error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
3fd129b6 2141 &targs.oinfo, XFS_AG_RESV_AGFL);
50adbcb4 2142 if (error)
396503fc 2143 goto out_agbp_relse;
1da177e4 2144 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
93e8befc
ES
2145 if (!bp) {
2146 error = -EFSCORRUPTED;
2147 goto out_agbp_relse;
2148 }
1da177e4
LT
2149 xfs_trans_binval(tp, bp);
2150 }
50adbcb4 2151
1da177e4
LT
2152 targs.tp = tp;
2153 targs.mp = mp;
2154 targs.agbp = agbp;
2155 targs.agno = args->agno;
3fd129b6 2156 targs.alignment = targs.minlen = targs.prod = 1;
1da177e4
LT
2157 targs.type = XFS_ALLOCTYPE_THIS_AG;
2158 targs.pag = pag;
50adbcb4
DC
2159 error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2160 if (error)
396503fc 2161 goto out_agbp_relse;
50adbcb4
DC
2162
2163 /* Make the freelist longer if it's too short. */
2164 while (pag->pagf_flcount < need) {
1da177e4 2165 targs.agbno = 0;
50adbcb4 2166 targs.maxlen = need - pag->pagf_flcount;
3fd129b6 2167 targs.resv = XFS_AG_RESV_AGFL;
50adbcb4
DC
2168
2169 /* Allocate as many blocks as possible at once. */
2170 error = xfs_alloc_ag_vextent(&targs);
396503fc
DC
2171 if (error)
2172 goto out_agflbp_relse;
2173
1da177e4
LT
2174 /*
2175 * Stop if we run out. Won't happen if callers are obeying
2176 * the restrictions correctly. Can happen for free calls
2177 * on a completely full ag.
2178 */
d210a28c 2179 if (targs.agbno == NULLAGBLOCK) {
0e1edbd9
NS
2180 if (flags & XFS_ALLOC_FLAG_FREEING)
2181 break;
396503fc 2182 goto out_agflbp_relse;
d210a28c 2183 }
1da177e4
LT
2184 /*
2185 * Put each allocated block on the list.
2186 */
2187 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
92821e2b
DC
2188 error = xfs_alloc_put_freelist(tp, agbp,
2189 agflbp, bno, 0);
2190 if (error)
396503fc 2191 goto out_agflbp_relse;
1da177e4
LT
2192 }
2193 }
e63a3690 2194 xfs_trans_brelse(tp, agflbp);
1da177e4
LT
2195 args->agbp = agbp;
2196 return 0;
396503fc
DC
2197
2198out_agflbp_relse:
2199 xfs_trans_brelse(tp, agflbp);
2200out_agbp_relse:
2201 if (agbp)
2202 xfs_trans_brelse(tp, agbp);
2203out_no_agbp:
2204 args->agbp = NULL;
2205 return error;
1da177e4
LT
2206}
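/*
 * Editor's note: once the AG is judged usable, the function above
 * normalises the AGFL to the "need" computed by xfs_alloc_min_freelist():
 * surplus AGFL blocks are freed back into the free space btrees, and a
 * short AGFL is refilled by allocating from this AG (as many blocks as
 * possible per call).  Stripped of transactions, buffers and rmap
 * updates, the control flow reduces to this sketch (illustrative only;
 * names are the editor's):
 */
static inline void fix_freelist_sketch(
	unsigned int	*flcount,
	unsigned int	need,
	int		noshrink)	/* XFS_ALLOC_FLAG_NOSHRINK */
{
	while (!noshrink && *flcount > need)
		(*flcount)--;	/* xfs_alloc_get_freelist + free the block */
	while (*flcount < need)
		(*flcount)++;	/* allocate a block + xfs_alloc_put_freelist */
}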
2207
2208/*
2209 * Get a block from the freelist.
2210 * Returns with the buffer for the block gotten.
2211 */
2212int /* error */
2213xfs_alloc_get_freelist(
2214 xfs_trans_t *tp, /* transaction pointer */
2215 xfs_buf_t *agbp, /* buffer containing the agf structure */
92821e2b
DC
2216 xfs_agblock_t *bnop, /* block address retrieved from freelist */
2217 int btreeblk) /* destination is a AGF btree */
1da177e4
LT
2218{
2219 xfs_agf_t *agf; /* a.g. freespace structure */
1da177e4
LT
2220 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
2221 xfs_agblock_t bno; /* block number returned */
77c95bba 2222 __be32 *agfl_bno;
1da177e4 2223 int error;
92821e2b 2224 int logflags;
77c95bba 2225 xfs_mount_t *mp = tp->t_mountp;
1da177e4
LT
2226 xfs_perag_t *pag; /* per allocation group data */
2227
1da177e4
LT
2228 /*
2229 * Freelist is empty, give up.
2230 */
77c95bba 2231 agf = XFS_BUF_TO_AGF(agbp);
1da177e4
LT
2232 if (!agf->agf_flcount) {
2233 *bnop = NULLAGBLOCK;
2234 return 0;
2235 }
2236 /*
2237 * Read the array of free blocks.
2238 */
77c95bba
CH
2239 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2240 &agflbp);
2241 if (error)
1da177e4 2242 return error;
77c95bba
CH
2243
2244
1da177e4
LT
2245 /*
2246 * Get the block number and update the data structures.
2247 */
77c95bba
CH
2248 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2249 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
413d57c9 2250 be32_add_cpu(&agf->agf_flfirst, 1);
1da177e4 2251 xfs_trans_brelse(tp, agflbp);
16259e7d 2252 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1da177e4 2253 agf->agf_flfirst = 0;
a862e0fd
DC
2254
2255 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
413d57c9 2256 be32_add_cpu(&agf->agf_flcount, -1);
1da177e4
LT
2257 xfs_trans_agflist_delta(tp, -1);
2258 pag->pagf_flcount--;
a862e0fd 2259 xfs_perag_put(pag);
92821e2b
DC
2260
2261 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2262 if (btreeblk) {
413d57c9 2263 be32_add_cpu(&agf->agf_btreeblks, 1);
92821e2b
DC
2264 pag->pagf_btreeblks++;
2265 logflags |= XFS_AGF_BTREEBLKS;
2266 }
2267
92821e2b 2268 xfs_alloc_log_agf(tp, agbp, logflags);
1da177e4
LT
2269 *bnop = bno;
2270
1da177e4
LT
2271 return 0;
2272}
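/*
 * Editor's note: the AGFL is a fixed-size circular array of block
 * numbers with agf_flfirst as head and agf_fllast as tail.  Getting a
 * block consumes the head entry and wraps the index at the array size,
 * as in this self-contained sketch (illustrative only; names are the
 * editor's):
 */
static inline int agfl_get_sketch(
	unsigned int	*agfl,		/* the on-disk block number array */
	unsigned int	size,		/* XFS_AGFL_SIZE() */
	unsigned int	*flfirst,	/* head index */
	unsigned int	*flcount,	/* number of entries in use */
	unsigned int	*bno)		/* block number handed out */
{
	if (*flcount == 0)
		return -1;	/* empty; the real code returns NULLAGBLOCK */
	*bno = agfl[*flfirst];	/* take the head entry */
	if (++(*flfirst) == size)
		*flfirst = 0;	/* wrap the head index */
	(*flcount)--;
	return 0;
}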
2273
2274/*
2275 * Log the given fields from the agf structure.
2276 */
2277void
2278xfs_alloc_log_agf(
2279 xfs_trans_t *tp, /* transaction pointer */
2280 xfs_buf_t *bp, /* buffer for a.g. freelist header */
2281 int fields) /* mask of fields to be logged (XFS_AGF_...) */
2282{
2283 int first; /* first byte offset */
2284 int last; /* last byte offset */
2285 static const short offsets[] = {
2286 offsetof(xfs_agf_t, agf_magicnum),
2287 offsetof(xfs_agf_t, agf_versionnum),
2288 offsetof(xfs_agf_t, agf_seqno),
2289 offsetof(xfs_agf_t, agf_length),
2290 offsetof(xfs_agf_t, agf_roots[0]),
2291 offsetof(xfs_agf_t, agf_levels[0]),
2292 offsetof(xfs_agf_t, agf_flfirst),
2293 offsetof(xfs_agf_t, agf_fllast),
2294 offsetof(xfs_agf_t, agf_flcount),
2295 offsetof(xfs_agf_t, agf_freeblks),
2296 offsetof(xfs_agf_t, agf_longest),
92821e2b 2297 offsetof(xfs_agf_t, agf_btreeblks),
4e0e6040 2298 offsetof(xfs_agf_t, agf_uuid),
f32866fd 2299 offsetof(xfs_agf_t, agf_rmap_blocks),
bdf28630
DW
2300 offsetof(xfs_agf_t, agf_refcount_blocks),
2301 offsetof(xfs_agf_t, agf_refcount_root),
2302 offsetof(xfs_agf_t, agf_refcount_level),
da1f039d
DW
2303 /* needed so that we don't log the whole rest of the structure: */
2304 offsetof(xfs_agf_t, agf_spare64),
1da177e4
LT
2305 sizeof(xfs_agf_t)
2306 };
2307
0b1b213f
CH
2308 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2309
61fe135c 2310 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
4e0e6040 2311
1da177e4
LT
2312 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2313 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2314}
2315
2316/*
2317 * Interface for inode allocation to force the pag data to be initialized.
2318 */
2319int /* error */
2320xfs_alloc_pagf_init(
2321 xfs_mount_t *mp, /* file system mount structure */
2322 xfs_trans_t *tp, /* transaction pointer */
2323 xfs_agnumber_t agno, /* allocation group number */
2324 int flags) /* XFS_ALLOC_FLAGS_... */
2325{
2326 xfs_buf_t *bp;
2327 int error;
2328
2329 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2330 return error;
2331 if (bp)
2332 xfs_trans_brelse(tp, bp);
2333 return 0;
2334}
2335
2336/*
2337 * Put the block on the freelist for the allocation group.
2338 */
2339int /* error */
2340xfs_alloc_put_freelist(
2341 xfs_trans_t *tp, /* transaction pointer */
2342 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2343 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
92821e2b
DC
2344 xfs_agblock_t bno, /* block being freed */
2345 int btreeblk) /* block came from a AGF btree */
1da177e4
LT
2346{
2347 xfs_agf_t *agf; /* a.g. freespace structure */
e2101005 2348 __be32 *blockp;/* pointer to array entry */
1da177e4 2349 int error;
92821e2b 2350 int logflags;
1da177e4
LT
2351 xfs_mount_t *mp; /* mount structure */
2352 xfs_perag_t *pag; /* per allocation group data */
77c95bba
CH
2353 __be32 *agfl_bno;
2354 int startoff;
1da177e4
LT
2355
2356 agf = XFS_BUF_TO_AGF(agbp);
2357 mp = tp->t_mountp;
2358
2359 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
16259e7d 2360 be32_to_cpu(agf->agf_seqno), &agflbp)))
1da177e4 2361 return error;
413d57c9 2362 be32_add_cpu(&agf->agf_fllast, 1);
16259e7d 2363 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
1da177e4 2364 agf->agf_fllast = 0;
a862e0fd
DC
2365
2366 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
413d57c9 2367 be32_add_cpu(&agf->agf_flcount, 1);
1da177e4
LT
2368 xfs_trans_agflist_delta(tp, 1);
2369 pag->pagf_flcount++;
92821e2b
DC
2370
2371 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2372 if (btreeblk) {
413d57c9 2373 be32_add_cpu(&agf->agf_btreeblks, -1);
92821e2b
DC
2374 pag->pagf_btreeblks--;
2375 logflags |= XFS_AGF_BTREEBLKS;
2376 }
a862e0fd 2377 xfs_perag_put(pag);
92821e2b 2378
92821e2b
DC
2379 xfs_alloc_log_agf(tp, agbp, logflags);
2380
16259e7d 2381 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
77c95bba
CH
2382
2383 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2384 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
e2101005 2385 *blockp = cpu_to_be32(bno);
77c95bba
CH
2386 startoff = (char *)blockp - (char *)agflbp->b_addr;
2387
92821e2b 2388 xfs_alloc_log_agf(tp, agbp, logflags);
77c95bba 2389
61fe135c 2390 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
77c95bba
CH
2391 xfs_trans_log_buf(tp, agflbp, startoff,
2392 startoff + sizeof(xfs_agblock_t) - 1);
1da177e4
LT
2393 return 0;
2394}
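/*
 * Editor's note: putting a block back is the mirror image of getting
 * one - the tail index agf_fllast advances (with wrap-around) and the
 * returned block number is written there.  Sketch (illustrative only;
 * names are the editor's):
 */
static inline void agfl_put_sketch(
	unsigned int	*agfl,
	unsigned int	size,		/* XFS_AGFL_SIZE() */
	unsigned int	*fllast,	/* tail index */
	unsigned int	*flcount,
	unsigned int	bno)		/* block being returned */
{
	if (++(*fllast) == size)
		*fllast = 0;		/* wrap the tail index */
	agfl[*fllast] = bno;		/* store the returned block */
	(*flcount)++;
}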
2395
4e0e6040 2396static bool
612cfbfe 2397xfs_agf_verify(
4e0e6040 2398 struct xfs_mount *mp,
5d5f527d
DC
2399 struct xfs_buf *bp)
2400{
4e0e6040 2401 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
5d5f527d 2402
a45086e2
BF
2403 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2404 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
4e0e6040 2405 return false;
a45086e2
BF
2406 if (!xfs_log_check_lsn(mp,
2407 be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
2408 return false;
2409 }
5d5f527d 2410
4e0e6040
DC
2411 if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2412 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2413 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2414 be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
2415 be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
2416 be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
2417 return false;
5d5f527d 2418
d2a047f3
DW
2419 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2420 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2421 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2422 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2423 		return false;
2424
2425 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2426 	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2427 	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
2428 		return false;
2429
5d5f527d
DC
2430 /*
2431 * during growfs operations, the perag is not fully initialised,
2432 * so we can't use it for any useful checking. growfs ensures we can't
2433 * use it by using uncached buffers that don't have the perag attached
2434 * so we can detect and avoid this problem.
2435 */
4e0e6040
DC
2436 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2437 return false;
5d5f527d 2438
4e0e6040
DC
2439 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2440 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2441 return false;
2442
2443 	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2444 	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
2445 	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
2446 		return false;
2447
4e0e6040 2448 	return true;
5d5f527d 2449
612cfbfe
DC
2450}
2451
1813dd64
DC
2452static void
2453xfs_agf_read_verify(
612cfbfe
DC
2454 struct xfs_buf *bp)
2455{
4e0e6040 2456 struct xfs_mount *mp = bp->b_target->bt_mount;
4e0e6040 2457
ce5028cf
ES
2458 if (xfs_sb_version_hascrc(&mp->m_sb) &&
2459 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
31ca03c9 2460 xfs_verifier_error(bp, -EFSBADCRC);
ce5028cf 2461 else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
9e24cfd0 2462 XFS_ERRTAG_ALLOC_READ_AGF))
31ca03c9 2463 xfs_verifier_error(bp, -EFSCORRUPTED);
612cfbfe 2464}
5d5f527d 2465
b0f539de 2466static void
1813dd64 2467xfs_agf_write_verify(
612cfbfe
DC
2468 struct xfs_buf *bp)
2469{
4e0e6040
DC
2470 struct xfs_mount *mp = bp->b_target->bt_mount;
2471 struct xfs_buf_log_item *bip = bp->b_fspriv;
2472
2473 if (!xfs_agf_verify(mp, bp)) {
31ca03c9 2474 xfs_verifier_error(bp, -EFSCORRUPTED);
4e0e6040
DC
2475 return;
2476 }
2477
2478 if (!xfs_sb_version_hascrc(&mp->m_sb))
2479 return;
2480
2481 if (bip)
2482 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2483
f1dbcd7e 2484 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
5d5f527d
DC
2485}
2486
1813dd64 2487const struct xfs_buf_ops xfs_agf_buf_ops = {
233135b7 2488 .name = "xfs_agf",
1813dd64
DC
2489 .verify_read = xfs_agf_read_verify,
2490 .verify_write = xfs_agf_write_verify,
2491};
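/*
 * Editor's note: the verifier pairing above follows the usual pattern
 * for CRC-enabled metadata: on read, the checksum is tested before the
 * structure; on write, the structure is tested first, then the LSN is
 * stamped and the checksum recomputed just before I/O.  Schematic
 * sketch of that ordering (illustrative only, not a real verifier):
 */
static inline const char *verifier_order_sketch(
	int	reading,
	int	crc_ok,
	int	structure_ok)
{
	if (reading && !crc_ok)
		return "-EFSBADCRC";	/* checksum mismatch, e.g. torn write */
	if (!structure_ok)
		return "-EFSCORRUPTED"; /* contents fail xfs_agf_verify() */
	/* on writes, stamp agf_lsn and update the CRC here */
	return "ok";
}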
2492
1da177e4
LT
2493/*
2494 * Read in the allocation group header (free/alloc section).
2495 */
2496int /* error */
4805621a
FCH
2497xfs_read_agf(
2498 struct xfs_mount *mp, /* mount point structure */
2499 struct xfs_trans *tp, /* transaction pointer */
2500 xfs_agnumber_t agno, /* allocation group number */
2501 int flags, /* XFS_BUF_ */
2502 struct xfs_buf **bpp) /* buffer for the ag freelist header */
1da177e4 2503{
1da177e4
LT
2504 int error;
2505
d123031a
DC
2506 trace_xfs_read_agf(mp, agno);
2507
1da177e4
LT
2508 ASSERT(agno != NULLAGNUMBER);
2509 error = xfs_trans_read_buf(
2510 mp, tp, mp->m_ddev_targp,
2511 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
1813dd64 2512 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
1da177e4
LT
2513 if (error)
2514 return error;
4805621a 2515 if (!*bpp)
1da177e4 2516 return 0;
4805621a 2517
5a52c2a5 2518 ASSERT(!(*bpp)->b_error);
38f23232 2519 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
4805621a
FCH
2520 return 0;
2521}
2522
2523/*
2524 * Read in the allocation group header (free/alloc section).
2525 */
2526int /* error */
2527xfs_alloc_read_agf(
2528 struct xfs_mount *mp, /* mount point structure */
2529 struct xfs_trans *tp, /* transaction pointer */
2530 xfs_agnumber_t agno, /* allocation group number */
2531 int flags, /* XFS_ALLOC_FLAG_... */
2532 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2533{
2534 struct xfs_agf *agf; /* ag freelist header */
2535 struct xfs_perag *pag; /* per allocation group data */
2536 int error;
2537
d123031a 2538 trace_xfs_alloc_read_agf(mp, agno);
4805621a 2539
d123031a 2540 ASSERT(agno != NULLAGNUMBER);
4805621a 2541 error = xfs_read_agf(mp, tp, agno,
0cadda1c 2542 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
4805621a
FCH
2543 bpp);
2544 if (error)
2545 return error;
2546 if (!*bpp)
2547 return 0;
5a52c2a5 2548 ASSERT(!(*bpp)->b_error);
4805621a
FCH
2549
2550 agf = XFS_BUF_TO_AGF(*bpp);
a862e0fd 2551 pag = xfs_perag_get(mp, agno);
1da177e4 2552 if (!pag->pagf_init) {
16259e7d 2553 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
92821e2b 2554 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
16259e7d
CH
2555 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2556 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
1da177e4 2557 pag->pagf_levels[XFS_BTNUM_BNOi] =
16259e7d 2558 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
1da177e4 2559 pag->pagf_levels[XFS_BTNUM_CNTi] =
16259e7d 2560 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
b8704944
DW
2561 pag->pagf_levels[XFS_BTNUM_RMAPi] =
2562 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
46eeb521 2563 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
007c61c6 2564 spin_lock_init(&pag->pagb_lock);
e57336ff 2565 pag->pagb_count = 0;
ed3b4d6c 2566 pag->pagb_tree = RB_ROOT;
1da177e4
LT
2567 pag->pagf_init = 1;
2568 }
2569#ifdef DEBUG
2570 else if (!XFS_FORCED_SHUTDOWN(mp)) {
16259e7d 2571 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
89b28393 2572 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
16259e7d
CH
2573 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2574 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
1da177e4 2575 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
16259e7d 2576 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
1da177e4 2577 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
16259e7d 2578 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
1da177e4
LT
2579 }
2580#endif
a862e0fd 2581 xfs_perag_put(pag);
1da177e4
LT
2582 return 0;
2583}
2584
2585/*
2586 * Allocate an extent (variable-size).
2587 * Depending on the allocation type, we either look in a single allocation
2588 * group or loop over the allocation groups to find the result.
2589 */
2590int /* error */
e04426b9 2591xfs_alloc_vextent(
1da177e4
LT
2592 xfs_alloc_arg_t *args) /* allocation argument structure */
2593{
2594 xfs_agblock_t agsize; /* allocation group size */
2595 int error;
2596 int flags; /* XFS_ALLOC_FLAG_... locking flags */
1da177e4
LT
2597 xfs_mount_t *mp; /* mount structure pointer */
2598 xfs_agnumber_t sagno; /* starting allocation group number */
2599 xfs_alloctype_t type; /* input allocation type */
2600 int bump_rotor = 0;
1da177e4
LT
2601 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2602
2603 mp = args->mp;
2604 type = args->otype = args->type;
2605 args->agbno = NULLAGBLOCK;
2606 /*
2607 * Just fix this up, for the case where the last a.g. is shorter
2608 * (or there's only one a.g.) and the caller couldn't easily figure
2609 * that out (xfs_bmap_alloc).
2610 */
2611 agsize = mp->m_sb.sb_agblocks;
2612 if (args->maxlen > agsize)
2613 args->maxlen = agsize;
2614 if (args->alignment == 0)
2615 args->alignment = 1;
2616 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2617 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2618 ASSERT(args->minlen <= args->maxlen);
2619 ASSERT(args->minlen <= agsize);
2620 ASSERT(args->mod < args->prod);
2621 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2622 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2623 args->minlen > args->maxlen || args->minlen > agsize ||
2624 args->mod >= args->prod) {
2625 args->fsbno = NULLFSBLOCK;
0b1b213f 2626 trace_xfs_alloc_vextent_badargs(args);
1da177e4
LT
2627 return 0;
2628 }
1da177e4
LT
2629
2630 switch (type) {
2631 case XFS_ALLOCTYPE_THIS_AG:
2632 case XFS_ALLOCTYPE_NEAR_BNO:
2633 case XFS_ALLOCTYPE_THIS_BNO:
2634 /*
2635 * These three force us into a single a.g.
2636 */
2637 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
a862e0fd 2638 args->pag = xfs_perag_get(mp, args->agno);
1da177e4 2639 error = xfs_alloc_fix_freelist(args, 0);
1da177e4 2640 if (error) {
0b1b213f 2641 trace_xfs_alloc_vextent_nofix(args);
1da177e4
LT
2642 goto error0;
2643 }
2644 if (!args->agbp) {
0b1b213f 2645 trace_xfs_alloc_vextent_noagbp(args);
1da177e4
LT
2646 break;
2647 }
2648 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2649 if ((error = xfs_alloc_ag_vextent(args)))
2650 goto error0;
1da177e4
LT
2651 break;
2652 case XFS_ALLOCTYPE_START_BNO:
2653 /*
2654 * Try near allocation first, then anywhere-in-ag after
2655 * the first a.g. fails.
2656 */
2657 		if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
2658 		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2659 args->fsbno = XFS_AGB_TO_FSB(mp,
2660 ((mp->m_agfrotor / rotorstep) %
2661 mp->m_sb.sb_agcount), 0);
2662 bump_rotor = 1;
2663 }
2664 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2665 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2666 /* FALLTHROUGH */
1da177e4
LT
2667 case XFS_ALLOCTYPE_FIRST_AG:
2668 /*
2669 * Rotate through the allocation groups looking for a winner.
2670 */
8d242e93 2671 if (type == XFS_ALLOCTYPE_FIRST_AG) {
1da177e4
LT
2672 /*
2673 * Start with allocation group given by bno.
2674 */
2675 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2676 args->type = XFS_ALLOCTYPE_THIS_AG;
2677 sagno = 0;
2678 flags = 0;
2679 } else {
1da177e4
LT
2680 /*
2681 * Start with the given allocation group.
2682 */
2683 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2684 flags = XFS_ALLOC_FLAG_TRYLOCK;
2685 }
2686 /*
2687 * Loop over allocation groups twice; first time with
2688 * trylock set, second time without.
2689 */
1da177e4 2690 for (;;) {
a862e0fd 2691 args->pag = xfs_perag_get(mp, args->agno);
1da177e4 2692 error = xfs_alloc_fix_freelist(args, flags);
1da177e4 2693 if (error) {
0b1b213f 2694 trace_xfs_alloc_vextent_nofix(args);
1da177e4
LT
2695 goto error0;
2696 }
2697 /*
2698 * If we get a buffer back then the allocation will fly.
2699 */
2700 if (args->agbp) {
2701 if ((error = xfs_alloc_ag_vextent(args)))
2702 goto error0;
2703 break;
2704 }
0b1b213f
CH
2705
2706 trace_xfs_alloc_vextent_loopfailed(args);
2707
1da177e4
LT
2708 /*
2709 * Didn't work, figure out the next iteration.
2710 */
2711 if (args->agno == sagno &&
2712 type == XFS_ALLOCTYPE_START_BNO)
2713 args->type = XFS_ALLOCTYPE_THIS_AG;
d210a28c
YL
2714 /*
2715 * For the first allocation, we can try any AG to get
2716 * space. However, if we already have allocated a
2717 * block, we don't want to try AGs whose number is below
2718 * sagno. Otherwise, we may end up with out-of-order
2719 * locking of AGF, which might cause deadlock.
2720 */
2721 if (++(args->agno) == mp->m_sb.sb_agcount) {
2722 if (args->firstblock != NULLFSBLOCK)
2723 args->agno = sagno;
2724 else
2725 args->agno = 0;
2726 }
1da177e4
LT
2727 /*
2728 * Reached the starting a.g., must either be done
2729 * or switch to non-trylock mode.
2730 */
2731 if (args->agno == sagno) {
255c5162 2732 if (flags == 0) {
1da177e4 2733 args->agbno = NULLAGBLOCK;
0b1b213f 2734 trace_xfs_alloc_vextent_allfailed(args);
1da177e4
LT
2735 break;
2736 }
255c5162
CH
2737
2738 flags = 0;
2739 if (type == XFS_ALLOCTYPE_START_BNO) {
2740 args->agbno = XFS_FSB_TO_AGBNO(mp,
2741 args->fsbno);
2742 args->type = XFS_ALLOCTYPE_NEAR_BNO;
1da177e4
LT
2743 }
2744 }
a862e0fd 2745 xfs_perag_put(args->pag);
1da177e4 2746 }
8d242e93 2747 if (bump_rotor) {
1da177e4
LT
2748 if (args->agno == sagno)
2749 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2750 (mp->m_sb.sb_agcount * rotorstep);
2751 else
2752 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2753 (mp->m_sb.sb_agcount * rotorstep);
2754 }
2755 break;
2756 default:
2757 ASSERT(0);
2758 /* NOTREACHED */
2759 }
2760 if (args->agbno == NULLAGBLOCK)
2761 args->fsbno = NULLFSBLOCK;
2762 else {
2763 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2764#ifdef DEBUG
2765 ASSERT(args->len >= args->minlen);
2766 ASSERT(args->len <= args->maxlen);
2767 ASSERT(args->agbno % args->alignment == 0);
2768 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2769 args->len);
2770#endif
3fbbbea3
DC
2771
2772 /* Zero the extent if we were asked to do so */
292378ed 2773 if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
3fbbbea3
DC
2774 error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2775 if (error)
2776 goto error0;
2777 }
2778
1da177e4 2779 }
a862e0fd 2780 xfs_perag_put(args->pag);
1da177e4
LT
2781 return 0;
2782error0:
a862e0fd 2783 xfs_perag_put(args->pag);
1da177e4
LT
2784 return error;
2785}
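/*
 * Editor's note: for the looping allocation types, xfs_alloc_vextent()
 * walks the AGs starting at the target AG, wrapping at sb_agcount, and
 * makes up to two passes - the first with trylock semantics, the second
 * blocking.  Stripped of the allocation work itself, the iteration
 * reduces to this sketch (illustrative only; names are the editor's):
 */
static inline int ag_rotor_sketch(
	unsigned int	start_agno,
	unsigned int	agcount,
	int		(*try_ag)(unsigned int agno, int trylock))
{
	unsigned int	agno = start_agno;
	int		trylock = 1;

	for (;;) {
		if (try_ag(agno, trylock))
			return 0;		/* allocation succeeded */
		if (++agno == agcount)
			agno = 0;		/* wrap to AG 0 */
		if (agno == start_agno) {
			if (!trylock)
				return -1;	/* both passes failed */
			trylock = 0;		/* start the blocking pass */
		}
	}
}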
2786
4d89e20b
DC
2787/* Ensure that the freelist is at full capacity. */
2788int
2789xfs_free_extent_fix_freelist(
2790 struct xfs_trans *tp,
2791 xfs_agnumber_t agno,
2792 struct xfs_buf **agbp)
1da177e4 2793{
4d89e20b
DC
2794 struct xfs_alloc_arg args;
2795 int error;
1da177e4 2796
4d89e20b 2797 memset(&args, 0, sizeof(struct xfs_alloc_arg));
1da177e4
LT
2798 args.tp = tp;
2799 args.mp = tp->t_mountp;
4d89e20b 2800 args.agno = agno;
be65b18a
DC
2801
2802 /*
2803  * validate that the block number is legal - this enables us to detect
2804 * and handle a silent filesystem corruption rather than crashing.
2805 */
be65b18a 2806 if (args.agno >= args.mp->m_sb.sb_agcount)
2451337d 2807 return -EFSCORRUPTED;
be65b18a 2808
a862e0fd 2809 args.pag = xfs_perag_get(args.mp, args.agno);
be65b18a
DC
2810 ASSERT(args.pag);
2811
2812 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2813 if (error)
4d89e20b
DC
2814 goto out;
2815
2816 *agbp = args.agbp;
2817out:
2818 xfs_perag_put(args.pag);
2819 return error;
2820}
2821
2822/*
2823 * Free an extent.
2824 * Just break up the extent address and hand off to xfs_free_ag_extent
2825 * after fixing up the freelist.
2826 */
2827int /* error */
2828xfs_free_extent(
2829 struct xfs_trans *tp, /* transaction pointer */
2830 xfs_fsblock_t bno, /* starting block number of extent */
340785cc 2831 xfs_extlen_t len, /* length of extent */
3fd129b6
DW
2832 struct xfs_owner_info *oinfo, /* extent owner */
2833 enum xfs_ag_resv_type type) /* block reservation type */
4d89e20b
DC
2834{
2835 struct xfs_mount *mp = tp->t_mountp;
2836 struct xfs_buf *agbp;
2837 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
2838 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
2839 int error;
2840
2841 ASSERT(len != 0);
3fd129b6 2842 ASSERT(type != XFS_AG_RESV_AGFL);
4d89e20b 2843
2844 	if (XFS_TEST_ERROR(false, mp,
2845 			XFS_ERRTAG_FREE_EXTENT))
2846 		return -EIO;
2847
4d89e20b
DC
2848 error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
2849 if (error)
2850 return error;
2851
2852 XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
be65b18a
DC
2853
2854 /* validate the extent size is legal now we have the agf locked */
4d89e20b
DC
2855 XFS_WANT_CORRUPTED_GOTO(mp,
2856 agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
2857 err);
be65b18a 2858
3fd129b6 2859 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
4d89e20b
DC
2860 if (error)
2861 goto err;
2862
2863 xfs_extent_busy_insert(tp, agno, agbno, len, 0);
2864 return 0;
2865
2866err:
2867 xfs_trans_brelse(tp, agbp);
1da177e4
LT
2868 return error;
2869}
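/*
 * Editor's note: XFS_FSB_TO_AGNO()/XFS_FSB_TO_AGBNO() above split a
 * filesystem block number into an AG index and an offset within that
 * AG; the offset field is sized by sb_agblklog, the rounded-up log2 of
 * the AG size, so the split is a shift and a mask.  Self-contained
 * sketch (illustrative only; names are the editor's):
 */
static inline void fsb_split_sketch(
	unsigned long long	fsbno,
	unsigned int		agblklog,	/* sb_agblklog */
	unsigned int		*agno,
	unsigned int		*agbno)
{
	/* high bits select the allocation group ... */
	*agno = (unsigned int)(fsbno >> agblklog);
	/* ... the low agblklog bits are the block offset inside that AG */
	*agbno = (unsigned int)(fsbno & ((1ULL << agblklog) - 1));
}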
2d520bfa
DW
2870
2871struct xfs_alloc_query_range_info {
2872 xfs_alloc_query_range_fn fn;
2873 void *priv;
2874};
2875
2876/* Format btree record and pass to our callback. */
2877STATIC int
2878xfs_alloc_query_range_helper(
2879 struct xfs_btree_cur *cur,
2880 union xfs_btree_rec *rec,
2881 void *priv)
2882{
2883 struct xfs_alloc_query_range_info *query = priv;
2884 struct xfs_alloc_rec_incore irec;
2885
2886 irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
2887 irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
2888 return query->fn(cur, &irec, query->priv);
2889}
2890
2891/* Find all free space within a given range of blocks. */
2892int
2893xfs_alloc_query_range(
2894 struct xfs_btree_cur *cur,
2895 struct xfs_alloc_rec_incore *low_rec,
2896 struct xfs_alloc_rec_incore *high_rec,
2897 xfs_alloc_query_range_fn fn,
2898 void *priv)
2899{
2900 union xfs_btree_irec low_brec;
2901 union xfs_btree_irec high_brec;
2902 struct xfs_alloc_query_range_info query;
2903
2904 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
2905 low_brec.a = *low_rec;
2906 high_brec.a = *high_rec;
2907 query.priv = priv;
2908 query.fn = fn;
2909 return xfs_btree_query_range(cur, &low_brec, &high_brec,
2910 xfs_alloc_query_range_helper, &query);
2911}
e9a2599a
DW
2912
2913/* Find all free space records. */
2914int
2915xfs_alloc_query_all(
2916 struct xfs_btree_cur *cur,
2917 xfs_alloc_query_range_fn fn,
2918 void *priv)
2919{
2920 struct xfs_alloc_query_range_info query;
2921
2922 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
2923 query.priv = priv;
2924 query.fn = fn;
2925 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
2926}
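/*
 * Editor's note: a typical consumer of xfs_alloc_query_all() supplies a
 * callback matching xfs_alloc_query_range_fn and threads its own state
 * through the priv pointer.  Hypothetical usage example (illustrative
 * only, not part of this file): summing the free blocks seen by an
 * existing by-block btree cursor.
 */
static int count_free_blocks_helper(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*total = priv;

	*total += rec->ar_blockcount;	/* accumulate each free extent */
	return 0;			/* keep walking the btree */
}

static inline int count_free_blocks(
	struct xfs_btree_cur	*cur,	/* XFS_BTNUM_BNO cursor */
	xfs_extlen_t		*total)
{
	*total = 0;
	return xfs_alloc_query_all(cur, count_free_blocks_helper, total);
}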
21ec5416
DW
2927
2928/* Find the size of the AG, in blocks. */
2929xfs_agblock_t
2930xfs_ag_block_count(
2931 struct xfs_mount *mp,
2932 xfs_agnumber_t agno)
2933{
2934 ASSERT(agno < mp->m_sb.sb_agcount);
2935
2936 if (agno < mp->m_sb.sb_agcount - 1)
2937 return mp->m_sb.sb_agblocks;
2938 return mp->m_sb.sb_dblocks - (agno * mp->m_sb.sb_agblocks);
2939}
2940
2941/*
2942 * Verify that an AG block number pointer neither points outside the AG
2943 * nor points at static metadata.
2944 */
2945bool
2946xfs_verify_agbno(
2947 struct xfs_mount *mp,
2948 xfs_agnumber_t agno,
2949 xfs_agblock_t agbno)
2950{
2951 xfs_agblock_t eoag;
2952
2953 eoag = xfs_ag_block_count(mp, agno);
2954 if (agbno >= eoag)
2955 return false;
2956 if (agbno <= XFS_AGFL_BLOCK(mp))
2957 return false;
2958 return true;
2959}
2960
2961/*
2962 * Verify that an FS block number pointer neither points outside the
2963 * filesystem nor points at static AG metadata.
2964 */
2965bool
2966xfs_verify_fsbno(
2967 struct xfs_mount *mp,
2968 xfs_fsblock_t fsbno)
2969{
2970 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno);
2971
2972 if (agno >= mp->m_sb.sb_agcount)
2973 return false;
2974 return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
2975}