// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>
#include <kunit/static_stub.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * The allocation request involves a request for multiple blocks
 * near to the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files close together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information, stored in the inode as:
 *
 * { page }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a set of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
 * structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos of
 *    that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 *    placed in appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty groups
 *    so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
 * structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups which have the largest_free_order
 * >= the order of the request. We directly look at the largest free order list
 * in the data structure (1) above where largest_free_order = order of the
 * request. If that list is empty, we look at the remaining lists in the
 * increasing order of largest_free_order. This allows us to perform
 * CR_POWER2_ALIGNED lookup in O(1) time.
 *
 * At CR_GOAL_LEN_FAST, we only consider groups where the
 * average fragment size >= request size. So, we lookup a group which has average
 * fragment size just above or equal to request size using our average fragment
 * size group lists (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
 * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 * CR_GOAL_LEN_FAST suggests that there is no BG that has avg
 * fragment size > goal length. So before falling back to the slower
 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim the goal length
 * and then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with a
 * big enough average fragment size. This increases the chances of finding a
 * suitable block group in O(1) time and results in faster allocation at the
 * cost of reduced size of allocation.
 *
 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
 * linear order which requires O(N) search time for each CR_POWER2_ALIGNED and
 * CR_GOAL_LEN_FAST phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_max_linear_groups
 *
 * The regular allocator uses the buddy scan only if the request len is a power
 * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_max_linear_groups"
 * tells mballoc how many groups it should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both prealloc spaces are populated as above. So for the first
 * request we will hit the buddy cache which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */
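
/*
 * Illustrative sketch (an addition for exposition, not part of the
 * allocator): the size-based choice between the two preallocation schemes
 * described above boils down to a comparison against s_mb_stream_request;
 * the real decision is made in ext4_mb_group_or_file():
 *
 *	size = max(current file size, resulting file size after allocation);
 *	if (size < sbi->s_mb_stream_request)
 *		use the per-CPU locality group preallocation;
 *	else
 *		use the per-inode preallocation list;
 */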

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from a descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the number
 * of blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:                buddy = on-disk + PAs
 *  - new PA:                    buddy += N; PA = N
 *  - use inode PA:              on-disk += N; PA -= N
 *  - discard inode PA:          buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:     on-disk += N; PA -= N
 *  - discard locality group PA: buddy -= PA; PA = 0
 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *       is used in real operation because we can't know actual used
 *       bits from PA, only from on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set or/and PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - a PA can be referenced, and while it is, no discard is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should care about concurrent init
 */
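
/*
 * A worked example of the accounting above (illustrative, with made-up
 * numbers): suppose buddy init finds 100 blocks used on disk plus one PA
 * of 8 blocks:
 *
 *	init buddy:            buddy = 100 + 8 = 108 used
 *	use inode PA (N = 3):  on-disk = 103, PA = 5, buddy still 108
 *	discard inode PA:      the 5 still-unused PA blocks are freed in
 *	                       the buddy (buddy = 103), PA = 0
 *
 * The invariant "in-core buddy = on-disk bitmap + preallocation
 * descriptors" holds after every step.
 */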

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group          (group)
 *  - object (inode/locality)     (object)
 *  - per-pa lock                 (pa)
 *  - cr_power2_aligned lists lock (cr_power2_aligned)
 *  - cr_goal_len_fast lists lock  (cr_goal_len_fast)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr_power2_aligned/cr_goal_len_fast
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size. There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
        "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
        "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
        "ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
                               ext4_group_t group, enum criteria cr);

static int ext4_try_to_trim_range(struct super_block *sb,
                struct ext4_buddy *e4b, ext4_grpblk_t start,
                ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter is as follows:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and the number of freed
 * blocks found was 0 do we sample the percpu seq counter for all cpus using
 * the function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
        int __cpu;
        u64 __seq = 0;

        for_each_possible_cpu(__cpu)
                __seq += per_cpu(discard_pa_seq, __cpu);
        return __seq;
}
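
/*
 * Simplified sketch (not verbatim) of the consumer side of this counter;
 * see ext4_mb_new_blocks() and
 * ext4_mb_discard_preallocations_should_retry() for the real code:
 *
 *	seq = this_cpu_read(discard_pa_seq);	// cheap, local sample
 * repeat:
 *	try the allocation;
 *	if (allocation failed && no blocks could be discarded &&
 *	    seq != ext4_get_discard_pa_seq_sum()) {
 *		// another CPU allocated/freed/discarded in the meantime,
 *		// so a retry may now succeed
 *		goto repeat;
 *	}
 */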

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
        *bit += ((unsigned long) addr & 7UL) << 3;
        addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
        *bit += ((unsigned long) addr & 3UL) << 3;
        addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
        return addr;
}
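
/*
 * Worked example (illustrative): on a 64-bit build, addr == ...0x1006 with
 * *bit == 3 becomes addr == ...0x1000 with *bit == 3 + 6 * 8 == 51 -- the
 * same bit, now addressed from an unsigned-long-aligned base as required
 * by ext4_test_bit and friends on architectures like powerpc.
 */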

static inline int mb_test_bit(int bit, void *addr)
{
        /*
         * ext4_test_bit on architectures like powerpc
         * needs unsigned long aligned address
         */
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
        addr = mb_correct_addr_and_bit(&bit, addr);
        return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
        int fix = 0, ret, tmpmax;
        addr = mb_correct_addr_and_bit(&fix, addr);
        tmpmax = max + fix;
        start += fix;

        ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
        if (ret > max)
                return max;
        return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
        char *bb;

        BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
        BUG_ON(max == NULL);

        if (order > e4b->bd_blkbits + 1) {
                *max = 0;
                return NULL;
        }

        /* at order 0 we see each particular block */
        if (order == 0) {
                *max = 1 << (e4b->bd_blkbits + 3);
                return e4b->bd_bitmap;
        }

        bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
        *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

        return bb;
}
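
/*
 * Usage sketch (illustrative): order 0 aliases the block bitmap itself
 * (*max = bits in one bitmap block), while higher orders index into the
 * buddy block through the precomputed s_mb_offsets[]/s_mb_maxs[] tables:
 *
 *	bitmap = mb_find_buddy(e4b, 0, &max);	// returns e4b->bd_bitmap
 *	buddy3 = mb_find_buddy(e4b, 3, &max);	// order-3 map inside
 *						// e4b->bd_buddy
 */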

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
                                  int first, int count)
{
        int i;
        struct super_block *sb = e4b->bd_sb;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
                        ext4_fsblk_t blocknr;

                        blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
                        blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
                        ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
                        ext4_grp_locked_error(sb, e4b->bd_group,
                                              inode ? inode->i_ino : 0,
                                              blocknr,
                                              "freeing block already freed "
                                              "(bit %u)",
                                              first + i);
                }
                mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
        int i;

        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
        for (i = 0; i < count; i++) {
                BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
                mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
        }
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        if (unlikely(e4b->bd_info->bb_bitmap == NULL))
                return;
        if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
                unsigned char *b1, *b2;
                int i;
                b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
                b2 = (unsigned char *) bitmap;
                for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
                        if (b1[i] != b2[i]) {
                                ext4_msg(e4b->bd_sb, KERN_ERR,
                                         "corruption in group %u "
                                         "at byte %u(%u): %x in copy != %x "
                                         "on disk/prealloc",
                                         e4b->bd_group, i, i * 8, b1[i], b2[i]);
                                BUG();
                        }
                }
        }
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        struct buffer_head *bh;

        grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
        if (!grp->bb_bitmap)
                return;

        bh = ext4_read_block_bitmap(sb, group);
        if (IS_ERR_OR_NULL(bh)) {
                kfree(grp->bb_bitmap);
                grp->bb_bitmap = NULL;
                return;
        }

        memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
        put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
                                struct ext4_buddy *e4b, int first, int count)
{
        return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
                                                int first, int count)
{
        return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
        return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
                        struct ext4_group_info *grp, ext4_group_t group)
{
        return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
        return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)                                         \
do {                                                                    \
        if (!(assert)) {                                                \
                printk(KERN_EMERG                                       \
                        "Assertion failure in %s() at %s:%d: \"%s\"\n", \
                        function, file, line, # assert);                \
                BUG();                                                  \
        }                                                               \
} while (0)

static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
                                const char *function, int line)
{
        struct super_block *sb = e4b->bd_sb;
        int order = e4b->bd_blkbits + 1;
        int max;
        int max2;
        int i;
        int j;
        int k;
        int count;
        struct ext4_group_info *grp;
        int fragments = 0;
        int fstart;
        struct list_head *cur;
        void *buddy;
        void *buddy2;

        if (e4b->bd_info->bb_check_counter++ % 10)
                return;

        while (order > 1) {
                buddy = mb_find_buddy(e4b, order, &max);
                MB_CHECK_ASSERT(buddy);
                buddy2 = mb_find_buddy(e4b, order - 1, &max2);
                MB_CHECK_ASSERT(buddy2);
                MB_CHECK_ASSERT(buddy != buddy2);
                MB_CHECK_ASSERT(max * 2 == max2);

                count = 0;
                for (i = 0; i < max; i++) {

                        if (mb_test_bit(i, buddy)) {
                                /* only single bit in buddy2 may be 0 */
                                if (!mb_test_bit(i << 1, buddy2)) {
                                        MB_CHECK_ASSERT(
                                                mb_test_bit((i<<1)+1, buddy2));
                                }
                                continue;
                        }

                        /* both bits in buddy2 must be 1 */
                        MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
                        MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

                        for (j = 0; j < (1 << order); j++) {
                                k = (i * (1 << order)) + j;
                                MB_CHECK_ASSERT(
                                        !mb_test_bit(k, e4b->bd_bitmap));
                        }
                        count++;
                }
                MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
                order--;
        }

        fstart = -1;
        buddy = mb_find_buddy(e4b, 0, &max);
        for (i = 0; i < max; i++) {
                if (!mb_test_bit(i, buddy)) {
                        MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
                        if (fstart == -1) {
                                fragments++;
                                fstart = i;
                        }
                        continue;
                }
                fstart = -1;
                /* check used bits only */
                for (j = 0; j < e4b->bd_blkbits + 1; j++) {
                        buddy2 = mb_find_buddy(e4b, j, &max2);
                        k = i >> j;
                        MB_CHECK_ASSERT(k < max2);
                        MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
                }
        }
        MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
        MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

        grp = ext4_get_group_info(sb, e4b->bd_group);
        if (!grp)
                return;
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
                pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
                ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
                MB_CHECK_ASSERT(groupnr == e4b->bd_group);
                for (i = 0; i < pa->pa_len; i++)
                        MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
        }
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,       \
                                        __FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in bitmap which the blocks of the chunk(s) covered,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
                                void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
                                        struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t min;
        ext4_grpblk_t max;
        ext4_grpblk_t chunk;
        unsigned int border;

        BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

        border = 2 << sb->s_blocksize_bits;

        while (len > 0) {
                /* find how many blocks can be covered since this position */
                max = ffs(first | border) - 1;

                /* find how many blocks of power 2 we need to mark */
                min = fls(len) - 1;

                if (max < min)
                        min = max;
                chunk = 1 << min;

                /* mark multiblock chunks only */
                grp->bb_counters[min]++;
                if (min > 0)
                        mb_clear_bit(first >> min,
                                     buddy + sbi->s_mb_offsets[min]);

                len -= chunk;
                first += chunk;
        }
}
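
/*
 * Worked example (illustrative): a free range with first = 6, len = 6
 * (clusters 6..11) is split on power-of-2 alignment into an order-1 chunk
 * at 6 (ffs(6) limits the alignment) and an order-2 chunk at 8:
 * bb_counters[1] and bb_counters[2] are incremented and the matching bits
 * are cleared in the order-1 and order-2 buddy bitmaps.
 */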

static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
        int order;

        /*
         * We don't bother with special lists for groups with only 1-block
         * free extents or for completely empty groups.
         */
        order = fls(len) - 2;
        if (order < 0)
                return 0;
        if (order == MB_NUM_ORDERS(sb))
                order--;
        if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
                order = MB_NUM_ORDERS(sb) - 1;
        return order;
}
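
/*
 * Examples (illustrative): len 1 maps to list 0 (fls(1) - 2 is negative
 * and is clamped), len 2..3 also map to list 0, len 4..7 to list 1,
 * len 8..15 to list 2, and so on; an extent spanning the whole group is
 * clamped to list MB_NUM_ORDERS(sb) - 1.
 */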

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int new_order;

        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
                return;

        new_order = mb_avg_fragment_size_order(sb,
                                        grp->bb_free / grp->bb_fragments);
        if (new_order == grp->bb_avg_fragment_size_order)
                return;

        if (grp->bb_avg_fragment_size_order != -1) {
                write_lock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
                list_del(&grp->bb_avg_fragment_size_node);
                write_unlock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
        }
        grp->bb_avg_fragment_size_order = new_order;
        write_lock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
        list_add_tail(&grp->bb_avg_fragment_size_node,
                &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
        write_unlock(&sbi->s_mb_avg_fragment_size_locks[
                                        grp->bb_avg_fragment_size_order]);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
                        enum criteria *new_cr, ext4_group_t *group)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *iter;
        int i;

        if (ac->ac_status == AC_STATUS_FOUND)
                return;

        if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED))
                atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions);

        for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                if (list_empty(&sbi->s_mb_largest_free_orders[i]))
                        continue;
                read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
                if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
                        read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                        continue;
                }
                list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
                                    bb_largest_free_order_node) {
                        if (sbi->s_mb_stats)
                                atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]);
                        if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) {
                                *group = iter->bb_group;
                                ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED;
                                read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
                                return;
                        }
                }
                read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
        }

        /* Increment cr and search again if no group is found */
        *new_cr = CR_GOAL_LEN_FAST;
}

/*
 * Find a suitable group of given order from the average fragments list.
 */
static struct ext4_group_info *
ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
        rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
        struct ext4_group_info *grp = NULL, *iter;
        enum criteria cr = ac->ac_criteria;

        if (list_empty(frag_list))
                return NULL;
        read_lock(frag_list_lock);
        if (list_empty(frag_list)) {
                read_unlock(frag_list_lock);
                return NULL;
        }
        list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
                if (sbi->s_mb_stats)
                        atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
                if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
                        grp = iter;
                        break;
                }
        }
        read_unlock(frag_list_lock);
        return grp;
}

/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
                enum criteria *new_cr, ext4_group_t *group)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *grp = NULL;
        int i;

        if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) {
                if (sbi->s_mb_stats)
                        atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions);
        }

        for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
             i < MB_NUM_ORDERS(ac->ac_sb); i++) {
                grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
                if (grp) {
                        *group = grp->bb_group;
                        ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED;
                        return;
                }
        }

        /*
         * CR_BEST_AVAIL_LEN works based on the concept that we have
         * a larger normalized goal len request which can be trimmed to
         * a smaller goal len such that it can still satisfy the original
         * request len. However, allocation requests for non-regular
         * files never get normalized.
         * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA).
         */
        if (ac->ac_flags & EXT4_MB_HINT_DATA)
                *new_cr = CR_BEST_AVAIL_LEN;
        else
                *new_cr = CR_GOAL_LEN_SLOW;
}

/*
 * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free
 * fragment order we have and proactively trim the goal request length to that
 * order to find a suitable group faster.
 *
 * This optimizes allocation speed at the cost of slightly reduced
 * preallocations. However, we make sure that we don't trim the request too
 * much, and fall back to CR_GOAL_LEN_SLOW in that case.
 */
static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
                enum criteria *new_cr, ext4_group_t *group)
{
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_group_info *grp = NULL;
        int i, order, min_order;
        unsigned long num_stripe_clusters = 0;

        if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) {
                if (sbi->s_mb_stats)
                        atomic_inc(&sbi->s_bal_best_avail_bad_suggestions);
        }

        /*
         * mb_avg_fragment_size_order() returns order in a way that makes
         * retrieving back the length using (1 << order) inaccurate. Hence, use
         * fls() instead since we need to know the actual length while modifying
         * goal length.
         */
        order = fls(ac->ac_g_ex.fe_len) - 1;
        if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
                order = MB_NUM_ORDERS(ac->ac_sb);
        min_order = order - sbi->s_mb_best_avail_max_trim_order;
        if (min_order < 0)
                min_order = 0;

        if (sbi->s_stripe > 0) {
                /*
                 * We are assuming that stripe size is always a multiple of
                 * cluster ratio, otherwise __ext4_fill_super exits early.
                 */
                num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
                if (1 << min_order < num_stripe_clusters)
                        /*
                         * We consider 1 order less because later we round
                         * up the goal len to num_stripe_clusters
                         */
                        min_order = fls(num_stripe_clusters) - 1;
        }

        if (1 << min_order < ac->ac_o_ex.fe_len)
                min_order = fls(ac->ac_o_ex.fe_len);

        for (i = order; i >= min_order; i--) {
                int frag_order;
                /*
                 * Scale down goal len to make sure we find something
                 * in the free fragments list. Basically, reduce
                 * preallocations.
                 */
                ac->ac_g_ex.fe_len = 1 << i;

                if (num_stripe_clusters > 0) {
                        /*
                         * Try to round up the adjusted goal length to
                         * stripe size (in cluster units) multiple for
                         * efficiency.
                         */
                        ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len,
                                                     num_stripe_clusters);
                }

                frag_order = mb_avg_fragment_size_order(ac->ac_sb,
                                                        ac->ac_g_ex.fe_len);

                grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order);
                if (grp) {
                        *group = grp->bb_group;
                        ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED;
                        return;
                }
        }

        /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */
        ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
        *new_cr = CR_GOAL_LEN_SLOW;
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
        if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
                return 0;
        if (ac->ac_criteria >= CR_GOAL_LEN_SLOW)
                return 0;
        if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
                return 0;
        return 1;
}

/*
 * Return next linear group for allocation.
 */
static ext4_group_t
next_linear_group(ext4_group_t group, ext4_group_t ngroups)
{
        /*
         * Artificially restricted ngroups for non-extent
         * files makes group > ngroups possible on first loop.
         */
        return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at current CR level, this field is updated to indicate
 *            the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            output, this field indicates the next group that should be used as
 *            determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
                enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
        *new_cr = ac->ac_criteria;

        if (!should_optimize_scan(ac)) {
                *group = next_linear_group(*group, ngroups);
                return;
        }

        /*
         * Optimized scanning can return non adjacent groups which can cause
         * seek overhead for rotational disks. So try a few linear groups before
         * trying optimized scan.
         */
        if (ac->ac_groups_linear_remaining) {
                *group = next_linear_group(*group, ngroups);
                ac->ac_groups_linear_remaining--;
                return;
        }

        if (*new_cr == CR_POWER2_ALIGNED) {
                ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
        } else if (*new_cr == CR_GOAL_LEN_FAST) {
                ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
        } else if (*new_cr == CR_BEST_AVAIL_LEN) {
                ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
        } else {
                /*
                 * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
                 * rb tree sorted by bb_free. But until that happens, we should
                 * never come here.
                 */
                WARN_ON(1);
        }
}

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int i;

        for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
                if (grp->bb_counters[i] > 0)
                        break;
        /* No need to move between order lists? */
        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
            i == grp->bb_largest_free_order) {
                grp->bb_largest_free_order = i;
                return;
        }

        if (grp->bb_largest_free_order >= 0) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_del_init(&grp->bb_largest_free_order_node);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
        grp->bb_largest_free_order = i;
        if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_add_tail(&grp->bb_largest_free_order_node,
                      &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
                            void *buddy, void *bitmap, ext4_group_t group,
                            struct ext4_group_info *grp)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_grpblk_t i = 0;
        ext4_grpblk_t first;
        ext4_grpblk_t len;
        unsigned free = 0;
        unsigned fragments = 0;
        unsigned long long period = get_cycles();

        /* initialize buddy from bitmap which is aggregation
         * of on-disk bitmap and preallocations */
        i = mb_find_next_zero_bit(bitmap, max, 0);
        grp->bb_first_free = i;
        while (i < max) {
                fragments++;
                first = i;
                i = mb_find_next_bit(bitmap, max, i);
                len = i - first;
                free += len;
                if (len > 1)
                        ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
                else
                        grp->bb_counters[0]++;
                if (i < max)
                        i = mb_find_next_zero_bit(bitmap, max, i);
        }
        grp->bb_fragments = fragments;

        if (free != grp->bb_free) {
                ext4_grp_locked_error(sb, group, 0, 0,
                                      "block bitmap and bg descriptor "
                                      "inconsistent: %u vs %u free clusters",
                                      free, grp->bb_free);
                /*
                 * If we intend to continue, we consider group descriptor
                 * corrupt and update bb_free using bitmap value
                 */
                grp->bb_free = free;
                ext4_mark_group_bitmap_corrupted(sb, group,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
        }
        mb_set_largest_free_order(sb, grp);
        mb_update_avg_fragment_size(sb, grp);

        clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

        period = get_cycles() - period;
        atomic_inc(&sbi->s_mb_buddies_generated);
        atomic64_add(period, &sbi->s_mb_generation_time);
}

static void mb_regenerate_buddy(struct ext4_buddy *e4b)
{
        int count;
        int order = 1;
        void *buddy;

        while ((buddy = mb_find_buddy(e4b, order++, &count)))
                mb_set_bits(buddy, 0, count);

        e4b->bd_info->bb_fragments = 0;
        memset(e4b->bd_info->bb_counters, 0,
               sizeof(*e4b->bd_info->bb_counters) *
               (e4b->bd_sb->s_blocksize_bits + 2));

        ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
                               e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves
 * block bitmap and buddy information, stored in the inode as
 *
 * { page }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
{
        ext4_group_t ngroups;
        unsigned int blocksize;
        int blocks_per_page;
        int groups_per_page;
        int err = 0;
        int i;
        ext4_group_t first_group, group;
        int first_block;
        struct super_block *sb;
        struct buffer_head *bhs;
        struct buffer_head **bh = NULL;
        struct inode *inode;
        char *data;
        char *bitmap;
        struct ext4_group_info *grinfo;

        inode = folio->mapping->host;
        sb = inode->i_sb;
        ngroups = ext4_get_groups_count(sb);
        blocksize = i_blocksize(inode);
        blocks_per_page = PAGE_SIZE / blocksize;

        mb_debug(sb, "init folio %lu\n", folio->index);

        groups_per_page = blocks_per_page >> 1;
        if (groups_per_page == 0)
                groups_per_page = 1;

        /* allocate buffer_heads to read bitmaps */
        if (groups_per_page > 1) {
                i = sizeof(struct buffer_head *) * groups_per_page;
                bh = kzalloc(i, gfp);
                if (bh == NULL)
                        return -ENOMEM;
        } else
                bh = &bhs;

        first_group = folio->index * blocks_per_page / 2;

        /* read all groups the folio covers into the cache */
        for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
                if (group >= ngroups)
                        break;

                grinfo = ext4_get_group_info(sb, group);
                if (!grinfo)
                        continue;
                /*
                 * If the folio is uptodate then we came here after online
                 * resize which added some new uninitialized group info
                 * structs, so we must skip all initialized uptodate buddies
                 * on the folio, which may be currently in use by an
                 * allocating task.
                 */
                if (folio_test_uptodate(folio) &&
                    !EXT4_MB_GRP_NEED_INIT(grinfo)) {
                        bh[i] = NULL;
                        continue;
                }
                bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
                if (IS_ERR(bh[i])) {
                        err = PTR_ERR(bh[i]);
                        bh[i] = NULL;
                        goto out;
                }
                mb_debug(sb, "read bitmap for group %u\n", group);
        }

        /* wait for I/O completion */
        for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
                int err2;

                if (!bh[i])
                        continue;
                err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
                if (!err)
                        err = err2;
        }

        first_block = folio->index * blocks_per_page;
        for (i = 0; i < blocks_per_page; i++) {
                group = (first_block + i) >> 1;
                if (group >= ngroups)
                        break;

                if (!bh[group - first_group])
                        /* skip initialized uptodate buddy */
                        continue;

                if (!buffer_verified(bh[group - first_group]))
                        /* Skip faulty bitmaps */
                        continue;
                err = 0;

                /*
                 * data carries information regarding this
                 * particular group in the format specified
                 * above
                 *
                 */
                data = folio_address(folio) + (i * blocksize);
                bitmap = bh[group - first_group]->b_data;

                /*
                 * We place the buddy block and bitmap block
                 * close together
                 */
                grinfo = ext4_get_group_info(sb, group);
                if (!grinfo) {
                        err = -EFSCORRUPTED;
                        goto out;
                }
                if ((first_block + i) & 1) {
                        /* this is block of buddy */
                        BUG_ON(incore == NULL);
                        mb_debug(sb, "put buddy for group %u in folio %lu/%x\n",
                                 group, folio->index, i * blocksize);
                        trace_ext4_mb_buddy_bitmap_load(sb, group);
                        grinfo->bb_fragments = 0;
                        memset(grinfo->bb_counters, 0,
                               sizeof(*grinfo->bb_counters) *
                               (MB_NUM_ORDERS(sb)));
                        /*
                         * incore got set to the group block bitmap below
                         */
                        ext4_lock_group(sb, group);
                        /* init the buddy */
                        memset(data, 0xff, blocksize);
                        ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
                        ext4_unlock_group(sb, group);
                        incore = NULL;
                } else {
                        /* this is block of bitmap */
                        BUG_ON(incore != NULL);
                        mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n",
                                 group, folio->index, i * blocksize);
                        trace_ext4_mb_bitmap_load(sb, group);

                        /* see comments in ext4_mb_put_pa() */
                        ext4_lock_group(sb, group);
                        memcpy(data, bitmap, blocksize);

                        /* mark all preallocated blks used in in-core bitmap */
                        ext4_mb_generate_from_pa(sb, data, group);
                        WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root));
                        ext4_unlock_group(sb, group);

                        /* set incore so that the buddy information can be
                         * generated using this
                         */
                        incore = data;
                }
        }
        folio_mark_uptodate(folio);

out:
        if (bh) {
                for (i = 0; i < groups_per_page; i++)
                        brelse(bh[i]);
                if (bh != &bhs)
                        kfree(bh);
        }
        return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure other parallel init_group
 * on the same buddy page doesn't happen while holding the buddy page lock.
 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
 * are on the same page e4b->bd_buddy_folio is NULL and return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
                ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
        struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
        int block, pnum, poff;
        int blocks_per_page;
        struct folio *folio;

        e4b->bd_buddy_folio = NULL;
        e4b->bd_bitmap_folio = NULL;

        blocks_per_page = PAGE_SIZE / sb->s_blocksize;
        /*
         * the buddy cache inode stores the block bitmap
         * and buddy information in consecutive blocks.
         * So for each group we need two blocks.
         */
        block = group * 2;
        pnum = block / blocks_per_page;
        poff = block % blocks_per_page;
        folio = __filemap_get_folio(inode->i_mapping, pnum,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
        if (IS_ERR(folio))
                return PTR_ERR(folio);
        BUG_ON(folio->mapping != inode->i_mapping);
        e4b->bd_bitmap_folio = folio;
        e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);

        if (blocks_per_page >= 2) {
                /* buddy and bitmap are on the same page */
                return 0;
        }

        /* blocks_per_page == 1, hence we need another page for the buddy */
        folio = __filemap_get_folio(inode->i_mapping, block + 1,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
        if (IS_ERR(folio))
                return PTR_ERR(folio);
        BUG_ON(folio->mapping != inode->i_mapping);
        e4b->bd_buddy_folio = folio;
        return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
        if (e4b->bd_bitmap_folio) {
                folio_unlock(e4b->bd_bitmap_folio);
                folio_put(e4b->bd_bitmap_folio);
        }
        if (e4b->bd_buddy_folio) {
                folio_unlock(e4b->bd_buddy_folio);
                folio_put(e4b->bd_buddy_folio);
        }
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{

        struct ext4_group_info *this_grp;
        struct ext4_buddy e4b;
        struct folio *folio;
        int ret = 0;

        might_sleep();
        mb_debug(sb, "init group %u\n", group);
        this_grp = ext4_get_group_info(sb, group);
        if (!this_grp)
                return -EFSCORRUPTED;

        /*
         * This ensures that we don't reinit the buddy cache
         * page which maps to the group from which we are already
         * allocating. If we are looking at the buddy cache we would
         * have taken a reference using ext4_mb_load_buddy and that
         * would have pinned the buddy page to the page cache.
         * The call to ext4_mb_get_buddy_page_lock will mark the
         * page accessed.
         */
        ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
        if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
                /*
                 * somebody initialized the group
                 * return without doing anything
                 */
                goto err;
        }

        folio = e4b.bd_bitmap_folio;
        ret = ext4_mb_init_cache(folio, NULL, gfp);
        if (ret)
                goto err;
        if (!folio_test_uptodate(folio)) {
                ret = -EIO;
                goto err;
        }

        if (e4b.bd_buddy_folio == NULL) {
                /*
                 * If both the bitmap and buddy are in
                 * the same page we don't need to force
                 * init the buddy
                 */
                ret = 0;
                goto err;
        }
        /* init buddy cache */
        folio = e4b.bd_buddy_folio;
        ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp);
        if (ret)
                goto err;
        if (!folio_test_uptodate(folio)) {
                ret = -EIO;
                goto err;
        }
err:
        ext4_mb_put_buddy_page_lock(&e4b);
        return ret;
}
1573
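/*
 * Annotation: the EXT4_MB_GRP_NEED_INIT() re-check above happens only
 * once the bitmap/buddy folio locks are held, so of two racing callers
 * the loser sees the flag already cleared and bails out without
 * re-reading the on-disk bitmap.
 */
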
/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
		       struct ext4_buddy *e4b, gfp_t gfp)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct folio *folio;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(sb, "load group %u\n", group);

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);
	if (!grp)
		return -EFSCORRUPTED;

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_folio = NULL;
	e4b->bd_bitmap_folio = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group, gfp);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* Avoid locking the folio in the fast path ... */
	folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
		if (!IS_ERR(folio))
			/*
			 * drop the folio reference and try
			 * to get the folio with lock. If we
			 * are not uptodate that implies
			 * somebody just created the folio but
			 * is yet to initialize it. So
			 * wait for it to initialize.
			 */
			folio_put(folio);
		folio = __filemap_get_folio(inode->i_mapping, pnum,
				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
		if (!IS_ERR(folio)) {
			if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
				"ext4: bitmap's mapping != inode->i_mapping\n")) {
				/* should never happen */
				folio_unlock(folio);
				ret = -EINVAL;
				goto err;
			}
			if (!folio_test_uptodate(folio)) {
				ret = ext4_mb_init_cache(folio, NULL, gfp);
				if (ret) {
					folio_unlock(folio);
					goto err;
				}
				mb_cmp_bitmaps(e4b, folio_address(folio) +
					       (poff * sb->s_blocksize));
			}
			folio_unlock(folio);
		}
	}
	if (IS_ERR(folio)) {
		ret = PTR_ERR(folio);
		goto err;
	}
	if (!folio_test_uptodate(folio)) {
		ret = -EIO;
		goto err;
	}

	/* Folios marked accessed already */
	e4b->bd_bitmap_folio = folio;
	e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
		if (!IS_ERR(folio))
			folio_put(folio);
		folio = __filemap_get_folio(inode->i_mapping, pnum,
				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
		if (!IS_ERR(folio)) {
			if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
				"ext4: buddy bitmap's mapping != inode->i_mapping\n")) {
				/* should never happen */
				folio_unlock(folio);
				ret = -EINVAL;
				goto err;
			}
			if (!folio_test_uptodate(folio)) {
				ret = ext4_mb_init_cache(folio, e4b->bd_bitmap,
							 gfp);
				if (ret) {
					folio_unlock(folio);
					goto err;
				}
			}
			folio_unlock(folio);
		}
	}
	if (IS_ERR(folio)) {
		ret = PTR_ERR(folio);
		goto err;
	}
	if (!folio_test_uptodate(folio)) {
		ret = -EIO;
		goto err;
	}

	/* Folios marked accessed already */
	e4b->bd_buddy_folio = folio;
	e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize);

	return 0;

err:
	if (!IS_ERR_OR_NULL(folio))
		folio_put(folio);
	if (e4b->bd_bitmap_folio)
		folio_put(e4b->bd_bitmap_folio);

	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
			      struct ext4_buddy *e4b)
{
	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_folio)
		folio_put(e4b->bd_bitmap_folio);
	if (e4b->bd_buddy_folio)
		folio_put(e4b->bd_buddy_folio);
}

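/*
 * Usage sketch (illustrative, mirroring ext4_mb_try_best_found()
 * below): every successful ext4_mb_load_buddy*() call pins both
 * folios and must be paired with ext4_mb_unload_buddy():
 *
 *	struct ext4_buddy e4b;
 *
 *	if (ext4_mb_load_buddy(sb, group, &e4b))
 *		return;
 *	ext4_lock_group(sb, group);
 *	... inspect e4b.bd_bitmap and the buddy orders ...
 *	ext4_unlock_group(sb, group);
 *	ext4_mb_unload_buddy(&e4b);
 */
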
static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1, max;
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	while (order <= e4b->bd_blkbits + 1) {
		bb = mb_find_buddy(e4b, order, &max);
		if (!mb_test_bit(block >> order, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		order++;
	}
	return 0;
}

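/*
 * Annotation: in the order-N buddy bitmap a clear bit at (block >> N)
 * means the whole 2^N-cluster chunk containing 'block' is free as one
 * unit, so the loop above returns the order of the free chunk that
 * 'block' belongs to (0 if the bit is set at every order checked).
 */
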
static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

/*
 * Clear bits in the given range; returns the first found zero bit
 * if any, -1 otherwise.
 */
static int mb_test_and_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;
	int zero_bit = -1;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			if (*addr != (__u32)(-1) && zero_bit == -1)
				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
			*addr = 0;
			cur += 32;
			continue;
		}
		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
			zero_bit = cur;
		cur++;
	}

	return zero_bit;
}

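/*
 * Annotation: the returned zero_bit is a bit that was already clear
 * before this call; mb_free_blocks() uses it (block != -1) to detect
 * a double free and mark the block bitmap corrupted.
 */
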
void mb_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}

static inline int mb_buddy_adjust_border(int *bit, void *bitmap, int side)
{
	if (mb_test_bit(*bit + side, bitmap)) {
		mb_clear_bit(*bit, bitmap);
		(*bit) -= side;
		return 1;
	} else {
		(*bit) += side;
		mb_set_bit(*bit, bitmap);
		return -1;
	}
}

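/*
 * Annotation: the return value is the delta the caller applies to
 * bb_counters[order]: +1 when the border chunk becomes a standalone
 * free chunk at this order (its neighbour is busy), -1 when it
 * coalesces with a free neighbour and is accounted one order higher
 * instead.
 */
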
static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
{
	int max;
	int order = 1;
	void *buddy = mb_find_buddy(e4b, order, &max);

	while (buddy) {
		void *buddy2;

		/* Bits in range [first; last] are known to be set since
		 * corresponding blocks were allocated. Bits in range
		 * (first; last) will stay set because they form buddies on
		 * upper layer. We just deal with borders if they don't
		 * align with upper layer and then go up.
		 * Releasing entire group is all about clearing
		 * single bit of highest order buddy.
		 */

		/* Example:
		 * ---------------------------------
		 * |   1   |   1   |   1   |   1   |
		 * ---------------------------------
		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
		 * ---------------------------------
		 *   0   1   2   3   4   5   6   7
		 *      \_____________________/
		 *
		 * Neither [1] nor [6] is aligned to above layer.
		 * Left neighbour [0] is free, so mark it busy,
		 * decrease bb_counters and extend range to
		 * [0; 6]
		 * Right neighbour [7] is busy. It can't be coalesced with
		 * [6], so mark [6] free, increase bb_counters and shrink
		 * range to [0; 5].
		 * Then shift range to [0; 2], go up and do the same.
		 */


		if (first & 1)
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
		if (!(last & 1))
			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
		if (first > last)
			break;
		order++;

		buddy2 = mb_find_buddy(e4b, order, &max);
		if (!buddy2) {
			mb_clear_bits(buddy, first, last - first + 1);
			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
			break;
		}
		first >>= 1;
		last >>= 1;
		buddy = buddy2;
	}
}

static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int left_is_free = 0;
	int right_is_free = 0;
	int block;
	int last = first + count - 1;
	struct super_block *sb = e4b->bd_sb;

	if (WARN_ON(count == 0))
		return;
	BUG_ON(last >= (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	/* Don't bother if the block group is corrupt. */
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		return;

	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	/* access memory sequentially: check left neighbour,
	 * clear range and then check right neighbour
	 */
	if (first != 0)
		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);

	if (unlikely(block != -1)) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		ext4_fsblk_t blocknr;

		/*
		 * Fastcommit replay can free already freed blocks which
		 * corrupts allocation info. Regenerate it.
		 */
		if (sbi->s_mount_state & EXT4_FC_REPLAY) {
			mb_regenerate_buddy(e4b);
			goto check;
		}

		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
		blocknr += EXT4_C2B(sbi, block);
		ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		ext4_grp_locked_error(sb, e4b->bd_group,
				      inode ? inode->i_ino : 0, blocknr,
				      "freeing already freed block (bit %u); block bitmap corrupt.",
				      block);
		return;
	}

	this_cpu_inc(discard_pa_seq);
	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (left_is_free && right_is_free)
		e4b->bd_info->bb_fragments--;
	else if (!left_is_free && !right_is_free)
		e4b->bd_info->bb_fragments++;

	/* buddy[0] == bd_bitmap is a special case, so handle
	 * it right away and let mb_buddy_mark_free stay free of
	 * zero order checks.
	 * Check if neighbours are to be coalesced,
	 * adjust bitmap bb_counters and borders appropriately.
	 */
	if (first & 1) {
		first += !left_is_free;
		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
	}
	if (!(last & 1)) {
		last -= !right_is_free;
		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
	}

	if (first <= last)
		mb_buddy_mark_free(e4b, first >> 1, last >> 1);

	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_update_avg_fragment_size(sb, e4b->bd_info);
check:
	mb_check_buddy(e4b);
}

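/*
 * Annotation on the fragment accounting above: freeing a range with
 * free blocks on both sides merges three free fragments into one
 * (bb_fragments--); freeing between two busy neighbours creates a new
 * fragment (bb_fragments++); exactly one free neighbour leaves the
 * count unchanged.
 */
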
static int mb_find_extent(struct ext4_buddy *e4b, int block,
			  int needed, struct ext4_free_extent *ex)
{
	int max, order, next;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, 0, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* find actual order */
	order = mb_find_order_for_block(e4b, block);

	ex->fe_len = (1 << order) - (block & ((1 << order) - 1));
	ex->fe_start = block;
	ex->fe_group = e4b->bd_group;

	block = block >> order;

	while (needed > ex->fe_len &&
	       mb_find_buddy(e4b, order, &max)) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, e4b->bd_bitmap))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
		/* Should never happen! (but apparently sometimes does?!?) */
		WARN_ON(1);
		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
			"corruption or bug in mb_find_extent "
			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
			block, order, needed, ex->fe_group, ex->fe_start,
			ex->fe_len, ex->fe_logical);
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
	}
	return ex->fe_len;
}

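/*
 * Annotation: mb_find_extent() starts from the order of the free
 * chunk containing 'block' and keeps appending the next free chunk to
 * the right while more clusters are needed, so ex->fe_len may exceed
 * 'needed' but the extent always starts at 'block'.
 */
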
static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;
	int ord_start, ord_end;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	this_cpu_inc(discard_pa_seq);
	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, e4b->bd_bitmap);
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord_start = (start >> ord) << ord;
		ord_end = ord_start + (1 << ord);
		/* first chunk */
		if (start > ord_start)
			ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy,
						 ord_start, start - ord_start,
						 e4b->bd_info);

		/* last chunk */
		if (start + len < ord_end) {
			ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy,
						 start + len,
						 ord_end - (start + len),
						 e4b->bd_info);
			break;
		}
		len = start + len - ord_end;
		start = ord_end;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
	mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}

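/*
 * Annotation: the value returned by mb_mark_used() packs the state at
 * the first buddy split -- the remaining length in the low 16 bits
 * and the split order in the high bits. ext4_mb_use_best_found()
 * stores these as ac_tail and ac_buddy for allocation history.
 */
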
/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get an ext4_mb_init_cache() call for this
	 * group until we update the bitmap. That would mean we
	 * double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_folio = e4b->bd_bitmap_folio;
	folio_get(ac->ac_bitmap_folio);
	ac->ac_buddy_folio = e4b->bd_buddy_folio;
	folio_get(ac->ac_buddy_folio);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
	/*
	 * As we've just preallocated more space than
	 * user requested originally, we store allocated
	 * space in a special descriptor.
	 */
	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
		ext4_mb_new_preallocation(ac);

}

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
		ext4_mb_use_best_found(ac, e4b);
}

/*
 * The routine checks whether found extent is good enough. If it is,
 * then the extent gets marked used and flag is set to the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previous found extent and if new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find good enough extent.
 *
 * The algorithm used is roughly as follows:
 *
 * * If free extent found is exactly as big as goal, then
 *   stop the scan and use it immediately
 *
 * * If free extent found is smaller than goal, then keep retrying
 *   up to a max of sbi->s_mb_max_to_scan times (default 200). After
 *   that stop scanning and use whatever we have.
 *
 * * If free extent found is bigger than goal, then keep retrying
 *   up to a max of sbi->s_mb_min_to_scan times (default 10) before
 *   stopping the scan and using the extent.
 *
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;
	ac->ac_cX_found[ac->ac_criteria]++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfies the request, but is
		 * smaller than previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}

	ext4_mb_check_limits(ac, e4b, 0);
}

static noinline_for_stack
void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return;

	ext4_lock_group(ac->ac_sb, group);
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		goto out;

	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

out:
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);
}

static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct ext4_free_extent ex;

	if (!grp)
		return -EFSCORRUPTED;
	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
		return 0;
	if (grp->bb_free == 0)
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		goto out;

	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);
	ex.fe_logical = 0xDEADFA11; /* debug value */

	if (max >= ac->ac_g_ex.fe_len &&
	    ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) {
		ext4_fsblk_t start;

		start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
out:
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * The routine scans buddy structures (not bitmap!) from given order
 * to max order and tries to find big enough chunk to satisfy the req
 */
static noinline_for_stack
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		if (WARN_RATELIMIT(buddy == NULL,
			"ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i))
			continue;

		k = mb_find_next_zero_bit(buddy, max, 0);
		if (k >= max) {
			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
					e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
				"%d free clusters of order %d. But found 0",
				grp->bb_counters[i], i);
			break;
		}
		ac->ac_found++;
		ac->ac_cX_found[ac->ac_criteria]++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}

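/*
 * Example (illustrative): if the first clear bit in the order-4
 * buddy bitmap is k = 3, the free chunk covers clusters 48..63
 * (fe_start = k << i = 3 << 4, fe_len = 2^4), which is why the
 * BUG_ON above can insist that the goal length was met exactly.
 */
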
/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, caller must pass number of
 * free blocks in the group, so the routine can know upper limit.
 */
static noinline_for_stack
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	int i, j, freelen;
	int free;

	free = e4b->bd_info->bb_free;
	if (WARN_ON(free <= 0))
		return;

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_CLUSTERS_PER_GROUP(sb), i);
		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
			/*
			 * If we have a corrupt bitmap, we won't find any
			 * free blocks even though group info says we
			 * have free blocks
			 */
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But bitmap says 0",
					free);
			break;
		}

		if (!ext4_mb_cr_expensive(ac->ac_criteria)) {
			/*
			 * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are
			 * sure that this group will have a large enough
			 * continuous free extent, so skip over the smaller
			 * free extents
			 */
			j = mb_find_next_bit(bitmap,
					EXT4_CLUSTERS_PER_GROUP(sb), i);
			freelen = j - i;

			if (freelen < ac->ac_g_ex.fe_len) {
				i = j;
				free -= freelen;
				continue;
			}
		}

		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
		if (WARN_ON(ex.fe_len <= 0))
			break;
		if (free < ex.fe_len) {
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicates that the bitmap is corrupt. So exit
			 * without claiming the space.
			 */
			break;
		}
		ex.fe_logical = 0xDEADC0DE; /* debug value */
		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storages like raid5
 * we try to find stripe-aligned chunks for stripe-size-multiple requests
 */
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i, stripe;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);

	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;

	stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe);
	i = EXT4_B2C(sbi, i);
	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, i, stripe, &ex);
			if (max >= stripe) {
				ac->ac_found++;
				ac->ac_cX_found[ac->ac_criteria]++;
				ex.fe_logical = 0xDEADF00D; /* debug value */
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += stripe;
	}
}

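/*
 * Worked example (illustrative): with sbi->s_stripe = 16 and
 * first_group_block = 100, a = (100 + 15) / 16 = 7, so
 * i = 7 * 16 - 100 = 12 is the offset of the first stripe-aligned
 * block in this group; the scan then advances in stripe-sized steps.
 */
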
/*
 * This is also called BEFORE we load the buddy bitmap.
 * Returns true if the group is suitable for the allocation,
 * false otherwise.
 */
static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, enum criteria cr)
{
	ext4_grpblk_t free, fragments;
	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS);

	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
		return false;

	free = grp->bb_free;
	if (free == 0)
		return false;

	fragments = grp->bb_fragments;
	if (fragments == 0)
		return false;

	switch (cr) {
	case CR_POWER2_ALIGNED:
		BUG_ON(ac->ac_2order == 0);

		/* Avoid using the first bg of a flexgroup for data files */
		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
		    ((group % flex_size) == 0))
			return false;

		if (free < ac->ac_g_ex.fe_len)
			return false;

		if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
			return true;

		if (grp->bb_largest_free_order < ac->ac_2order)
			return false;

		return true;
	case CR_GOAL_LEN_FAST:
	case CR_BEST_AVAIL_LEN:
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return true;
		break;
	case CR_GOAL_LEN_SLOW:
		if (free >= ac->ac_g_ex.fe_len)
			return true;
		break;
	case CR_ANY_FREE:
		return true;
	default:
		BUG();
	}

	return false;
}

/*
 * This could return negative error code if something goes wrong
 * during ext4_mb_init_group(). This should not be called with
 * ext4_lock_group() held.
 *
 * Note: because we are conditionally operating with the group lock in
 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
 * function using __acquire and __release. This means we need to be
 * super careful before messing with the error path handling via "goto
 * out"!
 */
static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
				     ext4_group_t group, enum criteria cr)
{
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
	ext4_grpblk_t free;
	int ret = 0;

	if (!grp)
		return -EFSCORRUPTED;
	if (sbi->s_mb_stats)
		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
	if (should_lock) {
		ext4_lock_group(sb, group);
		__release(ext4_group_lock_ptr(sb, group));
	}
	free = grp->bb_free;
	if (free == 0)
		goto out;
	/*
	 * In all criteria except CR_ANY_FREE we try to avoid groups that
	 * can't possibly satisfy the full goal request due to insufficient
	 * free blocks.
	 */
	if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len)
		goto out;
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
		goto out;
	if (should_lock) {
		__acquire(ext4_group_lock_ptr(sb, group));
		ext4_unlock_group(sb, group);
	}

	/* We only do this if the grp has never been initialized */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		struct ext4_group_desc *gdp =
			ext4_get_group_desc(sb, group, NULL);
		int ret;

		/*
		 * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
		 * search to find large good chunks almost for free. If buddy
		 * data is not ready, then this optimization makes no sense.
		 * But we never skip the first block group in a flex_bg,
		 * since this gets used for metadata block allocation, and
		 * we want to make sure we locate metadata blocks in the
		 * first block group in the flex_bg if possible.
		 */
		if (!ext4_mb_cr_expensive(cr) &&
		    (!sbi->s_log_groups_per_flex ||
		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
		    !(ext4_has_group_desc_csum(sb) &&
		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
			return 0;
		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
		if (ret)
			return ret;
	}

	if (should_lock) {
		ext4_lock_group(sb, group);
		__release(ext4_group_lock_ptr(sb, group));
	}
	ret = ext4_mb_good_group(ac, group, cr);
out:
	if (should_lock) {
		__acquire(ext4_group_lock_ptr(sb, group));
		ext4_unlock_group(sb, group);
	}
	return ret;
}

/*
 * Start prefetching @nr block bitmaps starting at @group.
 * Return the next group which needs to be prefetched.
 */
ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
			      unsigned int nr, int *cnt)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct buffer_head *bh;
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (nr-- > 0) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
								  NULL);
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		/*
		 * Prefetch block groups with free blocks; but don't
		 * bother if it is marked uninitialized on disk, since
		 * it won't require I/O to read. Also only try to
		 * prefetch once, so we avoid the getblk() call, which
		 * can be expensive.
		 */
		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
		    EXT4_MB_GRP_NEED_INIT(grp) &&
		    ext4_free_group_clusters(sb, gdp) > 0) {
			bh = ext4_read_block_bitmap_nowait(sb, group, true);
			if (bh && !IS_ERR(bh)) {
				if (!buffer_uptodate(bh) && cnt)
					(*cnt)++;
				brelse(bh);
			}
		}
		if (++group >= ngroups)
			group = 0;
	}
	blk_finish_plug(&plug);
	return group;
}

/*
 * Prefetching reads the block bitmap into the buffer cache; but we
 * need to make sure that the buddy bitmap in the page cache has been
 * initialized. Note that ext4_mb_init_group() will block if the I/O
 * is not yet completed, or indeed if the I/O was never initiated
 * because ext4_mb_prefetch did not start it.
 *
 * TODO: We should actually kick off the buddy bitmap setup in a work
 * queue when the buffer I/O is completed, so that we don't block
 * waiting for the block allocation bitmap read to finish when
 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
 */
void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
			   unsigned int nr)
{
	struct ext4_group_desc *gdp;
	struct ext4_group_info *grp;

	while (nr-- > 0) {
		if (!group)
			group = ext4_get_groups_count(sb);
		group--;
		gdp = ext4_get_group_desc(sb, group, NULL);
		grp = ext4_get_group_info(sb, group);

		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
		    ext4_free_group_clusters(sb, gdp) > 0) {
			if (ext4_mb_init_group(sb, group, GFP_NOFS))
				break;
		}
	}
}

static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
	ext4_group_t prefetch_grp = 0, ngroups, group, i;
	enum criteria new_cr, cr = CR_GOAL_LEN_FAST;
	int err = 0, first_err = 0;
	unsigned int nr = 0, prefetch_ios = 0;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	struct ext4_buddy e4b;
	int lost;

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);
	ngroups = ext4_get_groups_count(sb);
	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = sbi->s_blockfile_groups;

	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* first, try the goal */
	err = ext4_mb_find_by_goal(ac, &e4b);
	if (err || ac->ac_status == AC_STATUS_FOUND)
		goto out;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		goto out;

	/*
	 * ac->ac_2order is set only if the fe_len is a power of 2.
	 * If ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED
	 * so that we try exact allocation using buddy.
	 */
	i = fls(ac->ac_g_ex.fe_len);
	ac->ac_2order = 0;
	/*
	 * We search using buddy data only if the order of the request
	 * is greater than or equal to sbi->s_mb_order2_reqs.
	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
	 * We also support searching for power-of-two requests only for
	 * requests up to the maximum buddy size we have constructed.
	 */
	if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
		if (is_power_of_2(ac->ac_g_ex.fe_len))
			ac->ac_2order = array_index_nospec(i - 1,
							   MB_NUM_ORDERS(sb));
	}
TT
2845 /* if stream allocation is enabled, use global goal */
2846 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
c9de560d
AT
2847 /* TBD: may be hot point */
2848 spin_lock(&sbi->s_md_lock);
2849 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2850 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2851 spin_unlock(&sbi->s_md_lock);
2852 }
4ba74d00 2853
c9de560d 2854 /*
4c0cfebd
TT
2855 * Let's just scan groups to find more-less suitable blocks We
2856 * start with CR_GOAL_LEN_FAST, unless it is power of 2
2857 * aligned, in which case let's do that faster approach first.
c9de560d 2858 */
4c0cfebd
TT
2859 if (ac->ac_2order)
2860 cr = CR_POWER2_ALIGNED;
c9de560d 2861repeat:
4eb7a4a1 2862 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
c9de560d 2863 ac->ac_criteria = cr;
ed8f9c75
AK
2864 /*
2865 * searching for the right group start
2866 * from the goal value specified
2867 */
2868 group = ac->ac_g_ex.fe_group;
196e402a 2869 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
cfd73237 2870 prefetch_grp = group;
9c97c34a 2871 nr = 0;
ed8f9c75 2872
4fca50d4
JK
2873 for (i = 0, new_cr = cr; i < ngroups; i++,
2874 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2875 int ret = 0;
196e402a 2876
2ed5724d 2877 cond_resched();
196e402a
HS
2878 if (new_cr != cr) {
2879 cr = new_cr;
2880 goto repeat;
2881 }
c9de560d 2882
cfd73237
AZ
2883 /*
2884 * Batch reads of the block allocation bitmaps
2885 * to get multiple READs in flight; limit
4eea9fbe
KS
2886 * prefetching at inexpensive CR, otherwise mballoc
2887 * can spend a lot of time loading imperfect groups
cfd73237
AZ
2888 */
2889 if ((prefetch_grp == group) &&
304749c0 2890 (ext4_mb_cr_expensive(cr) ||
cfd73237 2891 prefetch_ios < sbi->s_mb_prefetch_limit)) {
cfd73237
AZ
2892 nr = sbi->s_mb_prefetch;
2893 if (ext4_has_feature_flex_bg(sb)) {
82ef1370
CX
2894 nr = 1 << sbi->s_log_groups_per_flex;
2895 nr -= group & (nr - 1);
2896 nr = min(nr, sbi->s_mb_prefetch);
cfd73237
AZ
2897 }
2898 prefetch_grp = ext4_mb_prefetch(sb, group,
2899 nr, &prefetch_ios);
cfd73237
AZ
2900 }
2901
8a57d9d6 2902 /* This now checks without needing the buddy page */
8ef123fe 2903 ret = ext4_mb_good_group_nolock(ac, group, cr);
42ac1848
LC
2904 if (ret <= 0) {
2905 if (!first_err)
2906 first_err = ret;
c9de560d 2907 continue;
42ac1848 2908 }
c9de560d 2909
c9de560d
AT
2910 err = ext4_mb_load_buddy(sb, group, &e4b);
2911 if (err)
2912 goto out;
2913
2914 ext4_lock_group(sb, group);
8a57d9d6
CW
2915
2916 /*
2917 * We need to check again after locking the
2918 * block group
2919 */
42ac1848 2920 ret = ext4_mb_good_group(ac, group, cr);
8ef123fe 2921 if (ret == 0) {
c9de560d 2922 ext4_unlock_group(sb, group);
e39e07fd 2923 ext4_mb_unload_buddy(&e4b);
c9de560d
AT
2924 continue;
2925 }
2926
2927 ac->ac_groups_scanned++;
f52f3d2b 2928 if (cr == CR_POWER2_ALIGNED)
c9de560d 2929 ext4_mb_simple_scan_group(ac, &e4b);
1f6bc02f 2930 else {
ff2beee2
OM
2931 bool is_stripe_aligned =
2932 (sbi->s_stripe >=
2933 sbi->s_cluster_ratio) &&
1f6bc02f 2934 !(ac->ac_g_ex.fe_len %
ff2beee2 2935 EXT4_NUM_B2C(sbi, sbi->s_stripe));
1f6bc02f
OM
2936
2937 if ((cr == CR_GOAL_LEN_FAST ||
2938 cr == CR_BEST_AVAIL_LEN) &&
2939 is_stripe_aligned)
2940 ext4_mb_scan_aligned(ac, &e4b);
2941
2942 if (ac->ac_status == AC_STATUS_CONTINUE)
2943 ext4_mb_complex_scan_group(ac, &e4b);
2944 }
c9de560d
AT
2945
2946 ext4_unlock_group(sb, group);
e39e07fd 2947 ext4_mb_unload_buddy(&e4b);
c9de560d
AT
2948
2949 if (ac->ac_status != AC_STATUS_CONTINUE)
2950 break;
2951 }
a6c75eaf
HS
2952 /* Processed all groups and haven't found blocks */
2953 if (sbi->s_mb_stats && i == ngroups)
2954 atomic64_inc(&sbi->s_bal_cX_failed[cr]);
7e170922 2955
f52f3d2b 2956 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN)
7e170922 2957 /* Reset goal length to original goal length before
f52f3d2b 2958 * falling into CR_GOAL_LEN_SLOW */
7e170922 2959 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len;
c9de560d
AT
2960 }
2961
2962 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2963 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2964 /*
2965 * We've been searching too long. Let's try to allocate
2966 * the best chunk we've found so far
2967 */
c9de560d
AT
2968 ext4_mb_try_best_found(ac, &e4b);
2969 if (ac->ac_status != AC_STATUS_FOUND) {
2970 /*
2971 * Someone more lucky has already allocated it.
2972 * The only thing we can do is just take first
2973 * found block(s)
c9de560d 2974 */
66d5e027 2975 lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2976 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
c55ee7d2 2977 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2978 ac->ac_b_ex.fe_len, lost);
2979
c9de560d
AT
2980 ac->ac_b_ex.fe_group = 0;
2981 ac->ac_b_ex.fe_start = 0;
2982 ac->ac_b_ex.fe_len = 0;
2983 ac->ac_status = AC_STATUS_CONTINUE;
2984 ac->ac_flags |= EXT4_MB_HINT_FIRST;
f52f3d2b 2985 cr = CR_ANY_FREE;
c9de560d
AT
2986 goto repeat;
2987 }
2988 }
a6c75eaf
HS
2989
2990 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2991 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
c9de560d 2992out:
42ac1848
LC
2993 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2994 err = first_err;
bbc4ec77 2995
d3df1453 2996 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
bbc4ec77
RH
2997 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2998 ac->ac_flags, cr, err);
cfd73237
AZ
2999
3000 if (nr)
3001 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
3002
c9de560d
AT
3003 return err;
3004}
3005
static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
	struct super_block *sb = pde_data(file_inode(seq->file));
	ext4_group_t group;

	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct super_block *sb = pde_data(file_inode(seq->file));
	ext4_group_t group;

	++*pos;
	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
{
	struct super_block *sb = pde_data(file_inode(seq->file));
	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
	int i, err;
	char nbuf[16];
	struct ext4_buddy e4b;
	struct ext4_group_info *grinfo;
	unsigned char blocksize_bits = min_t(unsigned char,
					     sb->s_blocksize_bits,
					     EXT4_MAX_BLOCK_LOG_SIZE);
	DEFINE_RAW_FLEX(struct ext4_group_info, sg, bb_counters,
			EXT4_MAX_BLOCK_LOG_SIZE + 2);

	group--;
	if (group == 0)
		seq_puts(seq, "#group: free  frags first ["
			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");

	i = (blocksize_bits + 2) * sizeof(sg->bb_counters[0]) +
		sizeof(struct ext4_group_info);

	grinfo = ext4_get_group_info(sb, group);
	if (!grinfo)
		return 0;
	/* Load the group info in memory only if not already loaded. */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
		err = ext4_mb_load_buddy(sb, group, &e4b);
		if (err) {
			seq_printf(seq, "#%-5u: %s\n", group,
				   ext4_decode_error(NULL, err, nbuf));
			return 0;
		}
		ext4_mb_unload_buddy(&e4b);
	}

	/*
	 * We care only about free space counters in the group info and
	 * these are safe to access even after the buddy has been unloaded
	 */
	memcpy(sg, grinfo, i);
	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg->bb_free,
			sg->bb_fragments, sg->bb_first_free);
	for (i = 0; i <= 13; i++)
		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
				sg->bb_counters[i] : 0);
	seq_puts(seq, " ]");
	if (EXT4_MB_GRP_BBITMAP_CORRUPT(sg))
		seq_puts(seq, " Block bitmap corrupted!");
	seq_putc(seq, '\n');
	return 0;
}

static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
{
}

const struct seq_operations ext4_mb_seq_groups_ops = {
	.start  = ext4_mb_seq_groups_start,
	.next   = ext4_mb_seq_groups_next,
	.stop   = ext4_mb_seq_groups_stop,
	.show   = ext4_mb_seq_groups_show,
};

a6c75eaf
HS
3093int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
3094{
c30365b9 3095 struct super_block *sb = seq->private;
a6c75eaf
HS
3096 struct ext4_sb_info *sbi = EXT4_SB(sb);
3097
3098 seq_puts(seq, "mballoc:\n");
3099 if (!sbi->s_mb_stats) {
3100 seq_puts(seq, "\tmb stats collection turned off.\n");
		seq_puts(seq,
			 "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
		return 0;
	}
	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));

	seq_printf(seq, "\tgroups_scanned: %u\n",
		   atomic_read(&sbi->s_bal_groups_scanned));

	/* CR_POWER2_ALIGNED stats */
	seq_puts(seq, "\tcr_p2_aligned_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]));
	seq_printf(seq, "\t\textents_scanned: %u\n",
		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED]));
	seq_printf(seq, "\t\tbad_suggestions: %u\n",
		   atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions));

	/* CR_GOAL_LEN_FAST stats */
	seq_puts(seq, "\tcr_goal_fast_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST]));
	seq_printf(seq, "\t\textents_scanned: %u\n",
		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST]));
	seq_printf(seq, "\t\tbad_suggestions: %u\n",
		   atomic_read(&sbi->s_bal_goal_fast_bad_suggestions));

	/* CR_BEST_AVAIL_LEN stats */
	seq_puts(seq, "\tcr_best_avail_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN]));
	seq_printf(seq, "\t\textents_scanned: %u\n",
		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN]));
	seq_printf(seq, "\t\tbad_suggestions: %u\n",
		   atomic_read(&sbi->s_bal_best_avail_bad_suggestions));

	/* CR_GOAL_LEN_SLOW stats */
	seq_puts(seq, "\tcr_goal_slow_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW]));
	seq_printf(seq, "\t\textents_scanned: %u\n",
		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW]));

	/* CR_ANY_FREE stats */
	seq_puts(seq, "\tcr_any_free_stats:\n");
	seq_printf(seq, "\t\thits: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE]));
	seq_printf(seq, "\t\tgroups_considered: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE]));
	seq_printf(seq, "\t\textents_scanned: %u\n",
		   atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE]));
	seq_printf(seq, "\t\tuseless_loops: %llu\n",
		   atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE]));

	/* Aggregates */
	seq_printf(seq, "\textents_scanned: %u\n",
		   atomic_read(&sbi->s_bal_ex_scanned));
	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
	seq_printf(seq, "\t\tlen_goal_hits: %u\n",
		   atomic_read(&sbi->s_bal_len_goals));
	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));

	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
		   atomic_read(&sbi->s_mb_buddies_generated),
		   ext4_get_groups_count(sb));
	seq_printf(seq, "\tbuddies_time_used: %llu\n",
		   atomic64_read(&sbi->s_mb_generation_time));
	seq_printf(seq, "\tpreallocated: %u\n",
		   atomic_read(&sbi->s_mb_preallocated));
	seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded));
	return 0;
}

static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
{
	struct super_block *sb = pde_data(file_inode(seq->file));
	unsigned long position;

	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
		return NULL;
	position = *pos + 1;
	return (void *) ((unsigned long) position);
}

static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct super_block *sb = pde_data(file_inode(seq->file));
	unsigned long position;

	++*pos;
	if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
		return NULL;
	position = *pos + 1;
	return (void *) ((unsigned long) position);
}

static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
{
	struct super_block *sb = pde_data(file_inode(seq->file));
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned long position = ((unsigned long) v);
	struct ext4_group_info *grp;
	unsigned int count;

	position--;
	if (position >= MB_NUM_ORDERS(sb)) {
		position -= MB_NUM_ORDERS(sb);
		if (position == 0)
			seq_puts(seq, "avg_fragment_size_lists:\n");

		count = 0;
		read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
		list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
				    bb_avg_fragment_size_node)
			count++;
		read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
		seq_printf(seq, "\tlist_order_%u_groups: %u\n",
			   (unsigned int)position, count);
		return 0;
	}

	if (position == 0) {
		seq_printf(seq, "optimize_scan: %d\n",
			   test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
		seq_puts(seq, "max_free_order_lists:\n");
	}
	count = 0;
	read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
	list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
			    bb_largest_free_order_node)
		count++;
	read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
	seq_printf(seq, "\tlist_order_%u_groups: %u\n",
		   (unsigned int)position, count);

	return 0;
}

static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
{
}

const struct seq_operations ext4_mb_seq_structs_summary_ops = {
	.start	= ext4_mb_seq_structs_summary_start,
	.next	= ext4_mb_seq_structs_summary_next,
	.stop	= ext4_mb_seq_structs_summary_stop,
	.show	= ext4_mb_seq_structs_summary_show,
};

static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
{
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];

	BUG_ON(!cachep);
	return cachep;
}

/*
 * Allocate the top-level s_group_info array for the specified number
 * of groups
 */
int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned size;
	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;

	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
		EXT4_DESC_PER_BLOCK_BITS(sb);
	if (size <= sbi->s_group_info_size)
		return 0;

	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
	new_groupinfo = kvzalloc(size, GFP_KERNEL);
	if (!new_groupinfo) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
		return -ENOMEM;
	}
	rcu_read_lock();
	old_groupinfo = rcu_dereference(sbi->s_group_info);
	if (old_groupinfo)
		memcpy(new_groupinfo, old_groupinfo,
		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
	rcu_read_unlock();
	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
	if (old_groupinfo)
		ext4_kvfree_array_rcu(old_groupinfo);
	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
		   sbi->s_group_info_size);
	return 0;
}

/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
			  struct ext4_group_desc *desc)
{
	int i;
	int metalen = 0;
	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info **meta_group_info;
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);

	/*
	 * First check if this group is the first of a reserved block.
	 * If it's true, we have to allocate a new table of pointers
	 * to ext4_group_info structures
	 */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		metalen = sizeof(*meta_group_info) <<
			EXT4_DESC_PER_BLOCK_BITS(sb);
		meta_group_info = kmalloc(metalen, GFP_NOFS);
		if (meta_group_info == NULL) {
			ext4_msg(sb, KERN_ERR, "can't allocate mem "
				 "for a buddy group");
			return -ENOMEM;
		}
		rcu_read_lock();
		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
		rcu_read_unlock();
	}

	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);

	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
	if (meta_group_info[i] == NULL) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
		goto exit_group_info;
	}
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
		&(meta_group_info[i]->bb_state));

	/*
	 * initialize bb_free to be able to skip
	 * empty groups without initialization
	 */
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		meta_group_info[i]->bb_free =
			ext4_free_clusters_after_init(sb, group, desc);
	} else {
		meta_group_info[i]->bb_free =
			ext4_free_group_clusters(sb, desc);
	}

	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
	init_rwsem(&meta_group_info[i]->alloc_sem);
	meta_group_info[i]->bb_free_root = RB_ROOT;
	INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
	INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
	meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
	meta_group_info[i]->bb_group = group;

	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
	return 0;

exit_group_info:
	/* If a meta_group_info table has been allocated, release it now */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		struct ext4_group_info ***group_info;

		rcu_read_lock();
		group_info = rcu_dereference(sbi->s_group_info);
		kfree(group_info[idx]);
		group_info[idx] = NULL;
		rcu_read_unlock();
	}
	return -ENOMEM;
} /* ext4_mb_add_groupinfo */
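
/*
 * Worked example of the two-level lookup above (again assuming 64
 * descriptors per block): group 130 lives at idx = 130 >> 6 = 2 and
 * i = 130 & 63 = 2, i.e. conceptually s_group_info[2][2].  A new
 * second-level table is only allocated when group % 64 == 0, the
 * first group of that descriptor block.
 */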

static int ext4_mb_init_backend(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;
	struct ext4_group_desc *desc;
	struct ext4_group_info ***group_info;
	struct kmem_cache *cachep;

	err = ext4_mb_alloc_groupinfo(sb, ngroups);
	if (err)
		return err;

	sbi->s_buddy_cache = new_inode(sb);
	if (sbi->s_buddy_cache == NULL) {
		ext4_msg(sb, KERN_ERR, "can't get new inode");
		goto err_freesgi;
	}
	/* To avoid potentially colliding with a valid on-disk inode number,
	 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
	 * not in the inode hash, so it should never be found by iget(), but
	 * this will avoid confusion if it ever shows up during debugging. */
	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
	for (i = 0; i < ngroups; i++) {
		cond_resched();
		desc = ext4_get_group_desc(sb, i, NULL);
		if (desc == NULL) {
			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
			goto err_freebuddy;
		}
		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
			goto err_freebuddy;
	}

	if (ext4_has_feature_flex_bg(sb)) {
		/* a single flex group is supposed to be read by a single IO.
		 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
		 * an unsigned integer, so the maximum shift is 32.
		 */
		if (sbi->s_es->s_log_groups_per_flex >= 32) {
			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
			goto err_freebuddy;
		}
		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
					   BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
	} else {
		sbi->s_mb_prefetch = 32;
	}
	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
	/*
	 * How many real IOs to prefetch within a single allocation at
	 * CR_POWER2_ALIGNED. Since CR_POWER2_ALIGNED is a CPU-bound
	 * optimization we shouldn't try to load too many groups; at some
	 * point we should start to use what we've got in memory.
	 * With an average random access time of 5ms, it'd take a second to
	 * get 200 groups (* N with flex_bg), so let's make this limit 4
	 * times s_mb_prefetch.
	 */
	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);

	return 0;

err_freebuddy:
	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
	while (i-- > 0) {
		struct ext4_group_info *grp = ext4_get_group_info(sb, i);

		if (grp)
			kmem_cache_free(cachep, grp);
	}
	i = sbi->s_group_info_size;
	rcu_read_lock();
	group_info = rcu_dereference(sbi->s_group_info);
	while (i-- > 0)
		kfree(group_info[i]);
	rcu_read_unlock();
	iput(sbi->s_buddy_cache);
err_freesgi:
	rcu_read_lock();
	kvfree(rcu_dereference(sbi->s_group_info));
	rcu_read_unlock();
	return -ENOMEM;
}
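
/*
 * Worked example of the prefetch sizing above (values assumed for
 * illustration: 4k blocks, s_log_groups_per_flex == 4, and
 * BLK_MAX_SEGMENT_SIZE == 64k): one flex group is 16 groups and
 * 64k >> (12 - 9) == 8192, so s_mb_prefetch = min(16, 8192) * 8 = 128
 * groups and s_mb_prefetch_limit = 512, both clamped to the total
 * group count of the filesystem.
 */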

static void ext4_groupinfo_destroy_slabs(void)
{
	int i;

	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
		kmem_cache_destroy(ext4_groupinfo_caches[i]);
		ext4_groupinfo_caches[i] = NULL;
	}
}

static int ext4_groupinfo_create_slab(size_t size)
{
	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
	int slab_size;
	int blocksize_bits = order_base_2(size);
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep;

	if (cache_index >= NR_GRPINFO_CACHES)
		return -EINVAL;

	if (unlikely(cache_index < 0))
		cache_index = 0;

	mutex_lock(&ext4_grpinfo_slab_create_mutex);
	if (ext4_groupinfo_caches[cache_index]) {
		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
		return 0;	/* Already created */
	}

	slab_size = offsetof(struct ext4_group_info,
			     bb_counters[blocksize_bits + 2]);

	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
				   slab_size, 0, SLAB_RECLAIM_ACCOUNT,
				   NULL);

	ext4_groupinfo_caches[cache_index] = cachep;

	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
	if (!cachep) {
		printk(KERN_EMERG
		       "EXT4-fs: no memory for groupinfo slab cache\n");
		return -ENOMEM;
	}

	return 0;
}
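
/*
 * Sizing note for the offsetof() trick above: for 4k blocks
 * (blocksize_bits == 12) the buddy system tracks orders 0..13, i.e.
 * blocksize_bits + 2 counters, so the slab object is cut off right
 * after bb_counters[13].  Smaller block sizes get proportionally
 * smaller objects, which is why each block size needs its own cache.
 */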

static void ext4_discard_work(struct work_struct *work)
{
	struct ext4_sb_info *sbi = container_of(work,
			struct ext4_sb_info, s_discard_work);
	struct super_block *sb = sbi->s_sb;
	struct ext4_free_data *fd, *nfd;
	struct ext4_buddy e4b;
	LIST_HEAD(discard_list);
	ext4_group_t grp, load_grp;
	int err = 0;

	spin_lock(&sbi->s_md_lock);
	list_splice_init(&sbi->s_discard_list, &discard_list);
	spin_unlock(&sbi->s_md_lock);

	load_grp = UINT_MAX;
	list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
		/*
		 * If the filesystem is umounting, or if we ran out of
		 * memory or are suffering from no space, give up the
		 * discard
		 */
		if ((sb->s_flags & SB_ACTIVE) && !err &&
		    !atomic_read(&sbi->s_retry_alloc_pending)) {
			grp = fd->efd_group;
			if (grp != load_grp) {
				if (load_grp != UINT_MAX)
					ext4_mb_unload_buddy(&e4b);

				err = ext4_mb_load_buddy(sb, grp, &e4b);
				if (err) {
					kmem_cache_free(ext4_free_data_cachep, fd);
					load_grp = UINT_MAX;
					continue;
				} else {
					load_grp = grp;
				}
			}

			ext4_lock_group(sb, grp);
			ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
				fd->efd_start_cluster + fd->efd_count - 1, 1);
			ext4_unlock_group(sb, grp);
		}
		kmem_cache_free(ext4_free_data_cachep, fd);
	}

	if (load_grp != UINT_MAX)
		ext4_mb_unload_buddy(&e4b);
}

int ext4_mb_init(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned i, j;
	unsigned offset, offset_incr;
	unsigned max;
	int ret;

	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);

	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_offsets == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_maxs == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
	if (ret < 0)
		goto out;

	/* order 0 is regular bitmap */
	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
	sbi->s_mb_offsets[0] = 0;

	i = 1;
	offset = 0;
	offset_incr = 1 << (sb->s_blocksize_bits - 1);
	max = sb->s_blocksize << 2;
	do {
		sbi->s_mb_offsets[i] = offset;
		sbi->s_mb_maxs[i] = max;
		offset += offset_incr;
		offset_incr = offset_incr >> 1;
		max = max >> 1;
		i++;
	} while (i < MB_NUM_ORDERS(sb));
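
	/*
	 * Worked example of the table built above, for 4k blocks
	 * (blocksize_bits == 12): s_mb_maxs[0] = 32768 bits (the on-disk
	 * bitmap itself); the order-1 buddy bitmap starts at byte offset 0
	 * with at most 16384 chunks, order 2 at offset 2048 with 8192,
	 * order 3 at 3072 with 4096, and so on, halving each time up to
	 * order 13.  The offsets index into the per-group buddy block.
	 */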

	sbi->s_mb_avg_fragment_size =
		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
			      GFP_KERNEL);
	if (!sbi->s_mb_avg_fragment_size) {
		ret = -ENOMEM;
		goto out;
	}
	sbi->s_mb_avg_fragment_size_locks =
		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
			      GFP_KERNEL);
	if (!sbi->s_mb_avg_fragment_size_locks) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
		INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
		rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
	}
	sbi->s_mb_largest_free_orders =
		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
			      GFP_KERNEL);
	if (!sbi->s_mb_largest_free_orders) {
		ret = -ENOMEM;
		goto out;
	}
	sbi->s_mb_largest_free_orders_locks =
		kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
			      GFP_KERNEL);
	if (!sbi->s_mb_largest_free_orders_locks) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
		INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
		rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
	}

	spin_lock_init(&sbi->s_md_lock);
	sbi->s_mb_free_pending = 0;
	INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
	INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
	INIT_LIST_HEAD(&sbi->s_discard_list);
	INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
	atomic_set(&sbi->s_retry_alloc_pending, 0);

	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
	sbi->s_mb_stats = MB_DEFAULT_STATS;
	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
	sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;

	/*
	 * The default group preallocation is 512, which for 4k block
	 * sizes translates to 2 megabytes. However for bigalloc file
	 * systems, this is probably too big (i.e., if the cluster size
	 * is 1 megabyte, then group preallocation size becomes half a
	 * gigabyte!). As a default, we will keep a two megabyte
	 * group prealloc size for cluster sizes up to 64k, and after
	 * that, we will force a minimum group preallocation size of
	 * 32 clusters. This translates to 8 megs when the cluster
	 * size is 256k, and 32 megs when the cluster size is 1 meg,
	 * which seems reasonable as a default.
	 */
	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
				       sbi->s_cluster_bits, 32);
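
	/*
	 * E.g. with 4k blocks: 64k clusters give s_cluster_bits == 4, so
	 * 512 >> 4 == 32 clusters == 2MB; with 1MB clusters 512 >> 8 == 2
	 * would be far too small, so max(..., 32) forces 32 clusters ==
	 * 32MB, exactly the numbers quoted in the comment above.
	 */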
	/*
	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
	 * to the lowest multiple of s_stripe which is bigger than
	 * the s_mb_group_prealloc as determined above. We want
	 * the preallocation size to be an exact multiple of the
	 * RAID stripe size so that preallocations don't fragment
	 * the stripes.
	 */
	if (sbi->s_stripe > 1) {
		sbi->s_mb_group_prealloc = roundup(
			sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe));
	}

	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
	if (sbi->s_locality_groups == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	for_each_possible_cpu(i) {
		struct ext4_locality_group *lg;
		lg = per_cpu_ptr(sbi->s_locality_groups, i);
		mutex_init(&lg->lg_mutex);
		for (j = 0; j < PREALLOC_TB_SIZE; j++)
			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
		spin_lock_init(&lg->lg_prealloc_lock);
	}

	if (bdev_nonrot(sb->s_bdev))
		sbi->s_mb_max_linear_groups = 0;
	else
		sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
	/* init file for buddy data */
	ret = ext4_mb_init_backend(sb);
	if (ret != 0)
		goto out_free_locality_groups;

	return 0;

out_free_locality_groups:
	free_percpu(sbi->s_locality_groups);
	sbi->s_locality_groups = NULL;
out:
	kfree(sbi->s_mb_avg_fragment_size);
	kfree(sbi->s_mb_avg_fragment_size_locks);
	kfree(sbi->s_mb_largest_free_orders);
	kfree(sbi->s_mb_largest_free_orders_locks);
	kfree(sbi->s_mb_offsets);
	sbi->s_mb_offsets = NULL;
	kfree(sbi->s_mb_maxs);
	sbi->s_mb_maxs = NULL;
	return ret;
}

/* needs to be called with the ext4 group lock held */
static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
{
	struct ext4_prealloc_space *pa;
	struct list_head *cur, *tmp;
	int count = 0;

	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		list_del(&pa->pa_group_list);
		count++;
		kmem_cache_free(ext4_pspace_cachep, pa);
	}
	return count;
}

void ext4_mb_release(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	int num_meta_group_infos;
	struct ext4_group_info *grinfo, ***group_info;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
	int count;

	if (test_opt(sb, DISCARD)) {
		/*
		 * wait for the discard work to drain all of ext4_free_data
		 */
		flush_work(&sbi->s_discard_work);
		WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
	}

	if (sbi->s_group_info) {
		for (i = 0; i < ngroups; i++) {
			cond_resched();
			grinfo = ext4_get_group_info(sb, i);
			if (!grinfo)
				continue;
			mb_group_bb_bitmap_free(grinfo);
			ext4_lock_group(sb, i);
			count = ext4_mb_cleanup_pa(grinfo);
			if (count)
				mb_debug(sb, "mballoc: %d PAs left\n",
					 count);
			ext4_unlock_group(sb, i);
			kmem_cache_free(cachep, grinfo);
		}
		num_meta_group_infos = (ngroups +
				EXT4_DESC_PER_BLOCK(sb) - 1) >>
			EXT4_DESC_PER_BLOCK_BITS(sb);
		rcu_read_lock();
		group_info = rcu_dereference(sbi->s_group_info);
		for (i = 0; i < num_meta_group_infos; i++)
			kfree(group_info[i]);
		kvfree(group_info);
		rcu_read_unlock();
	}
	kfree(sbi->s_mb_avg_fragment_size);
	kfree(sbi->s_mb_avg_fragment_size_locks);
	kfree(sbi->s_mb_largest_free_orders);
	kfree(sbi->s_mb_largest_free_orders_locks);
	kfree(sbi->s_mb_offsets);
	kfree(sbi->s_mb_maxs);
	iput(sbi->s_buddy_cache);
	if (sbi->s_mb_stats) {
		ext4_msg(sb, KERN_INFO,
			 "mballoc: %u blocks %u reqs (%u success)",
			 atomic_read(&sbi->s_bal_allocated),
			 atomic_read(&sbi->s_bal_reqs),
			 atomic_read(&sbi->s_bal_success));
		ext4_msg(sb, KERN_INFO,
			 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
			 "%u 2^N hits, %u breaks, %u lost",
			 atomic_read(&sbi->s_bal_ex_scanned),
			 atomic_read(&sbi->s_bal_groups_scanned),
			 atomic_read(&sbi->s_bal_goals),
			 atomic_read(&sbi->s_bal_2orders),
			 atomic_read(&sbi->s_bal_breaks),
			 atomic_read(&sbi->s_mb_lost_chunks));
		ext4_msg(sb, KERN_INFO,
			 "mballoc: %u generated and it took %llu",
			 atomic_read(&sbi->s_mb_buddies_generated),
			 atomic64_read(&sbi->s_mb_generation_time));
		ext4_msg(sb, KERN_INFO,
			 "mballoc: %u preallocated, %u discarded",
			 atomic_read(&sbi->s_mb_preallocated),
			 atomic_read(&sbi->s_mb_discarded));
	}

	free_percpu(sbi->s_locality_groups);
}

static inline int ext4_issue_discard(struct super_block *sb,
		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
{
	ext4_fsblk_t discard_block;

	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
			 ext4_group_first_block_no(sb, block_group));
	count = EXT4_C2B(EXT4_SB(sb), count);
	trace_ext4_discard_blocks(sb,
			(unsigned long long) discard_block, count);

	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
}
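
/*
 * Conversion example (a bigalloc filesystem with 16 blocks per cluster
 * is assumed): cluster 100 in block_group 3 becomes discard_block =
 * first_block_of_group_3 + 1600, and a count of 5 clusters is widened
 * to 80 filesystem blocks before being handed to the block layer.
 */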

static void ext4_free_data_in_buddy(struct super_block *sb,
				    struct ext4_free_data *entry)
{
	struct ext4_buddy e4b;
	struct ext4_group_info *db;
	int err, count = 0;

	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
		 entry->efd_count, entry->efd_group, entry);

	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
	/* we expect to find existing buddy because it's pinned */
	BUG_ON(err != 0);

	spin_lock(&EXT4_SB(sb)->s_md_lock);
	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
	spin_unlock(&EXT4_SB(sb)->s_md_lock);

	db = e4b.bd_info;
	/* there are blocks to put in buddy to make them really free */
	count += entry->efd_count;
	ext4_lock_group(sb, entry->efd_group);
	/* Take it out of per group rb tree */
	rb_erase(&entry->efd_node, &(db->bb_free_root));
	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);

	/*
	 * Clear the trimmed flag for the group so that the next
	 * ext4_trim_fs can trim it.
	 */
	EXT4_MB_GRP_CLEAR_TRIMMED(db);

	if (!db->bb_free_root.rb_node) {
		/* No more items in the per group rb tree
		 * balance refcounts from ext4_mb_free_metadata()
		 */
		folio_put(e4b.bd_buddy_folio);
		folio_put(e4b.bd_bitmap_folio);
	}
	ext4_unlock_group(sb, entry->efd_group);
	ext4_mb_unload_buddy(&e4b);

	mb_debug(sb, "freed %d blocks in 1 structures\n", count);
}

/*
 * This function is called by the jbd2 layer once the commit has finished,
 * so we know we can free the blocks that were released with that commit.
 */
void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_free_data *entry, *tmp;
	LIST_HEAD(freed_data_list);
	struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1];
	bool wake;

	list_replace_init(s_freed_head, &freed_data_list);

	list_for_each_entry(entry, &freed_data_list, efd_list)
		ext4_free_data_in_buddy(sb, entry);

	if (test_opt(sb, DISCARD)) {
		spin_lock(&sbi->s_md_lock);
		wake = list_empty(&sbi->s_discard_list);
		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
		spin_unlock(&sbi->s_md_lock);
		if (wake)
			queue_work(system_unbound_wq, &sbi->s_discard_work);
	} else {
		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
			kmem_cache_free(ext4_free_data_cachep, entry);
	}
}
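
/*
 * A note on the commit_tid & 1 indexing above: s_freed_data_list is a
 * two-element array used in ping-pong fashion; for example, blocks
 * freed under tid 42 queue on list 0 while tid 43's frees accumulate
 * on list 1, so the commit callback for a given tid drains only the
 * entries that belong to that transaction and leaves the next
 * transaction's frees untouched.
 */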

int __init ext4_init_mballoc(void)
{
	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
					SLAB_RECLAIM_ACCOUNT);
	if (ext4_pspace_cachep == NULL)
		goto out;

	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
				    SLAB_RECLAIM_ACCOUNT);
	if (ext4_ac_cachep == NULL)
		goto out_pa_free;

	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
					   SLAB_RECLAIM_ACCOUNT);
	if (ext4_free_data_cachep == NULL)
		goto out_ac_free;

	return 0;

out_ac_free:
	kmem_cache_destroy(ext4_ac_cachep);
out_pa_free:
	kmem_cache_destroy(ext4_pspace_cachep);
out:
	return -ENOMEM;
}

void ext4_exit_mballoc(void)
{
	/*
	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
	 * before destroying the slab cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_pspace_cachep);
	kmem_cache_destroy(ext4_ac_cachep);
	kmem_cache_destroy(ext4_free_data_cachep);
	ext4_groupinfo_destroy_slabs();
}

#define EXT4_MB_BITMAP_MARKED_CHECK 0x0001
#define EXT4_MB_SYNC_UPDATE 0x0002
static int
ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state,
		     ext4_group_t group, ext4_grpblk_t blkoff,
		     ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	int err;
	unsigned int i, already, changed = len;

	KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context,
				   handle, sb, state, group, blkoff, len,
				   flags, ret_changed);

	if (ret_changed)
		*ret_changed = 0;
	bitmap_bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR(bitmap_bh))
		return PTR_ERR(bitmap_bh);

	if (handle) {
		BUFFER_TRACE(bitmap_bh, "getting write access");
		err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
						    EXT4_JTR_NONE);
		if (err)
			goto out_err;
	}

	err = -EIO;
	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
	if (!gdp)
		goto out_err;

	if (handle) {
		BUFFER_TRACE(gdp_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, sb, gdp_bh,
						    EXT4_JTR_NONE);
		if (err)
			goto out_err;
	}

	ext4_lock_group(sb, group);
	if (ext4_has_group_desc_csum(sb) &&
	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		ext4_free_group_clusters_set(sb, gdp,
			ext4_free_clusters_after_init(sb, group, gdp));
	}

	if (flags & EXT4_MB_BITMAP_MARKED_CHECK) {
		already = 0;
		for (i = 0; i < len; i++)
			if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
			    state)
				already++;
		changed = len - already;
	}

	if (state) {
		mb_set_bits(bitmap_bh->b_data, blkoff, len);
		ext4_free_group_clusters_set(sb, gdp,
			ext4_free_group_clusters(sb, gdp) - changed);
	} else {
		mb_clear_bits(bitmap_bh->b_data, blkoff, len);
		ext4_free_group_clusters_set(sb, gdp,
			ext4_free_group_clusters(sb, gdp) + changed);
	}

	ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_unlock_group(sb, group);
	if (ret_changed)
		*ret_changed = changed;

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, group);
		struct flex_groups *fg = sbi_array_rcu_deref(sbi,
					s_flex_groups, flex_group);

		if (state)
			atomic64_sub(changed, &fg->free_clusters);
		else
			atomic64_add(changed, &fg->free_clusters);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (err)
		goto out_err;
	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
	if (err)
		goto out_err;

	if (flags & EXT4_MB_SYNC_UPDATE) {
		sync_dirty_buffer(bitmap_bh);
		sync_dirty_buffer(gdp_bh);
	}

out_err:
	brelse(bitmap_bh);
	return err;
}
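
/*
 * Usage sketch, based on the callers visible in this file: journalled
 * callers such as ext4_mb_mark_diskspace_used() below pass a real
 * handle and (outside of AGGRESSIVE_CHECK builds) no flags, so the
 * buffers go through the usual jbd2 machinery, while the fast-commit
 * replay path (ext4_mb_mark_bb()) passes a NULL handle together with
 * EXT4_MB_BITMAP_MARKED_CHECK | EXT4_MB_SYNC_UPDATE so the update is
 * idempotent and flushed synchronously.
 */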

/*
 * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps.
 * Returns 0 on success or an error code.
 */
static noinline_for_stack int
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
			    handle_t *handle, unsigned int reserv_clstrs)
{
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block;
	int err, len;
	int flags = 0;
	ext4_grpblk_t changed;

	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);

	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL);
	if (!gdp)
		return -EIO;
	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
			ext4_free_group_clusters(sb, gdp));

	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata", block, block+len);
		/* The file system is mounted not to panic on error, so
		 * fix the bitmap and return EFSCORRUPTED.
		 * We leak some of the blocks here.
		 */
		err = ext4_mb_mark_context(handle, sb, true,
					   ac->ac_b_ex.fe_group,
					   ac->ac_b_ex.fe_start,
					   ac->ac_b_ex.fe_len,
					   0, NULL);
		if (!err)
			err = -EFSCORRUPTED;
		return err;
	}

#ifdef AGGRESSIVE_CHECK
	flags |= EXT4_MB_BITMAP_MARKED_CHECK;
#endif
	err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group,
				   ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len,
				   flags, &changed);

	if (err && changed == 0)
		return err;

#ifdef AGGRESSIVE_CHECK
	BUG_ON(changed != ac->ac_b_ex.fe_len);
#endif
	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
	/*
	 * Now reduce the dirty block count also. Should not go negative
	 */
	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
		/* release all the reserved blocks if non delalloc */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   reserv_clstrs);

	return err;
}

/*
 * Idempotent helper for the ext4 fast commit replay path to set the state of
 * blocks in bitmaps and update counters.
 */
void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
		     int len, bool state)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t group;
	ext4_grpblk_t blkoff;
	int err = 0;
	unsigned int clen, thisgrp_len;

	while (len > 0) {
		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);

		/*
		 * Check to see if we are freeing blocks across a group
		 * boundary.
		 * In case of flex_bg, (block, len) may span more than one
		 * group; in that case we need to fetch the metadata of each
		 * group we touch, which is what this loop does.
		 */
		thisgrp_len = min_t(unsigned int, (unsigned int)len,
			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
		clen = EXT4_NUM_B2C(sbi, thisgrp_len);

		if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
			ext4_error(sb, "Marking blocks in system zone - "
				   "Block = %llu, len = %u",
				   block, thisgrp_len);
			break;
		}

		err = ext4_mb_mark_context(NULL, sb, state,
					   group, blkoff, clen,
					   EXT4_MB_BITMAP_MARKED_CHECK |
					   EXT4_MB_SYNC_UPDATE,
					   NULL);
		if (err)
			break;

		block += thisgrp_len;
		len -= thisgrp_len;
		BUG_ON(len < 0);
	}
}
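
/*
 * Worked example of the splitting above (4k blocks, 32768 blocks per
 * group, no bigalloc): marking block 32760 with len 50 takes two loop
 * iterations: 8 blocks in the first group (clamped by
 * EXT4_BLOCKS_PER_GROUP - blkoff), then the remaining 42 blocks
 * starting at offset 0 of the next group.
 */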

/*
 * Here we normalize the request for a locality group.
 * Group requests are normalized to s_mb_group_prealloc, which is derived
 * from s_stripe when a stripe size is set via mount option.
 * s_mb_group_prealloc can be configured via
 * /sys/fs/ext4/<partition>/mb_group_prealloc
 *
 * XXX: should we try to preallocate more than the group has now?
 */
static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;

	BUG_ON(lg == NULL);
	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
}

/*
 * This function returns the next element to look at during inode
 * PA rbtree walk. We assume that we have held the inode PA rbtree lock
 * (ei->i_prealloc_lock)
 *
 * new_start	The start of the range we want to compare
 * cur_start	The existing start that we are comparing against
 * node		The node of the rb_tree
 */
static inline struct rb_node*
ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node)
{
	if (new_start < cur_start)
		return node->rb_left;
	else
		return node->rb_right;
}

static inline void
ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
			  ext4_lblk_t start, loff_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_prealloc_space *tmp_pa;
	ext4_lblk_t tmp_pa_start;
	loff_t tmp_pa_end;
	struct rb_node *iter;

	read_lock(&ei->i_prealloc_lock);
	for (iter = ei->i_prealloc_node.rb_node; iter;
	     iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
				  pa_node.inode_node);
		tmp_pa_start = tmp_pa->pa_lstart;
		tmp_pa_end = pa_logical_end(sbi, tmp_pa);

		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted == 0)
			BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
		spin_unlock(&tmp_pa->pa_lock);
	}
	read_unlock(&ei->i_prealloc_lock);
}

/*
 * Given an allocation context "ac" and a range "start", "end", check
 * and adjust boundaries if the range overlaps with any of the existing
 * preallocations stored in the corresponding inode of the allocation context.
 *
 * Parameters:
 *	ac			allocation context
 *	start			start of the new range
 *	end			end of the new range
 */
static inline void
ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
			  ext4_lblk_t *start, loff_t *end)
{
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
	struct rb_node *iter;
	ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
	loff_t new_end, tmp_pa_end, left_pa_end = -1;

	new_start = *start;
	new_end = *end;

	/*
	 * Adjust the normalized range so that it doesn't overlap with any
	 * existing preallocated blocks(PAs). Make sure to hold the rbtree lock
	 * so it doesn't change underneath us.
	 */
	read_lock(&ei->i_prealloc_lock);

	/* Step 1: find any one immediate neighboring PA of the normalized range */
	for (iter = ei->i_prealloc_node.rb_node; iter;
	     iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
					    tmp_pa_start, iter)) {
		tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
				  pa_node.inode_node);
		tmp_pa_start = tmp_pa->pa_lstart;
		tmp_pa_end = pa_logical_end(sbi, tmp_pa);

		/* PA must not overlap original request */
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted == 0)
			BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
				 ac->ac_o_ex.fe_logical < tmp_pa_start));
		spin_unlock(&tmp_pa->pa_lock);
	}

	/*
	 * Step 2: check if the found PA is left or right neighbor and
	 * get the other neighbor
	 */
	if (tmp_pa) {
		if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
			struct rb_node *tmp;

			left_pa = tmp_pa;
			tmp = rb_next(&left_pa->pa_node.inode_node);
			if (tmp) {
				right_pa = rb_entry(tmp,
						    struct ext4_prealloc_space,
						    pa_node.inode_node);
			}
		} else {
			struct rb_node *tmp;

			right_pa = tmp_pa;
			tmp = rb_prev(&right_pa->pa_node.inode_node);
			if (tmp) {
				left_pa = rb_entry(tmp,
						   struct ext4_prealloc_space,
						   pa_node.inode_node);
			}
		}
	}

	/* Step 3: get the non deleted neighbors */
	if (left_pa) {
		for (iter = &left_pa->pa_node.inode_node;;
		     iter = rb_prev(iter)) {
			if (!iter) {
				left_pa = NULL;
				break;
			}

			tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
					  pa_node.inode_node);
			left_pa = tmp_pa;
			spin_lock(&tmp_pa->pa_lock);
			if (tmp_pa->pa_deleted == 0) {
				spin_unlock(&tmp_pa->pa_lock);
				break;
			}
			spin_unlock(&tmp_pa->pa_lock);
		}
	}

	if (right_pa) {
		for (iter = &right_pa->pa_node.inode_node;;
		     iter = rb_next(iter)) {
			if (!iter) {
				right_pa = NULL;
				break;
			}

			tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
					  pa_node.inode_node);
			right_pa = tmp_pa;
			spin_lock(&tmp_pa->pa_lock);
			if (tmp_pa->pa_deleted == 0) {
				spin_unlock(&tmp_pa->pa_lock);
				break;
			}
			spin_unlock(&tmp_pa->pa_lock);
		}
	}

	if (left_pa) {
		left_pa_end = pa_logical_end(sbi, left_pa);
		BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
	}

	if (right_pa) {
		right_pa_start = right_pa->pa_lstart;
		BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
	}

	/* Step 4: trim our normalized range to not overlap with the neighbors */
	if (left_pa) {
		if (left_pa_end > new_start)
			new_start = left_pa_end;
	}

	if (right_pa) {
		if (right_pa_start < new_end)
			new_end = right_pa_start;
	}
	read_unlock(&ei->i_prealloc_lock);

	/* XXX: extra loop to check we really don't overlap preallocations */
	ext4_mb_pa_assert_overlap(ac, new_start, new_end);

	*start = new_start;
	*end = new_end;
}
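
/*
 * Trimming example: if the normalized range is [100, 200) and the
 * nearest live neighbours are a left PA ending at 120 and a right PA
 * starting at 180, steps 1-4 shrink the window to [120, 180).  The
 * original request is guaranteed to remain inside the result because
 * it never overlaps a live PA in the first place (see the BUG_ONs
 * above).
 */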

/*
 * Normalization means making the request better in terms of
 * size and alignment
 */
static noinline_for_stack void
ext4_mb_normalize_request(struct ext4_allocation_context *ac,
			  struct ext4_allocation_request *ar)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_super_block *es = sbi->s_es;
	int bsbits, max;
	loff_t size, start_off, end;
	loff_t orig_size __maybe_unused;
	ext4_lblk_t start;

	/* only normalize data requests; metadata requests
	   do not need preallocation */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	/* sometimes the caller may want exact blocks */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	/* caller may indicate that preallocation isn't
	 * required (it's a tail, for example) */
	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
		return;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
		ext4_mb_normalize_group_request(ac);
		return;
	}

	bsbits = ac->ac_sb->s_blocksize_bits;

	/* first, let's learn the actual file size
	 * given that the current request is allocated */
	size = extent_logical_end(sbi, &ac->ac_o_ex);
	size = size << bsbits;
	if (size < i_size_read(ac->ac_inode))
		size = i_size_read(ac->ac_inode);
	orig_size = size;

	/* max size of free chunks */
	max = 2 << bsbits;

#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
		(req <= (size) || max <= (chunk_size))

	/* first, try to predict filesize */
	/* XXX: should this table be tunable? */
	start_off = 0;
	if (size <= 16 * 1024) {
		size = 16 * 1024;
	} else if (size <= 32 * 1024) {
		size = 32 * 1024;
	} else if (size <= 64 * 1024) {
		size = 64 * 1024;
	} else if (size <= 128 * 1024) {
		size = 128 * 1024;
	} else if (size <= 256 * 1024) {
		size = 256 * 1024;
	} else if (size <= 512 * 1024) {
		size = 512 * 1024;
	} else if (size <= 1024 * 1024) {
		size = 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(21 - bsbits)) << 21;
		size = 2 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(22 - bsbits)) << 22;
		size = 4 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
					(8<<20)>>bsbits, max, 8 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
							(23 - bsbits)) << 23;
		size = 8 * 1024 * 1024;
	} else {
		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
		size	  = (loff_t) EXT4_C2B(sbi,
					      ac->ac_o_ex.fe_len) << bsbits;
	}
	size = size >> bsbits;
	start = start_off >> bsbits;

	/*
	 * For tiny groups (smaller than 8MB) the chosen allocation
	 * alignment may be larger than group size. Make sure the
	 * alignment does not move allocation to a different group which
	 * makes mballoc fail assertions later.
	 */
	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));

	/* avoid unnecessary preallocation that may trigger assertions */
	if (start + size > EXT_MAX_BLOCKS)
		size = EXT_MAX_BLOCKS - start;

	/* don't cover already allocated blocks in selected range */
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
		start = ar->lleft + 1;
	}
	if (ar->pright && start + size - 1 >= ar->lright)
		size -= start + size - ar->lright;

	/*
	 * Trim allocation request for filesystems with artificially small
	 * groups.
	 */
	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);

	end = start + size;

	ext4_mb_pa_adjust_overlap(ac, &start, &end);

	size = end - start;

	/*
	 * In this function "start" and "size" are normalized for better
	 * alignment and length such that we could preallocate more blocks.
	 * This normalization is done such that the original request of
	 * ac->ac_o_ex.fe_logical & fe_len should always lie within the
	 * "start" and "size" boundaries.
	 * (Note fe_len can be relaxed since the FS block allocation API does
	 * not provide any guarantee on the number of contiguous blocks
	 * allocated, since that depends upon free space left, etc).
	 * In case of inode pa, later we use the allocated blocks
	 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
	 * range of goal/best blocks [start, size] to put it at the
	 * ac_o_ex.fe_logical extent of this inode.
	 * (See ext4_mb_use_inode_pa() for more details)
	 */
	if (start + size <= ac->ac_o_ex.fe_logical ||
	    start > ac->ac_o_ex.fe_logical) {
		ext4_msg(ac->ac_sb, KERN_ERR,
			 "start %lu, size %lu, fe_logical %lu",
			 (unsigned long) start, (unsigned long) size,
			 (unsigned long) ac->ac_o_ex.fe_logical);
		BUG();
	}
	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));

	/* now prepare goal request */

	/* XXX: is it better to align blocks WRT to logical
	 * placement or satisfy big request as is */
	ac->ac_g_ex.fe_logical = start;
	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
	ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;

	/* define goal start in order to merge */
	if (ar->pright && (ar->lright == (start + size)) &&
	    ar->pright >= size &&
	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
		/* merge to the right */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
						&ac->ac_g_ex.fe_group,
						&ac->ac_g_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}
	if (ar->pleft && (ar->lleft + 1 == start) &&
	    ar->pleft + 1 < ext4_blocks_count(es)) {
		/* merge to the left */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
						&ac->ac_g_ex.fe_group,
						&ac->ac_g_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}

	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
		 orig_size, start);
}
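
/*
 * Normalization example (4k blocks assumed): a write into a file whose
 * projected size is about 3MB falls into the NRL_CHECK_SIZE(.., 4MB, ..)
 * bucket above, so the goal becomes a 2MB window aligned to a 2MB
 * logical boundary.  For instance fe_logical == 600 gives
 * start_off = (600 >> 9) << 21, i.e. start = block 512 and size = 512
 * blocks, before the neighbour and PA trimming adjust the window.
 */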

static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);

	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
		atomic_inc(&sbi->s_bal_reqs);
		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
			atomic_inc(&sbi->s_bal_success);

		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
		for (int i = 0; i < EXT4_MB_NUM_CRS; i++) {
			atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
		}

		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
			atomic_inc(&sbi->s_bal_goals);
		/* did we allocate as much as normalizer originally wanted? */
		if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
			atomic_inc(&sbi->s_bal_len_goals);

		if (ac->ac_found > sbi->s_mb_max_to_scan)
			atomic_inc(&sbi->s_bal_breaks);
	}

	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
		trace_ext4_mballoc_alloc(ac);
	else
		trace_ext4_mballoc_prealloc(ac);
}

/*
 * Called on failure; free up any blocks from the inode PA for this
 * context. We don't need this for MB_GROUP_PA because we only change
 * pa_free in ext4_mb_release_context(), but on failure, we've already
 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
 */
static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa = ac->ac_pa;
	struct ext4_buddy e4b;
	int err;

	if (pa == NULL) {
		if (ac->ac_f_ex.fe_len == 0)
			return;
		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
		if (WARN_RATELIMIT(err,
				   "ext4: mb_load_buddy failed (%d)", err))
			/*
			 * This should never happen since we pin the
			 * pages in the ext4_allocation_context so
			 * ext4_mb_load_buddy() should never fail.
			 */
			return;
		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
			       ac->ac_f_ex.fe_len);
		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
		ext4_mb_unload_buddy(&e4b);
		return;
	}
	if (pa->pa_type == MB_INODE_PA) {
		spin_lock(&pa->pa_lock);
		pa->pa_free += ac->ac_b_ex.fe_len;
		spin_unlock(&pa->pa_lock);
	}
}

/*
 * use blocks preallocated to inode
 */
static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
				 struct ext4_prealloc_space *pa)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	ext4_fsblk_t start;
	ext4_fsblk_t end;
	int len;

	/* found preallocated blocks, use them */
	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
	len = EXT4_NUM_B2C(sbi, end - start);
	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	BUG_ON(start < pa->pa_pstart);
	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
	BUG_ON(pa->pa_free < len);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);
	pa->pa_free -= len;

	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
}
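
/*
 * Mapping example (no bigalloc, so clusters == blocks): with
 * pa_lstart == 100, pa_pstart == 5000, pa_len == 64 and a request for
 * 8 blocks at logical 110, start == 5010 and end == min(5064, 5018)
 * == 5018, so the request is satisfied with 8 physical blocks at 5010
 * and pa_free drops by 8.
 */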
4691
4692/*
4693 * use blocks preallocated to locality group
4694 */
4695static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4696 struct ext4_prealloc_space *pa)
4697{
03cddb80 4698 unsigned int len = ac->ac_o_ex.fe_len;
6be2ded1 4699
c9de560d
AT
4700 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4701 &ac->ac_b_ex.fe_group,
4702 &ac->ac_b_ex.fe_start);
4703 ac->ac_b_ex.fe_len = len;
4704 ac->ac_status = AC_STATUS_FOUND;
4705 ac->ac_pa = pa;
4706
1221b235 4707 /* we don't correct pa_pstart or pa_len here to avoid
26346ff6 4708 * possible race when the group is being loaded concurrently
c9de560d 4709 * instead we correct pa later, after blocks are marked
26346ff6
AK
4710 * in on-disk bitmap -- see ext4_mb_release_context()
4711 * Other CPUs are prevented from allocating from this pa by lg_mutex
c9de560d 4712 */
d3df1453 4713 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
1afdc588 4714 pa->pa_lstart, len, pa);
c9de560d
AT
4715}
4716
5e745b04
AK
4717/*
4718 * Return the prealloc space that have minimal distance
4719 * from the goal block. @cpa is the prealloc
4720 * space that is having currently known minimal distance
4721 * from the goal block.
4722 */
4723static struct ext4_prealloc_space *
4724ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4725 struct ext4_prealloc_space *pa,
4726 struct ext4_prealloc_space *cpa)
4727{
4728 ext4_fsblk_t cur_distance, new_distance;
4729
4730 if (cpa == NULL) {
4731 atomic_inc(&pa->pa_count);
4732 return pa;
4733 }
79211c8e
AM
4734 cur_distance = abs(goal_block - cpa->pa_pstart);
4735 new_distance = abs(goal_block - pa->pa_pstart);
5e745b04 4736
5a54b2f1 4737 if (cur_distance <= new_distance)
5e745b04
AK
4738 return cpa;
4739
4740 /* drop the previous reference */
4741 atomic_dec(&cpa->pa_count);
4742 atomic_inc(&pa->pa_count);
4743 return pa;
4744}
4745
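/*
 * Illustrative sketch, not part of mballoc.c: the distance test above
 * as an unsigned absolute difference. The PA whose pa_pstart lies
 * closer to the goal block wins; on a tie the current candidate is
 * kept, matching the "<=" comparison in ext4_mb_check_group_pa().
 */
static unsigned long long demo_pa_distance(unsigned long long goal,
					   unsigned long long pa_pstart)
{
	return goal > pa_pstart ? goal - pa_pstart : pa_pstart - goal;
}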
1eff5904
KS
4746/*
4747 * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
4748 */
4749static bool
4750ext4_mb_pa_goal_check(struct ext4_allocation_context *ac,
4751 struct ext4_prealloc_space *pa)
4752{
4753 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4754 ext4_fsblk_t start;
4755
4756 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4757 return true;
4758
4759 /*
4760 * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted
4761 * in ext4_mb_normalize_request and will keep same with ac_o_ex
4762 * from ext4_mb_initialize_context. Choose ac_g_ex here to keep
4763 * consistent with ext4_mb_find_by_goal.
4764 */
4765 start = pa->pa_pstart +
4766 (ac->ac_g_ex.fe_logical - pa->pa_lstart);
4767 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4768 return false;
4769
4770 if (ac->ac_g_ex.fe_len > pa->pa_len -
4771 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
4772 return false;
4773
4774 return true;
4775}
4776
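/*
 * Illustrative sketch, not part of mballoc.c: the two conditions
 * ext4_mb_pa_goal_check() enforces for EXT4_MB_HINT_GOAL_ONLY, written
 * in block units with a 1:1 cluster ratio assumed. The PA must map the
 * goal logical block to exactly the goal physical block, and must have
 * enough room past that offset to hold the whole request.
 */
static int demo_pa_goal_check(unsigned long long goal_logical,
			      unsigned long long goal_physical,
			      unsigned long long req_len,
			      unsigned long long pa_lstart,
			      unsigned long long pa_pstart,
			      unsigned long long pa_len)
{
	unsigned long long off = goal_logical - pa_lstart;

	if (pa_pstart + off != goal_physical)
		return 0;	/* physically misplaced for this goal */
	if (req_len > pa_len - off)
		return 0;	/* PA too short past the goal offset */
	return 1;
}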
c9de560d
AT
4777/*
4778 * search goal blocks in preallocated space
4779 */
4fca8f07 4780static noinline_for_stack bool
4ddfef7b 4781ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
c9de560d 4782{
53accfa9 4783 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
6be2ded1 4784 int order, i;
c9de560d
AT
4785 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4786 struct ext4_locality_group *lg;
9d3de7ee 4787 struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
38727786 4788 struct rb_node *iter;
5e745b04 4789 ext4_fsblk_t goal_block;
c9de560d
AT
4790
4791 /* only data can be preallocated */
4792 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4fca8f07 4793 return false;
c9de560d 4794
9d3de7ee
OM
4795 /*
4796 * first, try per-file preallocation by searching the inode pa rbtree.
4797 *
4798 * Here, we can't do a direct traversal of the tree because
4800 * ext4_mb_discard_group_preallocations() can concurrently mark the pa
4800 * deleted and that can cause direct traversal to skip some entries.
4801 */
38727786 4802 read_lock(&ei->i_prealloc_lock);
9d3de7ee
OM
4803
4804 if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4805 goto try_group_pa;
4806 }
4807
4808 /*
4809 * Step 1: Find a pa with logical start immediately adjacent to the
4810 * original logical start. This could be on the left or right.
4811 *
4812 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4813 */
38727786
OM
4814 for (iter = ei->i_prealloc_node.rb_node; iter;
4815 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
9d3de7ee 4816 tmp_pa->pa_lstart, iter)) {
38727786
OM
4817 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4818 pa_node.inode_node);
9d3de7ee 4819 }
c9de560d 4820
9d3de7ee
OM
4821 /*
4822 * Step 2: The adjacent pa might be to the right of logical start, find
4823 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4824 * logical start is towards the left of original request's logical start
4825 */
4826 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4827 struct rb_node *tmp;
4828 tmp = rb_prev(&tmp_pa->pa_node.inode_node);
c9de560d 4829
9d3de7ee
OM
4830 if (tmp) {
4831 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4832 pa_node.inode_node);
4833 } else {
e86a7182 4834 /*
9d3de7ee
OM
4835 * If there is no adjacent pa to the left then finding
4836 * an overlapping pa is not possible, hence stop searching
4837 * the inode pa tree
e86a7182 4838 */
9d3de7ee 4839 goto try_group_pa;
e86a7182 4840 }
9d3de7ee
OM
4841 }
4842
4843 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
fb0a387d 4844
9d3de7ee
OM
4845 /*
4846 * Step 3: If the left adjacent pa is deleted, keep moving left to find
4847 * the first non-deleted adjacent pa. After this step we should have a
4848 * valid tmp_pa which is guaranteed to be non-deleted.
4849 */
4850 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4851 if (!iter) {
4852 /*
4853 * no non-deleted left adjacent pa, so stop searching
4854 * the inode pa tree
4855 */
4856 goto try_group_pa;
4857 }
4858 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4859 pa_node.inode_node);
bcf43499 4860 spin_lock(&tmp_pa->pa_lock);
9d3de7ee
OM
4861 if (tmp_pa->pa_deleted == 0) {
4862 /*
4863 * We will keep holding the pa_lock from
4864 * this point on because we don't want group discard
4865 * to delete this pa underneath us. Since group
4866 * discard is anyway an ENOSPC operation, it
4867 * should be okay for it to wait a few more cycles.
4868 */
4869 break;
4870 } else {
bcf43499 4871 spin_unlock(&tmp_pa->pa_lock);
c9de560d 4872 }
9d3de7ee
OM
4873 }
4874
4875 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4876 BUG_ON(tmp_pa->pa_deleted == 1);
4877
4878 /*
4879 * Step 4: We now have the non-deleted left adjacent pa. Only this
4880 * pa can possibly satisfy the request hence check if it overlaps
4881 * original logical start and stop searching if it doesn't.
4882 */
43bbddc0 4883 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
bcf43499 4884 spin_unlock(&tmp_pa->pa_lock);
9d3de7ee
OM
4885 goto try_group_pa;
4886 }
4887
4888 /* non-extent files can't have physical blocks past 2^32 */
4889 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4890 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4891 EXT4_MAX_BLOCK_FILE_PHYS)) {
4892 /*
4893 * Since PAs don't overlap, we won't find any other PA to
4894 * satisfy this.
4895 */
4896 spin_unlock(&tmp_pa->pa_lock);
4897 goto try_group_pa;
4898 }
4899
4900 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4901 atomic_inc(&tmp_pa->pa_count);
4902 ext4_mb_use_inode_pa(ac, tmp_pa);
4903 spin_unlock(&tmp_pa->pa_lock);
4904 read_unlock(&ei->i_prealloc_lock);
4905 return true;
4906 } else {
4907 /*
4908 * We found a valid overlapping pa but couldn't use it because
4909 * it had no free blocks. This should ideally never happen
4910 * because:
4911 *
4912 * 1. When a new inode pa is added to rbtree it must have
4913 * pa_free > 0 since otherwise we won't actually need
4914 * preallocation.
4915 *
4916 * 2. An inode pa that is in the rbtree can only have its
4917 * pa_free become zero when another thread calls:
4918 * ext4_mb_new_blocks
4919 * ext4_mb_use_preallocated
4920 * ext4_mb_use_inode_pa
4921 *
4922 * 3. Further, after the above calls make pa_free == 0, we will
4923 * immediately remove it from the rbtree in:
4924 * ext4_mb_new_blocks
4925 * ext4_mb_release_context
4926 * ext4_mb_put_pa
4927 *
4928 * 4. Since pa_free becoming 0 and the pa getting removed
4929 * from the tree both happen in ext4_mb_new_blocks, which is always
4930 * called with i_data_sem held for data allocations, we can be
4931 * sure that another process will never see a pa in rbtree with
4932 * pa_free == 0.
4933 */
4934 WARN_ON_ONCE(tmp_pa->pa_free == 0);
c9de560d 4935 }
9d3de7ee
OM
4936 spin_unlock(&tmp_pa->pa_lock);
4937try_group_pa:
38727786 4938 read_unlock(&ei->i_prealloc_lock);
c9de560d
AT
4939
4940 /* can we use group allocation? */
4941 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4fca8f07 4942 return false;
c9de560d
AT
4943
4944 /* inode may have no locality group for some reason */
4945 lg = ac->ac_lg;
4946 if (lg == NULL)
4fca8f07 4947 return false;
6be2ded1
AK
4948 order = fls(ac->ac_o_ex.fe_len) - 1;
4949 if (order > PREALLOC_TB_SIZE - 1)
4950 /* The max size of hash table is PREALLOC_TB_SIZE */
4951 order = PREALLOC_TB_SIZE - 1;
4952
bda00de7 4953 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
5e745b04
AK
4954 /*
4955 * search for the prealloc space that has the
4956 * minimal distance from the goal block.
4957 */
6be2ded1
AK
4958 for (i = order; i < PREALLOC_TB_SIZE; i++) {
4959 rcu_read_lock();
bcf43499 4960 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
a8e38fd3 4961 pa_node.lg_list) {
bcf43499
OM
4962 spin_lock(&tmp_pa->pa_lock);
4963 if (tmp_pa->pa_deleted == 0 &&
4964 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
5e745b04
AK
4965
4966 cpa = ext4_mb_check_group_pa(goal_block,
bcf43499 4967 tmp_pa, cpa);
6be2ded1 4968 }
bcf43499 4969 spin_unlock(&tmp_pa->pa_lock);
c9de560d 4970 }
6be2ded1 4971 rcu_read_unlock();
c9de560d 4972 }
5e745b04
AK
4973 if (cpa) {
4974 ext4_mb_use_group_pa(ac, cpa);
4fca8f07 4975 return true;
5e745b04 4976 }
4fca8f07 4977 return false;
c9de560d
AT
4978}
4979
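/*
 * Illustrative sketch, not part of mballoc.c: how a request length is
 * mapped to a locality-group bucket in the loop above. fls() is
 * modeled with __builtin_clz (GCC/Clang), and PREALLOC_TB_SIZE is
 * assumed to be 10 purely for the example. The search then walks
 * buckets from this order upwards, since PAs in larger buckets can
 * also satisfy the request.
 */
static int demo_lg_order(unsigned int len)
{
	int order = 31 - __builtin_clz(len);	/* fls(len) - 1, len > 0 */

	if (order > 10 - 1)			/* PREALLOC_TB_SIZE - 1 */
		order = 10 - 1;
	return order;
}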
4980/*
4981 * the function goes through all preallocations in this group and marks them
4982 * used in the in-core bitmap. buddy must be generated from this bitmap.
955ce5f5 4983 * Needs to be called with the ext4 group lock held
c9de560d 4984 */
089ceecc
ES
4985static noinline_for_stack
4986void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
c9de560d
AT
4987 ext4_group_t group)
4988{
4989 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4990 struct ext4_prealloc_space *pa;
4991 struct list_head *cur;
4992 ext4_group_t groupnr;
4993 ext4_grpblk_t start;
4994 int preallocated = 0;
c9de560d
AT
4995 int len;
4996
5354b2af
TT
4997 if (!grp)
4998 return;
4999
c9de560d
AT
5000 /* every preallocation discard path loads the group first,
5001 * so the only competing code is preallocation use.
5002 * we don't need any locking here.
5003 * notice we do NOT ignore preallocations with pa_deleted
5004 * otherwise we could leave used blocks available for
5005 * allocation in buddy when concurrent ext4_mb_put_pa()
5006 * is dropping preallocation
5007 */
5008 list_for_each(cur, &grp->bb_prealloc_list) {
5009 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
5010 spin_lock(&pa->pa_lock);
5011 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5012 &groupnr, &start);
5013 len = pa->pa_len;
5014 spin_unlock(&pa->pa_lock);
5015 if (unlikely(len == 0))
5016 continue;
5017 BUG_ON(groupnr != group);
123e3016 5018 mb_set_bits(bitmap, start, len);
c9de560d 5019 preallocated += len;
c9de560d 5020 }
d3df1453 5021 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
c9de560d
AT
5022}
5023
27bc446e 5024static void ext4_mb_mark_pa_deleted(struct super_block *sb,
5025 struct ext4_prealloc_space *pa)
5026{
5027 struct ext4_inode_info *ei;
5028
5029 if (pa->pa_deleted) {
5030 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
5031 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5032 pa->pa_len);
5033 return;
5034 }
5035
5036 pa->pa_deleted = 1;
5037
5038 if (pa->pa_type == MB_INODE_PA) {
5039 ei = EXT4_I(pa->pa_inode);
5040 atomic_dec(&ei->i_prealloc_active);
5041 }
5042}
5043
82089725 5044static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
c9de560d 5045{
82089725 5046 BUG_ON(!pa);
4e8d2139
JR
5047 BUG_ON(atomic_read(&pa->pa_count));
5048 BUG_ON(pa->pa_deleted == 0);
c9de560d
AT
5049 kmem_cache_free(ext4_pspace_cachep, pa);
5050}
5051
82089725
OM
5052static void ext4_mb_pa_callback(struct rcu_head *head)
5053{
5054 struct ext4_prealloc_space *pa;
5055
5056 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5057 ext4_mb_pa_free(pa);
5058}
5059
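/*
 * Illustrative sketch, not part of mballoc.c: the container_of() step
 * behind ext4_mb_pa_callback() above. RCU invokes the callback with a
 * pointer to the embedded rcu_head; subtracting the member offset
 * recovers the enclosing object so the whole PA can be freed. The
 * demo_* types are hypothetical stand-ins.
 */
struct demo_rcu_head { struct demo_rcu_head *next; };
struct demo_pa { int pa_len; struct demo_rcu_head pa_rcu; };

static struct demo_pa *demo_pa_from_rcu(struct demo_rcu_head *head)
{
	return (struct demo_pa *)((char *)head -
			__builtin_offsetof(struct demo_pa, pa_rcu));
}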
c9de560d
AT
5060/*
5061 * drops a reference to preallocated space descriptor
5062 * if this was the last reference and the space is consumed
5063 */
5064static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
5065 struct super_block *sb, struct ext4_prealloc_space *pa)
5066{
a9df9a49 5067 ext4_group_t grp;
d33a1976 5068 ext4_fsblk_t grp_blk;
38727786 5069 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
c9de560d 5070
c9de560d
AT
5071 /* in this short window concurrent discard can set pa_deleted */
5072 spin_lock(&pa->pa_lock);
4e8d2139
JR
5073 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5074 spin_unlock(&pa->pa_lock);
5075 return;
5076 }
5077
c9de560d
AT
5078 if (pa->pa_deleted == 1) {
5079 spin_unlock(&pa->pa_lock);
5080 return;
5081 }
5082
27bc446e 5083 ext4_mb_mark_pa_deleted(sb, pa);
c9de560d
AT
5084 spin_unlock(&pa->pa_lock);
5085
d33a1976 5086 grp_blk = pa->pa_pstart;
60e6679e 5087 /*
cc0fb9ad
AK
5088 * If doing group-based preallocation, pa_pstart may be in the
5089 * next group when pa is used up
5090 */
5091 if (pa->pa_type == MB_GROUP_PA)
d33a1976
ES
5092 grp_blk--;
5093
bd86298e 5094 grp = ext4_get_group_number(sb, grp_blk);
c9de560d
AT
5095
5096 /*
5097 * possible race:
5098 *
5099 * P1 (buddy init) P2 (regular allocation)
5100 * find block B in PA
5101 * copy on-disk bitmap to buddy
5102 * mark B in on-disk bitmap
5103 * drop PA from group
5104 * mark all PAs in buddy
5105 *
5106 * thus, P1 initializes buddy with B available. to prevent this
5107 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
5108 * against that pair
5109 */
5110 ext4_lock_group(sb, grp);
5111 list_del(&pa->pa_group_list);
5112 ext4_unlock_group(sb, grp);
5113
a8e38fd3 5114 if (pa->pa_type == MB_INODE_PA) {
38727786
OM
5115 write_lock(pa->pa_node_lock.inode_lock);
5116 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5117 write_unlock(pa->pa_node_lock.inode_lock);
5118 ext4_mb_pa_free(pa);
a8e38fd3
OM
5119 } else {
5120 spin_lock(pa->pa_node_lock.lg_lock);
5121 list_del_rcu(&pa->pa_node.lg_list);
5122 spin_unlock(pa->pa_node_lock.lg_lock);
38727786
OM
5123 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5124 }
5125}
5126
5127static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new)
5128{
5129 struct rb_node **iter = &root->rb_node, *parent = NULL;
5130 struct ext4_prealloc_space *iter_pa, *new_pa;
5131 ext4_lblk_t iter_start, new_start;
5132
5133 while (*iter) {
5134 iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
5135 pa_node.inode_node);
5136 new_pa = rb_entry(new, struct ext4_prealloc_space,
5137 pa_node.inode_node);
5138 iter_start = iter_pa->pa_lstart;
5139 new_start = new_pa->pa_lstart;
5140
5141 parent = *iter;
5142 if (new_start < iter_start)
5143 iter = &((*iter)->rb_left);
5144 else
5145 iter = &((*iter)->rb_right);
a8e38fd3 5146 }
c9de560d 5147
38727786
OM
5148 rb_link_node(new, parent, iter);
5149 rb_insert_color(new, root);
c9de560d
AT
5150}
5151
5152/*
5153 * creates new preallocated space for given inode
5154 */
53f86b17 5155static noinline_for_stack void
4ddfef7b 5156ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
c9de560d
AT
5157{
5158 struct super_block *sb = ac->ac_sb;
53accfa9 5159 struct ext4_sb_info *sbi = EXT4_SB(sb);
c9de560d
AT
5160 struct ext4_prealloc_space *pa;
5161 struct ext4_group_info *grp;
5162 struct ext4_inode_info *ei;
5163
5164 /* preallocate only when the found space is larger than requested */
5165 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5166 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5167 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
53f86b17 5168 BUG_ON(ac->ac_pa == NULL);
c9de560d 5169
53f86b17 5170 pa = ac->ac_pa;
c9de560d 5171
7e170922 5172 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
bc056e71
BL
5173 struct ext4_free_extent ex = {
5174 .fe_logical = ac->ac_g_ex.fe_logical,
5175 .fe_len = ac->ac_orig_goal_len,
5176 };
5177 loff_t orig_goal_end = extent_logical_end(sbi, &ex);
4fbf8bc7 5178 loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
c9de560d 5179
4fbf8bc7
BL
5180 /*
5181 * We can't allocate as much as the normalizer wants, so we try
5182 * to get a proper lstart to cover the original request, except
5183 * when the goal doesn't cover the original request as below:
5184 *
5185 * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
5186 * best_ex:0/200(200) -> adjusted: 1848/2048(200)
5187 */
c9de560d
AT
5188 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5189 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5190
93cdf49f
OM
5191 /*
5192 * Use the below logic for adjusting best extent as it keeps
5193 * fragmentation in check while ensuring logical range of best
5194 * extent doesn't overflow out of goal extent:
5195 *
7e170922
OM
5196 * 1. Check if best ex can be kept at end of goal (before
5197 * cr_best_avail trimmed it) and still cover original start
93cdf49f 5198 * 2. Else, check if best ex can be kept at start of goal and
4fbf8bc7 5199 * still cover original end
93cdf49f
OM
5200 * 3. Else, keep the best ex at start of original request.
5201 */
bc056e71 5202 ex.fe_len = ac->ac_b_ex.fe_len;
c9de560d 5203
bc056e71
BL
5204 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5205 if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
93cdf49f 5206 goto adjust_bex;
c9de560d 5207
bc056e71 5208 ex.fe_logical = ac->ac_g_ex.fe_logical;
4fbf8bc7 5209 if (o_ex_end <= extent_logical_end(sbi, &ex))
bc056e71 5210 goto adjust_bex;
c9de560d 5211
bc056e71 5212 ex.fe_logical = ac->ac_o_ex.fe_logical;
93cdf49f 5213adjust_bex:
bc056e71 5214 ac->ac_b_ex.fe_logical = ex.fe_logical;
c9de560d 5215
c9de560d 5216 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
bc056e71 5217 BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
c9de560d
AT
5218 }
5219
c9de560d
AT
5220 pa->pa_lstart = ac->ac_b_ex.fe_logical;
5221 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5222 pa->pa_len = ac->ac_b_ex.fe_len;
5223 pa->pa_free = pa->pa_len;
c9de560d 5224 spin_lock_init(&pa->pa_lock);
d794bf8e 5225 INIT_LIST_HEAD(&pa->pa_group_list);
c9de560d 5226 pa->pa_deleted = 0;
cc0fb9ad 5227 pa->pa_type = MB_INODE_PA;
c9de560d 5228
d3df1453
RH
5229 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5230 pa->pa_len, pa->pa_lstart);
9bffad1e 5231 trace_ext4_mb_new_inode_pa(ac, pa);
c9de560d 5232
53accfa9 5233 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
abc075d4 5234 ext4_mb_use_inode_pa(ac, pa);
c9de560d
AT
5235
5236 ei = EXT4_I(ac->ac_inode);
5237 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5354b2af
TT
5238 if (!grp)
5239 return;
c9de560d 5240
a8e38fd3 5241 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
c9de560d
AT
5242 pa->pa_inode = ac->ac_inode;
5243
c9de560d 5244 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
c9de560d 5245
38727786
OM
5246 write_lock(pa->pa_node_lock.inode_lock);
5247 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5248 write_unlock(pa->pa_node_lock.inode_lock);
27bc446e 5249 atomic_inc(&ei->i_prealloc_active);
c9de560d
AT
5250}
5251
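/*
 * Illustrative sketch, not part of mballoc.c: the three-step choice of
 * the best extent's logical start made above when the found extent is
 * shorter than the normalized goal. Units are blocks, a 1:1 cluster
 * ratio is assumed, and best_len is assumed to fit inside the goal
 * range; all demo_* names are hypothetical.
 */
static unsigned long long demo_adjust_b_ex_logical(
		unsigned long long goal_start, unsigned long long goal_end,
		unsigned long long orig_start, unsigned long long orig_end,
		unsigned long long best_len)
{
	/* 1. keep best ex at the end of the goal if it still covers
	 *    the original start */
	if (orig_start >= goal_end - best_len)
		return goal_end - best_len;
	/* 2. keep best ex at the start of the goal if it still covers
	 *    the original end */
	if (orig_end <= goal_start + best_len)
		return goal_start;
	/* 3. otherwise anchor best ex at the original start */
	return orig_start;
}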
5252/*
5253 * creates new preallocated space for the locality group the inode belongs to
5254 */
53f86b17 5255static noinline_for_stack void
4ddfef7b 5256ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
c9de560d
AT
5257{
5258 struct super_block *sb = ac->ac_sb;
5259 struct ext4_locality_group *lg;
5260 struct ext4_prealloc_space *pa;
5261 struct ext4_group_info *grp;
5262
5263 /* preallocate only when the found space is larger than requested */
5264 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5265 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5266 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
53f86b17 5267 BUG_ON(ac->ac_pa == NULL);
c9de560d 5268
53f86b17 5269 pa = ac->ac_pa;
c9de560d 5270
c9de560d
AT
5271 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5272 pa->pa_lstart = pa->pa_pstart;
5273 pa->pa_len = ac->ac_b_ex.fe_len;
5274 pa->pa_free = pa->pa_len;
c9de560d 5275 spin_lock_init(&pa->pa_lock);
a8e38fd3 5276 INIT_LIST_HEAD(&pa->pa_node.lg_list);
d794bf8e 5277 INIT_LIST_HEAD(&pa->pa_group_list);
c9de560d 5278 pa->pa_deleted = 0;
cc0fb9ad 5279 pa->pa_type = MB_GROUP_PA;
c9de560d 5280
d3df1453
RH
5281 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5282 pa->pa_len, pa->pa_lstart);
9bffad1e 5283 trace_ext4_mb_new_group_pa(ac, pa);
c9de560d
AT
5284
5285 ext4_mb_use_group_pa(ac, pa);
5286 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5287
5288 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5354b2af
TT
5289 if (!grp)
5290 return;
c9de560d
AT
5291 lg = ac->ac_lg;
5292 BUG_ON(lg == NULL);
5293
a8e38fd3 5294 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
c9de560d
AT
5295 pa->pa_inode = NULL;
5296
c9de560d 5297 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
c9de560d 5298
6be2ded1
AK
5299 /*
5300 * We will later add the new pa to the right bucket
5301 * after updating the pa_free in ext4_mb_release_context
5302 */
c9de560d
AT
5303}
5304
53f86b17 5305static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
c9de560d 5306{
c9de560d 5307 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
53f86b17 5308 ext4_mb_new_group_pa(ac);
c9de560d 5309 else
53f86b17 5310 ext4_mb_new_inode_pa(ac);
c9de560d
AT
5311}
5312
5313/*
5314 * finds all unused blocks in on-disk bitmap, frees them in
5315 * in-core bitmap and buddy.
5316 * @pa must be unlinked from inode and group lists, so that
5317 * nobody else can find/use it.
5318 * the caller MUST hold group/inode locks.
5319 * TODO: optimize the case when there are no in-core structures yet
5320 */
820c2808 5321static noinline_for_stack void
4ddfef7b 5322ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3e1e5f50 5323 struct ext4_prealloc_space *pa)
c9de560d 5324{
c9de560d
AT
5325 struct super_block *sb = e4b->bd_sb;
5326 struct ext4_sb_info *sbi = EXT4_SB(sb);
498e5f24
TT
5327 unsigned int end;
5328 unsigned int next;
c9de560d
AT
5329 ext4_group_t group;
5330 ext4_grpblk_t bit;
ba80b101 5331 unsigned long long grp_blk_start;
c9de560d
AT
5332 int free = 0;
5333
5334 BUG_ON(pa->pa_deleted == 0);
5335 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
53accfa9 5336 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
c9de560d
AT
5337 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5338 end = bit + pa->pa_len;
5339
c9de560d 5340 while (bit < end) {
ffad0a44 5341 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
c9de560d
AT
5342 if (bit >= end)
5343 break;
ffad0a44 5344 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
d3df1453 5345 mb_debug(sb, "free preallocated %u/%u in group %u\n",
5a0790c2
AK
5346 (unsigned) ext4_group_first_block_no(sb, group) + bit,
5347 (unsigned) next - bit, (unsigned) group);
c9de560d
AT
5348 free += next - bit;
5349
3e1e5f50 5350 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
53accfa9
TT
5351 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
5352 EXT4_C2B(sbi, bit)),
a9c667f8 5353 next - bit);
c9de560d
AT
5354 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5355 bit = next + 1;
5356 }
5357 if (free != pa->pa_free) {
9d8b9ec4 5358 ext4_msg(e4b->bd_sb, KERN_CRIT,
36bad423 5359 "pa %p: logic %lu, phys. %lu, len %d",
9d8b9ec4
TT
5360 pa, (unsigned long) pa->pa_lstart,
5361 (unsigned long) pa->pa_pstart,
36bad423 5362 pa->pa_len);
e29136f8 5363 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
5d1b1b3f 5364 free, pa->pa_free);
e56eb659
AK
5365 /*
5366 * pa is already deleted so we use the value obtained
5367 * from the bitmap and continue.
5368 */
c9de560d 5369 }
c9de560d 5370 atomic_add(free, &sbi->s_mb_discarded);
c9de560d
AT
5371}
5372
20427949 5373static noinline_for_stack void
4ddfef7b 5374ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3e1e5f50 5375 struct ext4_prealloc_space *pa)
c9de560d 5376{
c9de560d
AT
5377 struct super_block *sb = e4b->bd_sb;
5378 ext4_group_t group;
5379 ext4_grpblk_t bit;
5380
60e07cf5 5381 trace_ext4_mb_release_group_pa(sb, pa);
c9de560d
AT
5382 BUG_ON(pa->pa_deleted == 0);
5383 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
463808f2
TT
5384 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5385 ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
5386 e4b->bd_group, group, pa->pa_pstart);
20427949 5387 return;
463808f2 5388 }
c9de560d
AT
5389 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5390 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3e1e5f50 5391 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
c9de560d
AT
5392}
5393
5394/*
5395 * releases all preallocations in given group
5396 *
5397 * first, we need to decide discard policy:
5398 * - when do we discard
5399 * 1) ENOSPC
5400 * - how many do we discard
5401 * 1) how many requested
5402 */
4ddfef7b
ES
5403static noinline_for_stack int
5404ext4_mb_discard_group_preallocations(struct super_block *sb,
8c80fb31 5405 ext4_group_t group, int *busy)
c9de560d
AT
5406{
5407 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
5408 struct buffer_head *bitmap_bh = NULL;
5409 struct ext4_prealloc_space *pa, *tmp;
0f6bc579 5410 LIST_HEAD(list);
c9de560d 5411 struct ext4_buddy e4b;
38727786 5412 struct ext4_inode_info *ei;
c9de560d 5413 int err;
8c80fb31 5414 int free = 0;
c9de560d 5415
5354b2af
TT
5416 if (!grp)
5417 return 0;
d3df1453 5418 mb_debug(sb, "discard preallocation for group %u\n", group);
c9de560d 5419 if (list_empty(&grp->bb_prealloc_list))
bbc4ec77 5420 goto out_dbg;
c9de560d 5421
574ca174 5422 bitmap_bh = ext4_read_block_bitmap(sb, group);
9008a58e
DW
5423 if (IS_ERR(bitmap_bh)) {
5424 err = PTR_ERR(bitmap_bh);
54d3adbc
TT
5425 ext4_error_err(sb, -err,
5426 "Error %d reading block bitmap for %u",
5427 err, group);
bbc4ec77 5428 goto out_dbg;
c9de560d
AT
5429 }
5430
5431 err = ext4_mb_load_buddy(sb, group, &e4b);
ce89f46c 5432 if (err) {
9651e6b2
KK
5433 ext4_warning(sb, "Error %d loading buddy information for %u",
5434 err, group);
ce89f46c 5435 put_bh(bitmap_bh);
bbc4ec77 5436 goto out_dbg;
ce89f46c 5437 }
c9de560d 5438
c9de560d
AT
5439 ext4_lock_group(sb, group);
5440 list_for_each_entry_safe(pa, tmp,
5441 &grp->bb_prealloc_list, pa_group_list) {
5442 spin_lock(&pa->pa_lock);
5443 if (atomic_read(&pa->pa_count)) {
5444 spin_unlock(&pa->pa_lock);
8c80fb31 5445 *busy = 1;
c9de560d
AT
5446 continue;
5447 }
5448 if (pa->pa_deleted) {
5449 spin_unlock(&pa->pa_lock);
5450 continue;
5451 }
5452
5453 /* seems this one can be freed ... */
27bc446e 5454 ext4_mb_mark_pa_deleted(sb, pa);
c9de560d 5455
70022da8
YB
5456 if (!free)
5457 this_cpu_inc(discard_pa_seq);
5458
c9de560d
AT
5459 /* we can trust pa_free ... */
5460 free += pa->pa_free;
5461
5462 spin_unlock(&pa->pa_lock);
5463
5464 list_del(&pa->pa_group_list);
5465 list_add(&pa->u.pa_tmp_list, &list);
5466 }
5467
c9de560d
AT
5468 /* now free all selected PAs */
5469 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5470
5471 /* remove from object (inode or locality group) */
a8e38fd3
OM
5472 if (pa->pa_type == MB_GROUP_PA) {
5473 spin_lock(pa->pa_node_lock.lg_lock);
5474 list_del_rcu(&pa->pa_node.lg_list);
5475 spin_unlock(pa->pa_node_lock.lg_lock);
5476 } else {
38727786
OM
5477 write_lock(pa->pa_node_lock.inode_lock);
5478 ei = EXT4_I(pa->pa_inode);
5479 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5480 write_unlock(pa->pa_node_lock.inode_lock);
a8e38fd3 5481 }
c9de560d 5482
38727786
OM
5483 list_del(&pa->u.pa_tmp_list);
5484
5485 if (pa->pa_type == MB_GROUP_PA) {
3e1e5f50 5486 ext4_mb_release_group_pa(&e4b, pa);
38727786
OM
5487 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5488 } else {
3e1e5f50 5489 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
38727786
OM
5490 ext4_mb_pa_free(pa);
5491 }
c9de560d
AT
5492 }
5493
c9de560d 5494 ext4_unlock_group(sb, group);
e39e07fd 5495 ext4_mb_unload_buddy(&e4b);
c9de560d 5496 put_bh(bitmap_bh);
bbc4ec77 5497out_dbg:
d3df1453 5498 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
8c80fb31
CX
5499 free, group, grp->bb_free);
5500 return free;
c9de560d
AT
5501}
5502
5503/*
5504 * releases all non-used preallocated blocks for given inode
5505 *
5506 * It's important to discard preallocations under i_data_sem
5507 * We don't want another block to be served from the prealloc
5508 * space when we are discarding the inode prealloc space.
5509 *
5510 * FIXME!! Make sure it is valid at all the call sites
5511 */
2ffd2a6a 5512void ext4_discard_preallocations(struct inode *inode)
c9de560d
AT
5513{
5514 struct ext4_inode_info *ei = EXT4_I(inode);
5515 struct super_block *sb = inode->i_sb;
5516 struct buffer_head *bitmap_bh = NULL;
5517 struct ext4_prealloc_space *pa, *tmp;
5518 ext4_group_t group = 0;
0f6bc579 5519 LIST_HEAD(list);
c9de560d 5520 struct ext4_buddy e4b;
38727786 5521 struct rb_node *iter;
c9de560d
AT
5522 int err;
5523
f0e54b60 5524 if (!S_ISREG(inode->i_mode))
c9de560d 5525 return;
c9de560d 5526
8016e29f
HS
5527 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5528 return;
5529
d3df1453
RH
5530 mb_debug(sb, "discard preallocation for inode %lu\n",
5531 inode->i_ino);
27bc446e 5532 trace_ext4_discard_preallocations(inode,
f0e54b60 5533 atomic_read(&ei->i_prealloc_active));
27bc446e 5534
c9de560d
AT
5535repeat:
5536 /* first, collect all pa's in the inode */
38727786 5537 write_lock(&ei->i_prealloc_lock);
2ffd2a6a 5538 for (iter = rb_first(&ei->i_prealloc_node); iter;
38727786
OM
5539 iter = rb_next(iter)) {
5540 pa = rb_entry(iter, struct ext4_prealloc_space,
5541 pa_node.inode_node);
a8e38fd3 5542 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
38727786 5543
c9de560d
AT
5544 spin_lock(&pa->pa_lock);
5545 if (atomic_read(&pa->pa_count)) {
5546 /* this shouldn't happen often - nobody should
5547 * use preallocation while we're discarding it */
5548 spin_unlock(&pa->pa_lock);
38727786 5549 write_unlock(&ei->i_prealloc_lock);
9d8b9ec4
TT
5550 ext4_msg(sb, KERN_ERR,
5551 "uh-oh! used pa while discarding");
c9de560d
AT
5552 WARN_ON(1);
5553 schedule_timeout_uninterruptible(HZ);
5554 goto repeat;
5555
5556 }
5557 if (pa->pa_deleted == 0) {
27bc446e 5558 ext4_mb_mark_pa_deleted(sb, pa);
c9de560d 5559 spin_unlock(&pa->pa_lock);
38727786 5560 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
c9de560d
AT
5561 list_add(&pa->u.pa_tmp_list, &list);
5562 continue;
5563 }
5564
5565 /* someone is deleting pa right now */
5566 spin_unlock(&pa->pa_lock);
38727786 5567 write_unlock(&ei->i_prealloc_lock);
c9de560d
AT
5568
5569 /* we have to wait here because pa_deleted
5570 * doesn't mean pa is already unlinked from
5571 * the list. as we might be called from
5572 * ->clear_inode() the inode will get freed
5573 * and a concurrent thread which is unlinking
5574 * the pa from the inode's list may access already
5575 * freed memory, bad-bad-bad */
5576
5577 /* XXX: if this happens too often, we can
5578 * add a flag to force wait only in case
5579 * of ->clear_inode(), but not in case of
5580 * regular truncate */
5581 schedule_timeout_uninterruptible(HZ);
5582 goto repeat;
5583 }
38727786 5584 write_unlock(&ei->i_prealloc_lock);
c9de560d
AT
5585
5586 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
cc0fb9ad 5587 BUG_ON(pa->pa_type != MB_INODE_PA);
bd86298e 5588 group = ext4_get_group_number(sb, pa->pa_pstart);
c9de560d 5589
9651e6b2
KK
5590 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5591 GFP_NOFS|__GFP_NOFAIL);
ce89f46c 5592 if (err) {
54d3adbc
TT
5593 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5594 err, group);
ce89f46c
AK
5595 continue;
5596 }
c9de560d 5597
574ca174 5598 bitmap_bh = ext4_read_block_bitmap(sb, group);
9008a58e
DW
5599 if (IS_ERR(bitmap_bh)) {
5600 err = PTR_ERR(bitmap_bh);
54d3adbc
TT
5601 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5602 err, group);
e39e07fd 5603 ext4_mb_unload_buddy(&e4b);
ce89f46c 5604 continue;
c9de560d
AT
5605 }
5606
5607 ext4_lock_group(sb, group);
5608 list_del(&pa->pa_group_list);
3e1e5f50 5609 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
c9de560d
AT
5610 ext4_unlock_group(sb, group);
5611
e39e07fd 5612 ext4_mb_unload_buddy(&e4b);
c9de560d
AT
5613 put_bh(bitmap_bh);
5614
5615 list_del(&pa->u.pa_tmp_list);
38727786 5616 ext4_mb_pa_free(pa);
c9de560d
AT
5617 }
5618}
5619
53f86b17
RH
5620static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5621{
5622 struct ext4_prealloc_space *pa;
5623
5624 BUG_ON(ext4_pspace_cachep == NULL);
5625 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5626 if (!pa)
5627 return -ENOMEM;
5628 atomic_set(&pa->pa_count, 1);
5629 ac->ac_pa = pa;
5630 return 0;
5631}
5632
82089725 5633static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac)
53f86b17
RH
5634{
5635 struct ext4_prealloc_space *pa = ac->ac_pa;
5636
5637 BUG_ON(!pa);
5638 ac->ac_pa = NULL;
5639 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
82089725
OM
5640 /*
5641 * the current function is only called due to an error or due to
5642 * len of found blocks < len of requested blocks, hence the PA has not
5643 * been added to grp->bb_prealloc_list. So we don't need to lock it
5644 */
5645 pa->pa_deleted = 1;
5646 ext4_mb_pa_free(pa);
53f86b17
RH
5647}
5648
6ba495e9 5649#ifdef CONFIG_EXT4_DEBUG
e68cf40c 5650static inline void ext4_mb_show_pa(struct super_block *sb)
c9de560d 5651{
e68cf40c 5652 ext4_group_t i, ngroups;
c9de560d 5653
0a1b2f5e 5654 if (ext4_emergency_state(sb))
e3570639
ES
5655 return;
5656
8df9675f 5657 ngroups = ext4_get_groups_count(sb);
d3df1453 5658 mb_debug(sb, "groups: ");
8df9675f 5659 for (i = 0; i < ngroups; i++) {
c9de560d
AT
5660 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5661 struct ext4_prealloc_space *pa;
5662 ext4_grpblk_t start;
5663 struct list_head *cur;
5354b2af
TT
5664
5665 if (!grp)
5666 continue;
c9de560d
AT
5667 ext4_lock_group(sb, i);
5668 list_for_each(cur, &grp->bb_prealloc_list) {
5669 pa = list_entry(cur, struct ext4_prealloc_space,
5670 pa_group_list);
5671 spin_lock(&pa->pa_lock);
5672 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5673 NULL, &start);
5674 spin_unlock(&pa->pa_lock);
d3df1453
RH
5675 mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5676 pa->pa_len);
c9de560d 5677 }
60bd63d1 5678 ext4_unlock_group(sb, i);
d3df1453
RH
5679 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5680 grp->bb_fragments);
c9de560d 5681 }
c9de560d 5682}
e68cf40c
RH
5683
5684static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5685{
5686 struct super_block *sb = ac->ac_sb;
5687
0a1b2f5e 5688 if (ext4_emergency_state(sb))
e68cf40c
RH
5689 return;
5690
d3df1453 5691 mb_debug(sb, "Can't allocate:"
e68cf40c 5692 " Allocation context details:");
d3df1453 5693 mb_debug(sb, "status %u flags 0x%x",
e68cf40c 5694 ac->ac_status, ac->ac_flags);
d3df1453 5695 mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
e68cf40c
RH
5696 "goal %lu/%lu/%lu@%lu, "
5697 "best %lu/%lu/%lu@%lu cr %d",
5698 (unsigned long)ac->ac_o_ex.fe_group,
5699 (unsigned long)ac->ac_o_ex.fe_start,
5700 (unsigned long)ac->ac_o_ex.fe_len,
5701 (unsigned long)ac->ac_o_ex.fe_logical,
5702 (unsigned long)ac->ac_g_ex.fe_group,
5703 (unsigned long)ac->ac_g_ex.fe_start,
5704 (unsigned long)ac->ac_g_ex.fe_len,
5705 (unsigned long)ac->ac_g_ex.fe_logical,
5706 (unsigned long)ac->ac_b_ex.fe_group,
5707 (unsigned long)ac->ac_b_ex.fe_start,
5708 (unsigned long)ac->ac_b_ex.fe_len,
5709 (unsigned long)ac->ac_b_ex.fe_logical,
5710 (int)ac->ac_criteria);
d3df1453 5711 mb_debug(sb, "%u found", ac->ac_found);
6a0c5887 5712 mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa));
569f196f
RH
5713 if (ac->ac_pa)
5714 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5715 "group pa" : "inode pa");
e68cf40c
RH
5716 ext4_mb_show_pa(sb);
5717}
c9de560d 5718#else
e68cf40c
RH
5719static inline void ext4_mb_show_pa(struct super_block *sb)
5720{
e68cf40c 5721}
c9de560d
AT
5722static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5723{
e68cf40c 5724 ext4_mb_show_pa(ac->ac_sb);
c9de560d
AT
5725}
5726#endif
5727
5728/*
5729 * We use locality group preallocation for small files. The size of the
5730 * file is determined by the current size or the resulting size after
5731 * allocation, whichever is larger
5732 *
b713a5ec 5733 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
c9de560d
AT
5734 */
5735static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5736{
5737 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5738 int bsbits = ac->ac_sb->s_blocksize_bits;
5739 loff_t size, isize;
a9f2a293 5740 bool inode_pa_eligible, group_pa_eligible;
c9de560d
AT
5741
5742 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5743 return;
5744
4ba74d00
TT
5745 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5746 return;
5747
a9f2a293
JK
5748 group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5749 inode_pa_eligible = true;
43bbddc0 5750 size = extent_logical_end(sbi, &ac->ac_o_ex);
50797481
TT
5751 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5752 >> bsbits;
c9de560d 5753
a9f2a293 5754 /* No point in using inode preallocation for closed files */
82dd124c 5755 if ((size == isize) && !ext4_fs_is_busy(sbi) &&
a9f2a293
JK
5756 !inode_is_open_for_write(ac->ac_inode))
5757 inode_pa_eligible = false;
50797481 5758
71780577 5759 size = max(size, isize);
a9f2a293
JK
5760 /* Don't use group allocation for large files */
5761 if (size > sbi->s_mb_stream_request)
5762 group_pa_eligible = false;
5763
5764 if (!group_pa_eligible) {
5765 if (inode_pa_eligible)
5766 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5767 else
5768 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
c9de560d 5769 return;
4ba74d00 5770 }
c9de560d
AT
5771
5772 BUG_ON(ac->ac_lg != NULL);
5773 /*
5774 * locality group prealloc space are per cpu. The reason for having
5775 * per cpu locality group is to reduce the contention between block
5776 * request from multiple CPUs.
5777 */
a0b6bc63 5778 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
c9de560d
AT
5779
5780 /* we're going to use group allocation */
5781 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5782
5783 /* serialize all allocations in the group */
5784 mutex_lock(&ac->ac_lg->lg_mutex);
5785}
5786
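/*
 * Illustrative sketch, not part of mballoc.c: the size policy of
 * ext4_mb_group_or_file() reduced to its core test. "size" is the
 * larger of the file's current size and the extent end this
 * allocation would produce, in blocks; the default 16-block
 * mb_stream_req threshold is assumed here.
 */
static int demo_use_group_pa(unsigned long long alloc_end_blk,
			     unsigned long long isize_blk)
{
	unsigned long long size = alloc_end_blk > isize_blk ?
				  alloc_end_blk : isize_blk;

	return size <= 16;	/* small file: per-CPU locality group PA */
}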
d73eff68 5787static noinline_for_stack void
4ddfef7b 5788ext4_mb_initialize_context(struct ext4_allocation_context *ac,
c9de560d
AT
5789 struct ext4_allocation_request *ar)
5790{
5791 struct super_block *sb = ar->inode->i_sb;
5792 struct ext4_sb_info *sbi = EXT4_SB(sb);
5793 struct ext4_super_block *es = sbi->s_es;
5794 ext4_group_t group;
498e5f24
TT
5795 unsigned int len;
5796 ext4_fsblk_t goal;
c9de560d
AT
5797 ext4_grpblk_t block;
5798
5799 /* we can't allocate > group size */
5800 len = ar->len;
5801
5802 /* just a dirty hack to filter too big requests */
40ae3487
TT
5803 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5804 len = EXT4_CLUSTERS_PER_GROUP(sb);
c9de560d
AT
5805
5806 /* start searching from the goal */
5807 goal = ar->goal;
5808 if (goal < le32_to_cpu(es->s_first_data_block) ||
5809 goal >= ext4_blocks_count(es))
5810 goal = le32_to_cpu(es->s_first_data_block);
5811 ext4_get_group_no_and_offset(sb, goal, &group, &block);
5812
5813 /* set up allocation goals */
f5a44db5 5814 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
c9de560d 5815 ac->ac_status = AC_STATUS_CONTINUE;
c9de560d
AT
5816 ac->ac_sb = sb;
5817 ac->ac_inode = ar->inode;
53accfa9 5818 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
c9de560d
AT
5819 ac->ac_o_ex.fe_group = group;
5820 ac->ac_o_ex.fe_start = block;
5821 ac->ac_o_ex.fe_len = len;
53accfa9 5822 ac->ac_g_ex = ac->ac_o_ex;
7e170922 5823 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
c9de560d 5824 ac->ac_flags = ar->flags;
c9de560d 5825
3cb77bd2 5826 /* we have to define context: we'll work with a file or
c9de560d
AT
5827 * locality group. this is a policy, actually */
5828 ext4_mb_group_or_file(ac);
5829
d3df1453 5830 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
c9de560d
AT
5831 "left: %u/%u, right %u/%u to %swritable\n",
5832 (unsigned) ar->len, (unsigned) ar->logical,
5833 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5834 (unsigned) ar->lleft, (unsigned) ar->pleft,
5835 (unsigned) ar->lright, (unsigned) ar->pright,
82dd124c 5836 inode_is_open_for_write(ar->inode) ? "" : "non-");
c9de560d
AT
5837}
5838
6be2ded1
AK
5839static noinline_for_stack void
5840ext4_mb_discard_lg_preallocations(struct super_block *sb,
5841 struct ext4_locality_group *lg,
5842 int order, int total_entries)
5843{
5844 ext4_group_t group = 0;
5845 struct ext4_buddy e4b;
0f6bc579 5846 LIST_HEAD(discard_list);
6be2ded1 5847 struct ext4_prealloc_space *pa, *tmp;
6be2ded1 5848
d3df1453 5849 mb_debug(sb, "discard locality group preallocation\n");
6be2ded1 5850
6be2ded1
AK
5851 spin_lock(&lg->lg_prealloc_lock);
5852 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
a8e38fd3 5853 pa_node.lg_list,
92e9c58c 5854 lockdep_is_held(&lg->lg_prealloc_lock)) {
6be2ded1
AK
5855 spin_lock(&pa->pa_lock);
5856 if (atomic_read(&pa->pa_count)) {
5857 /*
5858 * This is the pa that we just used
5859 * for block allocation. So don't
5860 * free that
5861 */
5862 spin_unlock(&pa->pa_lock);
5863 continue;
5864 }
5865 if (pa->pa_deleted) {
5866 spin_unlock(&pa->pa_lock);
5867 continue;
5868 }
5869 /* only lg prealloc space */
cc0fb9ad 5870 BUG_ON(pa->pa_type != MB_GROUP_PA);
6be2ded1
AK
5871
5872 /* seems this one can be freed ... */
27bc446e 5873 ext4_mb_mark_pa_deleted(sb, pa);
6be2ded1
AK
5874 spin_unlock(&pa->pa_lock);
5875
a8e38fd3 5876 list_del_rcu(&pa->pa_node.lg_list);
6be2ded1
AK
5877 list_add(&pa->u.pa_tmp_list, &discard_list);
5878
5879 total_entries--;
5880 if (total_entries <= 5) {
5881 /*
5882 * we want to keep only 5 entries
5883 * allowing it to grow to 8. This
5884 * makes sure we don't call discard
5885 * soon for this list.
5886 */
5887 break;
5888 }
5889 }
5890 spin_unlock(&lg->lg_prealloc_lock);
5891
5892 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
9651e6b2 5893 int err;
6be2ded1 5894
bd86298e 5895 group = ext4_get_group_number(sb, pa->pa_pstart);
9651e6b2
KK
5896 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5897 GFP_NOFS|__GFP_NOFAIL);
5898 if (err) {
54d3adbc
TT
5899 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5900 err, group);
6be2ded1
AK
5901 continue;
5902 }
5903 ext4_lock_group(sb, group);
5904 list_del(&pa->pa_group_list);
3e1e5f50 5905 ext4_mb_release_group_pa(&e4b, pa);
6be2ded1
AK
5906 ext4_unlock_group(sb, group);
5907
e39e07fd 5908 ext4_mb_unload_buddy(&e4b);
6be2ded1
AK
5909 list_del(&pa->u.pa_tmp_list);
5910 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5911 }
6be2ded1
AK
5912}
5913
5914/*
5915 * We have incremented pa_count. So it cannot be freed at this
5916 * point. Also we hold lg_mutex. So no parallel allocation is
5917 * possible from this lg. That means pa_free cannot be updated.
5918 *
5919 * A parallel ext4_mb_discard_group_preallocations() is possible,
5920 * which can cause the lg_prealloc_list to be updated.
5921 */
5922
5923static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5924{
5925 int order, added = 0, lg_prealloc_count = 1;
5926 struct super_block *sb = ac->ac_sb;
5927 struct ext4_locality_group *lg = ac->ac_lg;
5928 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5929
5930 order = fls(pa->pa_free) - 1;
5931 if (order > PREALLOC_TB_SIZE - 1)
5932 /* The max size of hash table is PREALLOC_TB_SIZE */
5933 order = PREALLOC_TB_SIZE - 1;
5934 /* Add the prealloc space to lg */
f1167009 5935 spin_lock(&lg->lg_prealloc_lock);
6be2ded1 5936 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
a8e38fd3 5937 pa_node.lg_list,
92e9c58c 5938 lockdep_is_held(&lg->lg_prealloc_lock)) {
6be2ded1
AK
5939 spin_lock(&tmp_pa->pa_lock);
5940 if (tmp_pa->pa_deleted) {
e7c9e3e9 5941 spin_unlock(&tmp_pa->pa_lock);
6be2ded1
AK
5942 continue;
5943 }
5944 if (!added && pa->pa_free < tmp_pa->pa_free) {
5945 /* Add to the tail of the previous entry */
a8e38fd3
OM
5946 list_add_tail_rcu(&pa->pa_node.lg_list,
5947 &tmp_pa->pa_node.lg_list);
6be2ded1
AK
5948 added = 1;
5949 /*
5950 * we want to count the total
5951 * number of entries in the list
5952 */
5953 }
5954 spin_unlock(&tmp_pa->pa_lock);
5955 lg_prealloc_count++;
5956 }
5957 if (!added)
a8e38fd3 5958 list_add_tail_rcu(&pa->pa_node.lg_list,
6be2ded1 5959 &lg->lg_prealloc_list[order]);
f1167009 5960 spin_unlock(&lg->lg_prealloc_lock);
6be2ded1
AK
5961
5962 /* Now trim the list to be not more than 8 elements */
ad635507 5963 if (lg_prealloc_count > 8)
6be2ded1 5964 ext4_mb_discard_lg_preallocations(sb, lg,
f1167009 5965 order, lg_prealloc_count);
6be2ded1
AK
5966}
5967
c9de560d
AT
5968/*
5969 * release all resource we used in allocation
5970 */
11fd1a5d 5971static void ext4_mb_release_context(struct ext4_allocation_context *ac)
c9de560d 5972{
53accfa9 5973 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
6be2ded1
AK
5974 struct ext4_prealloc_space *pa = ac->ac_pa;
5975 if (pa) {
cc0fb9ad 5976 if (pa->pa_type == MB_GROUP_PA) {
c9de560d 5977 /* see comment in ext4_mb_use_group_pa() */
6be2ded1 5978 spin_lock(&pa->pa_lock);
53accfa9
TT
5979 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5980 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
6be2ded1
AK
5981 pa->pa_free -= ac->ac_b_ex.fe_len;
5982 pa->pa_len -= ac->ac_b_ex.fe_len;
5983 spin_unlock(&pa->pa_lock);
66d5e027 5984
5985 /*
5986 * We want to add the pa to the right bucket.
5987 * Remove it from the list and while adding
5988 * make sure the list to which we are adding
5989 * doesn't grow big.
5990 */
5991 if (likely(pa->pa_free)) {
a8e38fd3
OM
5992 spin_lock(pa->pa_node_lock.lg_lock);
5993 list_del_rcu(&pa->pa_node.lg_list);
5994 spin_unlock(pa->pa_node_lock.lg_lock);
66d5e027 5995 ext4_mb_add_n_trim(ac);
5996 }
ba443916 5997 }
27bc446e 5998
ba443916
AK
5999 ext4_mb_put_pa(ac, ac->ac_sb, pa);
6000 }
ccedf35b
MWO
6001 if (ac->ac_bitmap_folio)
6002 folio_put(ac->ac_bitmap_folio);
c84f1510
MWO
6003 if (ac->ac_buddy_folio)
6004 folio_put(ac->ac_buddy_folio);
c9de560d
AT
6005 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
6006 mutex_unlock(&ac->ac_lg->lg_mutex);
6007 ext4_mb_collect_stats(ac);
c9de560d
AT
6008}
6009
6010static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
6011{
8df9675f 6012 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
c9de560d 6013 int ret;
8c80fb31
CX
6014 int freed = 0, busy = 0;
6015 int retry = 0;
c9de560d 6016
9bffad1e 6017 trace_ext4_mb_discard_preallocations(sb, needed);
8c80fb31
CX
6018
6019 if (needed == 0)
6020 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
6021 repeat:
8df9675f 6022 for (i = 0; i < ngroups && needed > 0; i++) {
8c80fb31 6023 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
c9de560d
AT
6024 freed += ret;
6025 needed -= ret;
8c80fb31
CX
6026 cond_resched();
6027 }
6028
6029 if (needed > 0 && busy && ++retry < 3) {
6030 busy = 0;
6031 goto repeat;
c9de560d
AT
6032 }
6033
6034 return freed;
6035}
6036
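/*
 * Illustrative sketch, not part of mballoc.c: the shape of the ENOSPC
 * discard walk above. demo_discard_group() stands in for
 * ext4_mb_discard_group_preallocations() and is hypothetical; "busy"
 * reports that a group held in-use PAs, so another pass may succeed.
 */
static int demo_discard_all(int ngroups, int needed,
			    int (*demo_discard_group)(int grp, int *busy))
{
	int freed = 0, busy = 0, retry = 0, i, ret;

repeat:
	for (i = 0; i < ngroups && needed > 0; i++) {
		ret = demo_discard_group(i, &busy);
		freed += ret;
		needed -= ret;
	}
	if (needed > 0 && busy && ++retry < 3) {
		busy = 0;
		goto repeat;
	}
	return freed;
}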
cf5e2ca6 6037static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
07b5b8e1 6038 struct ext4_allocation_context *ac, u64 *seq)
cf5e2ca6
RH
6039{
6040 int freed;
07b5b8e1
RH
6041 u64 seq_retry = 0;
6042 bool ret = false;
cf5e2ca6
RH
6043
6044 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
07b5b8e1
RH
6045 if (freed) {
6046 ret = true;
6047 goto out_dbg;
6048 }
6049 seq_retry = ext4_get_discard_pa_seq_sum();
99377830
RH
6050 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
6051 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
07b5b8e1
RH
6052 *seq = seq_retry;
6053 ret = true;
6054 }
6055
6056out_dbg:
867b7390 6057 mb_debug(sb, "freed %d, retry ? %s\n", freed, str_yes_no(ret));
07b5b8e1 6058 return ret;
cf5e2ca6
RH
6059}
6060
ad78b5ef
KS
6061/*
6062 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
6063 * linearly starting at the goal block and also excludes the blocks which
6064 * are going to be in use after fast commit replay.
6065 */
6066static ext4_fsblk_t
6067ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
6068{
6069 struct buffer_head *bitmap_bh;
6070 struct super_block *sb = ar->inode->i_sb;
6071 struct ext4_sb_info *sbi = EXT4_SB(sb);
6072 ext4_group_t group, nr;
6073 ext4_grpblk_t blkoff;
6074 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
6075 ext4_grpblk_t i = 0;
6076 ext4_fsblk_t goal, block;
79ebf48c 6077 struct ext4_super_block *es = sbi->s_es;
ad78b5ef
KS
6078
6079 goal = ar->goal;
6080 if (goal < le32_to_cpu(es->s_first_data_block) ||
6081 goal >= ext4_blocks_count(es))
6082 goal = le32_to_cpu(es->s_first_data_block);
6083
6084 ar->len = 0;
6085 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
6086 for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6087 bitmap_bh = ext4_read_block_bitmap(sb, group);
6088 if (IS_ERR(bitmap_bh)) {
6089 *errp = PTR_ERR(bitmap_bh);
6090 pr_warn("Failed to read block bitmap\n");
6091 return 0;
6092 }
6093
6094 while (1) {
6095 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6096 blkoff);
6097 if (i >= max)
6098 break;
6099 if (ext4_fc_replay_check_excluded(sb,
6100 ext4_group_first_block_no(sb, group) +
6101 EXT4_C2B(sbi, i))) {
6102 blkoff = i + 1;
6103 } else
6104 break;
6105 }
6106 brelse(bitmap_bh);
6107 if (i < max)
6108 break;
6109
6110 if (++group >= ext4_get_groups_count(sb))
6111 group = 0;
6112
6113 blkoff = 0;
6114 }
6115
6116 if (i >= max) {
6117 *errp = -ENOSPC;
6118 return 0;
6119 }
6120
6121 block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i);
d2f7cf40 6122 ext4_mb_mark_bb(sb, block, 1, true);
ad78b5ef
KS
6123 ar->len = 1;
6124
3f4830ab 6125 *errp = 0;
ad78b5ef
KS
6126 return block;
6127}
8016e29f 6128
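/*
 * Illustrative sketch, not part of mballoc.c: the linear scan the fast
 * commit replay allocator performs, modeled over a plain byte bitmap.
 * demo_excluded() is a hypothetical stand-in for
 * ext4_fc_replay_check_excluded().
 */
static int demo_excluded(unsigned long bit)
{
	return 0;	/* stand-in: nothing is excluded */
}

static long demo_scan_group(const unsigned char *bitmap, unsigned long max,
			    unsigned long start)
{
	unsigned long i;

	for (i = start; i < max; i++) {
		if (bitmap[i / 8] & (1U << (i % 8)))
			continue;	/* in use on disk */
		if (demo_excluded(i))
			continue;	/* will be in use after replay */
		return (long)i;		/* first usable free cluster */
	}
	return -1;			/* caller advances to next group */
}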
c9de560d
AT
6129/*
6130 * Main entry point into mballoc to allocate blocks
6131 * it tries to use preallocation first, then falls back
6132 * to usual allocation
6133 */
6134ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
6c7a120a 6135 struct ext4_allocation_request *ar, int *errp)
c9de560d 6136{
256bdb49 6137 struct ext4_allocation_context *ac = NULL;
c9de560d
AT
6138 struct ext4_sb_info *sbi;
6139 struct super_block *sb;
6140 ext4_fsblk_t block = 0;
60e58e0f 6141 unsigned int inquota = 0;
53accfa9 6142 unsigned int reserv_clstrs = 0;
80fa46d6 6143 int retries = 0;
07b5b8e1 6144 u64 seq;
c9de560d 6145
b10a44c3 6146 might_sleep();
c9de560d
AT
6147 sb = ar->inode->i_sb;
6148 sbi = EXT4_SB(sb);
6149
9bffad1e 6150 trace_ext4_request_blocks(ar);
8016e29f 6151 if (sbi->s_mount_state & EXT4_FC_REPLAY)
ad78b5ef 6152 return ext4_mb_new_blocks_simple(ar, errp);
ba80b101 6153
45dc63e7 6154 /* Allow to use superuser reservation for quota file */
02749a4c 6155 if (ext4_is_quota_file(ar->inode))
45dc63e7
DM
6156 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6157
e3cf5d5d 6158 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
60e58e0f
MC
6159 /* Without delayed allocation we need to verify
6160 * there are enough free blocks to do block allocation
6161 * and verify allocation doesn't exceed the quota limits.
d2a17637 6162 */
55f020db 6163 while (ar->len &&
e7d5f315 6164 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
55f020db 6165
030ba6bc 6166 /* let others free the space */
bb8b20ed 6167 cond_resched();
030ba6bc
AK
6168 ar->len = ar->len >> 1;
6169 }
6170 if (!ar->len) {
bbc4ec77 6171 ext4_mb_show_pa(sb);
a30d542a
AK
6172 *errp = -ENOSPC;
6173 return 0;
6174 }
53accfa9 6175 reserv_clstrs = ar->len;
55f020db 6176 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
53accfa9
TT
6177 dquot_alloc_block_nofail(ar->inode,
6178 EXT4_C2B(sbi, ar->len));
55f020db
AH
6179 } else {
6180 while (ar->len &&
53accfa9
TT
6181 dquot_alloc_block(ar->inode,
6182 EXT4_C2B(sbi, ar->len))) {
55f020db
AH
6183
6184 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6185 ar->len--;
6186 }
60e58e0f
MC
6187 }
6188 inquota = ar->len;
6189 if (ar->len == 0) {
6190 *errp = -EDQUOT;
6c7a120a 6191 goto out;
60e58e0f 6192 }
07031431 6193 }
d2a17637 6194
85556c9a 6195 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
833576b3 6196 if (!ac) {
363d4251 6197 ar->len = 0;
256bdb49 6198 *errp = -ENOMEM;
6c7a120a 6199 goto out;
256bdb49
ES
6200 }
6201
d73eff68 6202 ext4_mb_initialize_context(ac, ar);
c9de560d 6203
256bdb49 6204 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
81198536 6205 seq = this_cpu_read(discard_pa_seq);
256bdb49 6206 if (!ext4_mb_use_preallocated(ac)) {
256bdb49
ES
6207 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6208 ext4_mb_normalize_request(ac, ar);
53f86b17
RH
6209
6210 *errp = ext4_mb_pa_alloc(ac);
6211 if (*errp)
6212 goto errout;
c9de560d
AT
6213repeat:
6214 /* allocate space in core */
6c7a120a 6215 *errp = ext4_mb_regular_allocator(ac);
53f86b17
RH
6216 /*
6217 * pa allocated above is added to grp->bb_prealloc_list only
6218 * when we were able to allocate some block i.e. when
6219 * ac->ac_status == AC_STATUS_FOUND.
6220 * And error from above mean ac->ac_status != AC_STATUS_FOUND
6221 * So we have to free this pa here itself.
6222 */
2c00ef3e 6223 if (*errp) {
82089725 6224 ext4_mb_pa_put_free(ac);
2c00ef3e
AK
6225 ext4_discard_allocated_blocks(ac);
6226 goto errout;
6227 }
53f86b17
RH
6228 if (ac->ac_status == AC_STATUS_FOUND &&
6229 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
82089725 6230 ext4_mb_pa_put_free(ac);
c9de560d 6231 }
256bdb49 6232 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
53accfa9 6233 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
554a5ccc 6234 if (*errp) {
b844167e 6235 ext4_discard_allocated_blocks(ac);
6d138ced
ES
6236 goto errout;
6237 } else {
519deca0
AK
6238 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6239 ar->len = ac->ac_b_ex.fe_len;
6240 }
c9de560d 6241 } else {
80fa46d6
TT
6242 if (++retries < 3 &&
6243 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
c9de560d 6244 goto repeat;
53f86b17
RH
6245 /*
6246 * If block allocation fails then the pa allocated above
6247 * needs to be freed here itself.
6248 */
82089725 6249 ext4_mb_pa_put_free(ac);
c9de560d 6250 *errp = -ENOSPC;
6c7a120a
AK
6251 }
6252
6253 if (*errp) {
aaae558d 6254errout:
256bdb49 6255 ac->ac_b_ex.fe_len = 0;
c9de560d 6256 ar->len = 0;
256bdb49 6257 ext4_mb_show_ac(ac);
c9de560d 6258 }
256bdb49 6259 ext4_mb_release_context(ac);
aaae558d 6260 kmem_cache_free(ext4_ac_cachep, ac);
6c7a120a 6261out:
60e58e0f 6262 if (inquota && ar->len < inquota)
53accfa9 6263 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
0087d9fb 6264 if (!ar->len) {
e3cf5d5d 6265 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
0087d9fb 6266 /* release all the reserved blocks if non delalloc */
57042651 6267 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
53accfa9 6268 reserv_clstrs);
0087d9fb 6269 }
c9de560d 6270
9bffad1e 6271 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
ba80b101 6272
c9de560d
AT
6273 return block;
6274}
c9de560d 6275
c894058d
AK
6276/*
6277 * We can merge two free data extents only if the physical blocks
6278 * are contiguous, AND the extents were freed by the same transaction,
6279 * AND the blocks are associated with the same group.
6280 */
a0154344
DJ
6281static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
6282 struct ext4_free_data *entry,
6283 struct ext4_free_data *new_entry,
6284 struct rb_root *entry_rb_root)
c894058d 6285{
a0154344
DJ
6286 if ((entry->efd_tid != new_entry->efd_tid) ||
6287 (entry->efd_group != new_entry->efd_group))
6288 return;
6289 if (entry->efd_start_cluster + entry->efd_count ==
6290 new_entry->efd_start_cluster) {
6291 new_entry->efd_start_cluster = entry->efd_start_cluster;
6292 new_entry->efd_count += entry->efd_count;
6293 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6294 entry->efd_start_cluster) {
6295 new_entry->efd_count += entry->efd_count;
6296 } else
6297 return;
6298 spin_lock(&sbi->s_md_lock);
6299 list_del(&entry->efd_list);
6300 spin_unlock(&sbi->s_md_lock);
6301 rb_erase(&entry->efd_node, entry_rb_root);
6302 kmem_cache_free(ext4_free_data_cachep, entry);
c894058d
AK
6303}
6304
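/*
 * Illustrative sketch, not part of mballoc.c: the merge rule above on
 * a minimal extent type. Two pending-free extents coalesce only when
 * they share a transaction and a block group and are physically
 * contiguous; on success the caller drops the absorbed entry.
 */
struct demo_efd {
	unsigned long long tid, group, start, count;
};

static int demo_try_merge(struct demo_efd *into, const struct demo_efd *e)
{
	if (e->tid != into->tid || e->group != into->group)
		return 0;
	if (e->start + e->count == into->start)
		into->start = e->start;		/* e sits just below */
	else if (into->start + into->count != e->start)
		return 0;			/* not contiguous */
	into->count += e->count;
	return 1;
}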
85b67ffb 6305static noinline_for_stack void
4ddfef7b 6306ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
7a2fcbf7 6307 struct ext4_free_data *new_entry)
c9de560d 6308{
e29136f8 6309 ext4_group_t group = e4b->bd_group;
84130193 6310 ext4_grpblk_t cluster;
d08854f5 6311 ext4_grpblk_t clusters = new_entry->efd_count;
7a2fcbf7 6312 struct ext4_free_data *entry;
c9de560d
AT
6313 struct ext4_group_info *db = e4b->bd_info;
6314 struct super_block *sb = e4b->bd_sb;
6315 struct ext4_sb_info *sbi = EXT4_SB(sb);
c894058d
AK
6316 struct rb_node **n = &db->bb_free_root.rb_node, *node;
6317 struct rb_node *parent = NULL, *new_node;
6318
0390131b 6319 BUG_ON(!ext4_handle_valid(handle));
99b150d8 6320 BUG_ON(e4b->bd_bitmap_folio == NULL);
5eea586b 6321 BUG_ON(e4b->bd_buddy_folio == NULL);
c9de560d 6322
18aadd47
BJ
6323 new_node = &new_entry->efd_node;
6324 cluster = new_entry->efd_start_cluster;
c894058d 6325
c894058d
AK
6326 if (!*n) {
6327 /* first free block extent. We need to
6328 * protect the buddy cache from being freed,
6329 * otherwise we'll refresh it from
6330 * on-disk bitmap and lose not-yet-available
6331 * blocks */
5eea586b 6332 folio_get(e4b->bd_buddy_folio);
99b150d8 6333 folio_get(e4b->bd_bitmap_folio);
c894058d
AK
6334 }
6335 while (*n) {
6336 parent = *n;
18aadd47
BJ
6337 entry = rb_entry(parent, struct ext4_free_data, efd_node);
6338 if (cluster < entry->efd_start_cluster)
c894058d 6339 n = &(*n)->rb_left;
18aadd47 6340 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
c894058d
AK
6341 n = &(*n)->rb_right;
6342 else {
e29136f8 6343 ext4_grp_locked_error(sb, group, 0,
84130193
TT
6344 ext4_group_first_block_no(sb, group) +
6345 EXT4_C2B(sbi, cluster),
e29136f8 6346 "Block already on to-be-freed list");
cca41553 6347 kmem_cache_free(ext4_free_data_cachep, new_entry);
85b67ffb 6348 return;
c9de560d 6349 }
c894058d 6350 }
c9de560d 6351
c894058d
AK
6352 rb_link_node(new_node, parent, n);
6353 rb_insert_color(new_node, &db->bb_free_root);
6354
 6355 /* Now try to see if the extent can be merged to the left and right */
6356 node = rb_prev(new_node);
6357 if (node) {
18aadd47 6358 entry = rb_entry(node, struct ext4_free_data, efd_node);
a0154344
DJ
6359 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6360 &(db->bb_free_root));
c894058d 6361 }
c9de560d 6362
c894058d
AK
6363 node = rb_next(new_node);
6364 if (node) {
18aadd47 6365 entry = rb_entry(node, struct ext4_free_data, efd_node);
a0154344
DJ
6366 ext4_try_merge_freed_extent(sbi, entry, new_entry,
6367 &(db->bb_free_root));
c9de560d 6368 }
a0154344 6369
d08854f5 6370 spin_lock(&sbi->s_md_lock);
ce774e53 6371 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
d08854f5
TT
6372 sbi->s_mb_free_pending += clusters;
6373 spin_unlock(&sbi->s_md_lock);
c9de560d
AT
6374}
6375
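/*
 * A user-space sketch of the ordering and overlap check done by the
 * rb-tree walk above, using a sorted singly linked list in place of
 * the kernel rb-tree. Only the new extent's start cluster is tested,
 * exactly as in the walk above; all names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct fnode {
	unsigned int start, count;	/* cluster range [start, start+count) */
	struct fnode *next;
};

/* Insert in ascending start order; -1 if start lies inside an entry. */
static int insert_range(struct fnode **head, unsigned int start,
			unsigned int count)
{
	struct fnode **n = head, *new;

	while (*n) {
		struct fnode *e = *n;

		if (start < e->start)
			break;			/* new extent sorts before e */
		if (start >= e->start + e->count) {
			n = &e->next;		/* keep walking right */
			continue;
		}
		return -1;	/* "Block already on to-be-freed list" */
	}
	new = malloc(sizeof(*new));
	if (!new)
		return -1;
	new->start = start;
	new->count = count;
	new->next = *n;
	*n = new;
	return 0;
}

int main(void)
{
	struct fnode *head = NULL;

	insert_range(&head, 100, 10);
	insert_range(&head, 50, 10);
	printf("%d\n", insert_range(&head, 105, 4));	/* -1: inside [100,110) */
	return 0;
}
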
8016e29f
HS
6376static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
6377 unsigned long count)
6378{
8016e29f 6379 struct super_block *sb = inode->i_sb;
8016e29f
HS
6380 ext4_group_t group;
6381 ext4_grpblk_t blkoff;
8016e29f
HS
6382
6383 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
c431d386
KS
6384 ext4_mb_mark_context(NULL, sb, false, group, blkoff, count,
6385 EXT4_MB_BITMAP_MARKED_CHECK |
6386 EXT4_MB_SYNC_UPDATE,
6387 NULL);
8016e29f
HS
6388}
6389
44338711 6390/**
8ac3939d
RH
6391 * ext4_mb_clear_bb() -- helper function for freeing blocks.
6392 * Used by ext4_free_blocks()
44338711
TT
6393 * @handle: handle for this transaction
6394 * @inode: inode
c60990b3
TT
6395 * @block: starting physical block to be freed
6396 * @count: number of blocks to be freed
5def1360 6397 * @flags: flags used by ext4_free_blocks
c9de560d 6398 */
8ac3939d
RH
6399static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
6400 ext4_fsblk_t block, unsigned long count,
6401 int flags)
c9de560d 6402{
c9de560d 6403 struct super_block *sb = inode->i_sb;
5354b2af 6404 struct ext4_group_info *grp;
498e5f24 6405 unsigned int overflow;
c9de560d 6406 ext4_grpblk_t bit;
c9de560d
AT
6407 ext4_group_t block_group;
6408 struct ext4_sb_info *sbi;
6409 struct ext4_buddy e4b;
84130193 6410 unsigned int count_clusters;
c9de560d 6411 int err = 0;
38b8f70c
KS
6412 int mark_flags = 0;
6413 ext4_grpblk_t changed;
c9de560d 6414
8016e29f
HS
6415 sbi = EXT4_SB(sb);
6416
1e1c2b86
LC
6417 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6418 !ext4_inode_block_valid(inode, block, count)) {
6419 ext4_error(sb, "Freeing blocks in system zone - "
6420 "Block = %llu, count = %lu", block, count);
 6421 /* err = 0. ext4_std_error should be a no-op */
33e728c6 6422 goto error_out;
1e1c2b86
LC
6423 }
6424 flags |= EXT4_FREE_BLOCKS_VALIDATED;
6425
c9de560d
AT
6426do_more:
6427 overflow = 0;
6428 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6429
5354b2af
TT
6430 grp = ext4_get_group_info(sb, block_group);
6431 if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
163a203d
DW
6432 return;
6433
c9de560d
AT
6434 /*
6435 * Check to see if we are freeing blocks across a group
6436 * boundary.
6437 */
84130193
TT
6438 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
6439 overflow = EXT4_C2B(sbi, bit) + count -
6440 EXT4_BLOCKS_PER_GROUP(sb);
c9de560d 6441 count -= overflow;
1e1c2b86
LC
6442 /* The range changed so it's no longer validated */
6443 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
c9de560d 6444 }
810da240 6445 count_clusters = EXT4_NUM_B2C(sbi, count);
33e728c6
KS
6446 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
6447
6448 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
6449 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
6450 GFP_NOFS|__GFP_NOFAIL);
6451 if (err)
6452 goto error_out;
6453
6454 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6455 !ext4_inode_block_valid(inode, block, count)) {
6456 ext4_error(sb, "Freeing blocks in system zone - "
6457 "Block = %llu, count = %lu", block, count);
 6458 /* err = 0. ext4_std_error should be a no-op */
6459 goto error_clean;
6460 }
6461
c9de560d 6462#ifdef AGGRESSIVE_CHECK
38b8f70c 6463 mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK;
c9de560d 6464#endif
38b8f70c
KS
6465 err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6466 count_clusters, mark_flags, &changed);
c9de560d 6467
38b8f70c
KS
6469 if (err && changed == 0)
6470 goto error_clean;
33e728c6 6471
38b8f70c
KS
6472#ifdef AGGRESSIVE_CHECK
6473 BUG_ON(changed != count_clusters);
6474#endif
e6362609 6475
f96c450d
DJ
6476 /*
6477 * We need to make sure we don't reuse the freed block until after the
6478 * transaction is committed. We make an exception if the inode is to be
6479 * written in writeback mode since writeback mode has weak data
6480 * consistency guarantees.
6481 */
6482 if (ext4_handle_valid(handle) &&
6483 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
6484 !ext4_should_writeback_data(inode))) {
7a2fcbf7
AK
6485 struct ext4_free_data *new_entry;
6486 /*
7444a072
MH
6487 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
6488 * to fail.
7a2fcbf7 6489 */
7444a072
MH
6490 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
6491 GFP_NOFS|__GFP_NOFAIL);
18aadd47
BJ
6492 new_entry->efd_start_cluster = bit;
6493 new_entry->efd_group = block_group;
6494 new_entry->efd_count = count_clusters;
6495 new_entry->efd_tid = handle->h_transaction->t_tid;
955ce5f5 6496
7a2fcbf7 6497 ext4_lock_group(sb, block_group);
7a2fcbf7 6498 ext4_mb_free_metadata(handle, &e4b, new_entry);
c9de560d 6499 } else {
d71c1ae2 6500 if (test_opt(sb, DISCARD)) {
247c3d21 6501 err = ext4_issue_discard(sb, block_group, bit,
0efcd739 6502 count_clusters);
fa606293
JK
6503 /*
6504 * Ignore EOPNOTSUPP error. This is consistent with
6505 * what happens when using journal.
6506 */
6507 if (err == -EOPNOTSUPP)
6508 err = 0;
6509 if (err)
d71c1ae2 6510 ext4_msg(sb, KERN_WARNING, "discard request in"
a00b482b 6511 " group:%u block:%d count:%lu failed"
d71c1ae2
LC
6512 " with %d", block_group, bit, count,
6513 err);
20cee68f 6514 }
6515
6516 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
d71c1ae2 6517
955ce5f5 6518 ext4_lock_group(sb, block_group);
84130193 6519 mb_free_blocks(inode, &e4b, bit, count_clusters);
c9de560d
AT
6520 }
6521
955ce5f5 6522 ext4_unlock_group(sb, block_group);
c9de560d 6523
9fe67149
EW
6524 /*
6525 * on a bigalloc file system, defer the s_freeclusters_counter
6526 * update to the caller (ext4_remove_space and friends) so they
6527 * can determine if a cluster freed here should be rereserved
6528 */
6529 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6530 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6531 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6532 percpu_counter_add(&sbi->s_freeclusters_counter,
6533 count_clusters);
6534 }
7d734532 6535
c9de560d
AT
6536 if (overflow && !err) {
6537 block += count;
6538 count = overflow;
33e728c6 6539 ext4_mb_unload_buddy(&e4b);
1e1c2b86
LC
6540 /* The range changed so it's no longer validated */
6541 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
c9de560d
AT
6542 goto do_more;
6543 }
33e728c6
KS
6544
6545error_clean:
6546 ext4_mb_unload_buddy(&e4b);
33e728c6 6547error_out:
c9de560d 6548 ext4_std_error(sb, err);
c9de560d 6549}
7360d173 6550
8ac3939d
RH
6551/**
6552 * ext4_free_blocks() -- Free given blocks and update quota
6553 * @handle: handle for this transaction
6554 * @inode: inode
6555 * @bh: optional buffer of the block to be freed
6556 * @block: starting physical block to be freed
6557 * @count: number of blocks to be freed
6558 * @flags: flags used by ext4_free_blocks
6559 */
6560void ext4_free_blocks(handle_t *handle, struct inode *inode,
6561 struct buffer_head *bh, ext4_fsblk_t block,
6562 unsigned long count, int flags)
6563{
6564 struct super_block *sb = inode->i_sb;
6565 unsigned int overflow;
6566 struct ext4_sb_info *sbi;
6567
6568 sbi = EXT4_SB(sb);
6569
8ac3939d
RH
6570 if (bh) {
6571 if (block)
6572 BUG_ON(block != bh->b_blocknr);
6573 else
6574 block = bh->b_blocknr;
6575 }
6576
11b6890b 6577 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
2ec6d0a5 6578 ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
11b6890b
KS
6579 return;
6580 }
6581
6582 might_sleep();
6583
8ac3939d
RH
6584 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6585 !ext4_inode_block_valid(inode, block, count)) {
6586 ext4_error(sb, "Freeing blocks not in datazone - "
6587 "block = %llu, count = %lu", block, count);
6588 return;
6589 }
1e1c2b86 6590 flags |= EXT4_FREE_BLOCKS_VALIDATED;
8ac3939d
RH
6591
6592 ext4_debug("freeing block %llu\n", block);
6593 trace_ext4_free_blocks(inode, block, count, flags);
6594
6595 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6596 BUG_ON(count > 1);
6597
6598 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6599 inode, bh, block);
6600 }
6601
6602 /*
6603 * If the extent to be freed does not begin on a cluster
6604 * boundary, we need to deal with partial clusters at the
6605 * beginning and end of the extent. Normally we will free
6606 * blocks at the beginning or the end unless we are explicitly
6607 * requested to avoid doing so.
6608 */
6609 overflow = EXT4_PBLK_COFF(sbi, block);
6610 if (overflow) {
6611 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6612 overflow = sbi->s_cluster_ratio - overflow;
6613 block += overflow;
6614 if (count > overflow)
6615 count -= overflow;
6616 else
6617 return;
6618 } else {
6619 block -= overflow;
6620 count += overflow;
6621 }
1e1c2b86
LC
6622 /* The range changed so it's no longer validated */
6623 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
8ac3939d
RH
6624 }
6625 overflow = EXT4_LBLK_COFF(sbi, count);
6626 if (overflow) {
6627 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6628 if (count > overflow)
6629 count -= overflow;
6630 else
6631 return;
6632 } else
6633 count += sbi->s_cluster_ratio - overflow;
1e1c2b86
LC
6634 /* The range changed so it's no longer validated */
6635 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
8ac3939d
RH
6636 }
6637
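/*
 * A worked example of the cluster-rounding arithmetic above for a
 * bigalloc file system, in the default case where neither NOFREE
 * flag is set. cluster_ratio (blocks per cluster, a power of two)
 * and the block numbers are made-up values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long block = 1001;	/* first block to free */
	unsigned long count = 35;		/* number of blocks */
	unsigned long cluster_ratio = 16;	/* blocks per cluster */
	unsigned long overflow;

	/* Round the start down to its cluster boundary. */
	overflow = block & (cluster_ratio - 1);	/* EXT4_PBLK_COFF analog */
	block -= overflow;			/* 1001 -> 992 */
	count += overflow;			/* 35 -> 44 */

	/* Round the end up to the next cluster boundary. */
	overflow = count & (cluster_ratio - 1);	/* EXT4_LBLK_COFF analog */
	if (overflow)
		count += cluster_ratio - overflow;	/* 44 -> 48 */

	/* Whole clusters 62..64 are freed: blocks 992..1039. */
	printf("block=%llu count=%lu clusters=%lu\n",
	       block, count, count / cluster_ratio);
	return 0;
}
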
6638 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6639 int i;
6640 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6641
6642 for (i = 0; i < count; i++) {
6643 cond_resched();
6644 if (is_metadata)
6e8f57fd
DB
6645 bh = sb_find_get_block_nonatomic(inode->i_sb,
6646 block + i);
8ac3939d
RH
6647 ext4_forget(handle, is_metadata, inode, bh, block + i);
6648 }
6649 }
6650
6651 ext4_mb_clear_bb(handle, inode, block, count, flags);
8ac3939d
RH
6652}
6653
2846e820 6654/**
0529155e 6655 * ext4_group_add_blocks() -- Add given blocks to an existing group
2846e820
AG
6656 * @handle: handle to this transaction
6657 * @sb: super block
4907cb7b 6658 * @block: start physical block to add to the block group
2846e820
AG
6659 * @count: number of blocks to free
6660 *
e73a347b 6661 * This marks the blocks as free in the bitmap and buddy.
2846e820 6662 */
cc7365df 6663int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
2846e820
AG
6664 ext4_fsblk_t block, unsigned long count)
6665{
2846e820
AG
6666 ext4_group_t block_group;
6667 ext4_grpblk_t bit;
2846e820 6668 struct ext4_sb_info *sbi = EXT4_SB(sb);
e73a347b 6669 struct ext4_buddy e4b;
5c657db4 6670 int err = 0;
d77147ff 6671 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6672 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6673 unsigned long cluster_count = last_cluster - first_cluster + 1;
5c657db4 6674 ext4_grpblk_t changed;
2846e820
AG
6675
6676 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6677
5c657db4 6678 if (cluster_count == 0)
4740b830
YY
6679 return 0;
6680
2846e820 6681 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
2846e820
AG
6682 /*
6683 * Check to see if we are freeing blocks across a group
6684 * boundary.
6685 */
d77147ff 6686 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6687 ext4_warning(sb, "too many blocks added to group %u",
cc7365df
YY
6688 block_group);
6689 err = -EINVAL;
03c7fc39
KS
6690 goto error_out;
6691 }
6692
6693 err = ext4_mb_load_buddy(sb, block_group, &e4b);
6694 if (err)
6695 goto error_out;
6696
6697 if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6698 ext4_error(sb, "Adding blocks in system zones - "
6699 "Block = %llu, count = %lu",
6700 block, count);
6701 err = -EINVAL;
6702 goto error_clean;
cc7365df 6703 }
2cd05cc3 6704
5c657db4
KS
6705 err = ext4_mb_mark_context(handle, sb, false, block_group, bit,
6706 cluster_count, EXT4_MB_BITMAP_MARKED_CHECK,
6707 &changed);
6708 if (err && changed == 0)
03c7fc39 6709 goto error_clean;
e73a347b 6710
5c657db4
KS
6711 if (changed != cluster_count)
6712 ext4_error(sb, "bit already cleared in group %u", block_group);
2846e820 6713
03c7fc39
KS
6714 ext4_lock_group(sb, block_group);
6715 mb_free_blocks(NULL, &e4b, bit, cluster_count);
6716 ext4_unlock_group(sb, block_group);
6717 percpu_counter_add(&sbi->s_freeclusters_counter,
5c657db4 6718 changed);
03c7fc39
KS
6719
6720error_clean:
03c7fc39
KS
6721 ext4_mb_unload_buddy(&e4b);
6722error_out:
2846e820 6723 ext4_std_error(sb, err);
cc7365df 6724 return err;
2846e820
AG
6725}
6726
7360d173
LC
6727/**
6728 * ext4_trim_extent -- function to TRIM one single free extent in the group
6729 * @sb: super block for the file system
6730 * @start: starting block of the free extent in the alloc. group
6731 * @count: number of blocks to TRIM
7360d173
LC
6732 * @e4b: ext4 buddy for the group
6733 *
6734 * Trim "count" blocks starting at "start" in the "group". To assure that no
6735 * one will allocate those blocks, mark it as used in buddy bitmap. This must
6736 * be called with under the group lock.
6737 */
bd2eea8d
WJ
6738static int ext4_trim_extent(struct super_block *sb,
6739 int start, int count, struct ext4_buddy *e4b)
e2cbd587 6740__releases(bitlock)
6741__acquires(bitlock)
7360d173
LC
6742{
6743 struct ext4_free_extent ex;
bd2eea8d 6744 ext4_group_t group = e4b->bd_group;
d71c1ae2 6745 int ret = 0;
7360d173 6746
b3d4c2b1
TM
6747 trace_ext4_trim_extent(sb, group, start, count);
6748
7360d173
LC
6749 assert_spin_locked(ext4_group_lock_ptr(sb, group));
6750
6751 ex.fe_start = start;
6752 ex.fe_group = group;
6753 ex.fe_len = count;
6754
6755 /*
6756 * Mark blocks used, so no one can reuse them while
6757 * being trimmed.
6758 */
6759 mb_mark_used(e4b, &ex);
6760 ext4_unlock_group(sb, group);
0efcd739 6761 ret = ext4_issue_discard(sb, group, start, count);
7360d173
LC
6762 ext4_lock_group(sb, group);
6763 mb_free_blocks(NULL, e4b, start, ex.fe_len);
d71c1ae2 6764 return ret;
7360d173
LC
6765}
6766
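/*
 * The lock-juggling pattern of ext4_trim_extent() above, sketched in
 * generic pthread form: reserve the range while the lock is held so
 * the slow discard cannot race with an allocation, drop the lock for
 * the discard, then retake it and release the reservation. The helper
 * declarations are hypothetical and left undefined.
 */
#include <pthread.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for mb_mark_used()/mb_free_blocks()/ext4_issue_discard(). */
void reserve_range(int start, int count);
void unreserve_range(int start, int count);
int issue_discard(int start, int count);	/* slow, may sleep */

/* Called with group_lock held; returns with group_lock held. */
static int trim_range(int start, int count)
{
	int ret;

	reserve_range(start, count);	/* nobody can allocate these now */
	pthread_mutex_unlock(&group_lock);
	ret = issue_discard(start, count);
	pthread_mutex_lock(&group_lock);
	unreserve_range(start, count);
	return ret;
}
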
45e4ab32
JK
6767static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
6768 ext4_group_t grp)
6769{
7c784d62
SJS
6770 unsigned long nr_clusters_in_group;
6771
6772 if (grp < (ext4_get_groups_count(sb) - 1))
6773 nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
6774 else
6775 nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
6776 ext4_group_first_block_no(sb, grp))
6777 >> EXT4_CLUSTER_BITS(sb);
6778
6779 return nr_clusters_in_group - 1;
45e4ab32
JK
6780}
6781
5229a658
JK
6782static bool ext4_trim_interrupted(void)
6783{
6784 return fatal_signal_pending(current) || freezing(current);
6785}
6786
6920b391
WJ
6787static int ext4_try_to_trim_range(struct super_block *sb,
6788 struct ext4_buddy *e4b, ext4_grpblk_t start,
6789 ext4_grpblk_t max, ext4_grpblk_t minblocks)
a5fda113
TT
6790__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
6791__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
6920b391 6792{
68da4c44 6793 ext4_grpblk_t next, count, free_count, last, origin_start;
45e4ab32 6794 bool set_trimmed = false;
6920b391 6795 void *bitmap;
6920b391 6796
17220215
BL
6797 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
6798 return 0;
6799
68da4c44 6800 last = ext4_last_grp_cluster(sb, e4b->bd_group);
6920b391 6801 bitmap = e4b->bd_bitmap;
68da4c44 6802 if (start == 0 && max >= last)
45e4ab32 6803 set_trimmed = true;
68da4c44 6804 origin_start = start;
de8bf0e5 6805 start = max(e4b->bd_info->bb_first_free, start);
6920b391
WJ
6806 count = 0;
6807 free_count = 0;
6808
6809 while (start <= max) {
6810 start = mb_find_next_zero_bit(bitmap, max + 1, start);
6811 if (start > max)
6812 break;
68da4c44
YB
6813
6814 next = mb_find_next_bit(bitmap, last + 1, start);
6815 if (origin_start == 0 && next >= last)
6816 set_trimmed = true;
6920b391
WJ
6817
6818 if ((next - start) >= minblocks) {
afcc4e32
LB
6819 int ret = ext4_trim_extent(sb, start, next - start, e4b);
6820
6920b391 6821 if (ret && ret != -EOPNOTSUPP)
45e4ab32 6822 return count;
6920b391
WJ
6823 count += next - start;
6824 }
6825 free_count += next - start;
6826 start = next + 1;
6827
5229a658
JK
6828 if (ext4_trim_interrupted())
6829 return count;
6920b391
WJ
6830
6831 if (need_resched()) {
6832 ext4_unlock_group(sb, e4b->bd_group);
6833 cond_resched();
6834 ext4_lock_group(sb, e4b->bd_group);
6835 }
6836
6837 if ((e4b->bd_info->bb_free - free_count) < minblocks)
6838 break;
6839 }
6840
45e4ab32
JK
6841 if (set_trimmed)
6842 EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
6843
6920b391
WJ
6844 return count;
6845}
6846
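/*
 * A user-space sketch of the zero-run scan at the heart of
 * ext4_try_to_trim_range(), on a byte-per-bit bitmap instead of the
 * kernel's mb_find_next_zero_bit()/mb_find_next_bit() helpers. Runs
 * shorter than minblocks are skipped, as above; names are made up.
 */
#include <stdio.h>

/* Report runs of zero bits in bitmap[0..max] at least minblocks long. */
static void scan_free_runs(const unsigned char *bitmap, int max, int minblocks)
{
	int start = 0;

	while (start <= max) {
		int next;

		while (start <= max && bitmap[start])	/* next free block */
			start++;
		if (start > max)
			break;
		next = start;
		while (next <= max && !bitmap[next])	/* end of free run */
			next++;
		if (next - start >= minblocks)
			printf("trim [%d, %d)\n", start, next);
		start = next + 1;	/* bit at next is set; skip it */
	}
}

int main(void)
{
	unsigned char bitmap[16] = { 1, 0, 0, 0, 1, 1, 0, 0,
				     0, 0, 0, 1, 0, 0, 1, 1 };

	scan_free_runs(bitmap, 15, 3);	/* prints [1, 4) and [6, 11) */
	return 0;
}
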
7360d173
LC
6847/**
6848 * ext4_trim_all_free -- function to trim all free space in alloc. group
6849 * @sb: super block for file system
22612283 6850 * @group: group to be trimmed
7360d173
LC
6851 * @start: first group block to examine
6852 * @max: last group block to examine
6853 * @minblocks: minimum extent block count
6854 *
7360d173
LC
 6855 * ext4_trim_all_free walks through the group's block bitmap searching for free
 6856 * extents. When a free extent is found, it is marked as used in the group buddy
 6857 * bitmap. Then a TRIM command is issued on this extent and the extent is freed
b6f5558c 6858 * in the group buddy bitmap.
7360d173 6859 */
0b75a840 6860static ext4_grpblk_t
78944086
LC
6861ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6862 ext4_grpblk_t start, ext4_grpblk_t max,
45e4ab32 6863 ext4_grpblk_t minblocks)
7360d173 6864{
78944086 6865 struct ext4_buddy e4b;
6920b391 6866 int ret;
7360d173 6867
b3d4c2b1
TM
6868 trace_ext4_trim_all_free(sb, group, start, max);
6869
78944086
LC
6870 ret = ext4_mb_load_buddy(sb, group, &e4b);
6871 if (ret) {
9651e6b2
KK
6872 ext4_warning(sb, "Error %d loading buddy information for %u",
6873 ret, group);
78944086
LC
6874 return ret;
6875 }
28739eea
LC
6876
6877 ext4_lock_group(sb, group);
7360d173 6878
6920b391 6879 if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
45e4ab32 6880 minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6920b391 6881 ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
45e4ab32 6882 else
6920b391 6883 ret = 0;
3d56b8d2 6884
7360d173 6885 ext4_unlock_group(sb, group);
78944086 6886 ext4_mb_unload_buddy(&e4b);
7360d173
LC
6887
6888 ext4_debug("trimmed %d blocks in the group %d\n",
6920b391 6889 ret, group);
7360d173 6890
d71c1ae2 6891 return ret;
7360d173
LC
6892}
6893
6894/**
6895 * ext4_trim_fs() -- trim ioctl handle function
6896 * @sb: superblock for filesystem
6897 * @range: fstrim_range structure
6898 *
6899 * start: First Byte to trim
6900 * len: number of Bytes to trim from start
6901 * minlen: minimum extent length in Bytes
6902 * ext4_trim_fs goes through all allocation groups containing Bytes from
6903 * start to start+len. For each such a group ext4_trim_all_free function
6904 * is invoked to trim all free space.
6905 */
6906int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6907{
7b47ef52 6908 unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
78944086 6909 struct ext4_group_info *grp;
913eed83 6910 ext4_group_t group, first_group, last_group;
7137d7a4 6911 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
913eed83 6912 uint64_t start, end, minlen, trimmed = 0;
0f0a25bf
JK
6913 ext4_fsblk_t first_data_blk =
6914 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
913eed83 6915 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
7360d173
LC
6916 int ret = 0;
6917
6918 start = range->start >> sb->s_blocksize_bits;
913eed83 6919 end = start + (range->len >> sb->s_blocksize_bits) - 1;
aaf7d73e
LC
6920 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6921 range->minlen >> sb->s_blocksize_bits);
7360d173 6922
5de35e8d
LC
6923 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6924 start >= max_blks ||
6925 range->len < sb->s_blocksize)
7360d173 6926 return -EINVAL;
173b6e38 6927 /* No point in trying to trim less than the discard granularity */
7b47ef52 6928 if (range->minlen < discard_granularity) {
173b6e38 6929 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
7b47ef52 6930 discard_granularity >> sb->s_blocksize_bits);
173b6e38
JK
6931 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6932 goto out;
6933 }
45e4ab32 6934 if (end >= max_blks - 1)
913eed83
LC
6935 end = max_blks - 1;
6936 if (end <= first_data_blk)
22f10457 6937 goto out;
913eed83 6938 if (start < first_data_blk)
0f0a25bf 6939 start = first_data_blk;
7360d173 6940
913eed83 6941 /* Determine first and last group to examine based on start and end */
7360d173 6942 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
7137d7a4 6943 &first_group, &first_cluster);
913eed83 6944 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
7137d7a4 6945 &last_group, &last_cluster);
7360d173 6946
913eed83
LC
6947 /* end now represents the last cluster to discard in this group */
6948 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
7360d173
LC
6949
6950 for (group = first_group; group <= last_group; group++) {
5229a658
JK
6951 if (ext4_trim_interrupted())
6952 break;
78944086 6953 grp = ext4_get_group_info(sb, group);
5354b2af
TT
6954 if (!grp)
6955 continue;
78944086
LC
6956 /* We only do this if the grp has never been initialized */
6957 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
adb7ef60 6958 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
78944086
LC
6959 if (ret)
6960 break;
7360d173
LC
6961 }
6962
0ba08517 6963 /*
913eed83
LC
6964 * For all the groups except the last one, last cluster will
6965 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6966 * change it for the last group, note that last_cluster is
6967 * already computed earlier by ext4_get_group_no_and_offset()
0ba08517 6968 */
45e4ab32 6969 if (group == last_group)
913eed83 6970 end = last_cluster;
78944086 6971 if (grp->bb_free >= minlen) {
7137d7a4 6972 cnt = ext4_trim_all_free(sb, group, first_cluster,
45e4ab32 6973 end, minlen);
7360d173
LC
6974 if (cnt < 0) {
6975 ret = cnt;
7360d173
LC
6976 break;
6977 }
21e7fd22 6978 trimmed += cnt;
7360d173 6979 }
913eed83
LC
6980
6981 /*
6982 * For every group except the first one, we are sure
6983 * that the first cluster to discard will be cluster #0.
6984 */
7137d7a4 6985 first_cluster = 0;
7360d173 6986 }
7360d173 6987
3d56b8d2 6988 if (!ret)
2327fb2e 6989 EXT4_SB(sb)->s_last_trim_minblks = minlen;
3d56b8d2 6990
22f10457 6991out:
aaf7d73e 6992 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
7360d173
LC
6993 return ret;
6994}
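/*
 * A worked example of how the byte range in struct fstrim_range maps
 * onto groups and clusters, as computed at the top of ext4_trim_fs().
 * Geometry is made up: 4 KiB blocks, one block per cluster, 32768
 * blocks per group.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long range_start = 1ULL << 30;	/* trim from 1 GiB */
	unsigned long long range_len = 1ULL << 30;	/* trim 1 GiB */
	unsigned int blocksize_bits = 12;		/* 4 KiB blocks */
	unsigned long long blocks_per_group = 32768;

	unsigned long long start = range_start >> blocksize_bits;
	unsigned long long end = start + (range_len >> blocksize_bits) - 1;

	/* ext4_get_group_no_and_offset() analog for both endpoints. */
	unsigned long long first_group = start / blocks_per_group;
	unsigned long long first_cluster = start % blocks_per_group;
	unsigned long long last_group = end / blocks_per_group;
	unsigned long long last_cluster = end % blocks_per_group;

	/* groups 8..15, from cluster 0 of group 8 to 32767 of group 15 */
	printf("groups %llu..%llu, clusters %llu..%llu\n",
	       first_group, last_group, first_cluster, last_cluster);
	return 0;
}
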
0c9ec4be
DW
6995
6996/* Iterate all the free extents in the group. */
6997int
6998ext4_mballoc_query_range(
6999 struct super_block *sb,
7000 ext4_group_t group,
4a622e4d 7001 ext4_grpblk_t first,
0c9ec4be 7002 ext4_grpblk_t end,
4a622e4d 7003 ext4_mballoc_query_range_fn meta_formatter,
0c9ec4be
DW
7004 ext4_mballoc_query_range_fn formatter,
7005 void *priv)
7006{
7007 void *bitmap;
4a622e4d 7008 ext4_grpblk_t start, next;
0c9ec4be
DW
7009 struct ext4_buddy e4b;
7010 int error;
7011
7012 error = ext4_mb_load_buddy(sb, group, &e4b);
7013 if (error)
7014 return error;
7015 bitmap = e4b.bd_bitmap;
7016
7017 ext4_lock_group(sb, group);
7018
4a622e4d 7019 start = max(e4b.bd_info->bb_first_free, first);
0c9ec4be
DW
7020 if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
7021 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
4a622e4d
TT
7022 if (meta_formatter && start != first) {
7023 if (start > end)
7024 start = end;
7025 ext4_unlock_group(sb, group);
7026 error = meta_formatter(sb, group, first, start - first,
7027 priv);
7028 if (error)
7029 goto out_unload;
7030 ext4_lock_group(sb, group);
7031 }
0c9ec4be
DW
7032 while (start <= end) {
7033 start = mb_find_next_zero_bit(bitmap, end + 1, start);
7034 if (start > end)
7035 break;
7036 next = mb_find_next_bit(bitmap, end + 1, start);
7037
7038 ext4_unlock_group(sb, group);
7039 error = formatter(sb, group, start, next - start, priv);
7040 if (error)
7041 goto out_unload;
7042 ext4_lock_group(sb, group);
7043
7044 start = next + 1;
7045 }
7046
7047 ext4_unlock_group(sb, group);
7048out_unload:
7049 ext4_mb_unload_buddy(&e4b);
7050
7051 return error;
7052}
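
/*
 * A user-space sketch of the callback-per-extent pattern used by
 * ext4_mballoc_query_range(): the caller supplies a formatter that is
 * invoked once per free run, with an opaque priv cookie threaded
 * through. All names are hypothetical.
 */
#include <stdio.h>

typedef int (*extent_fn)(unsigned int start, unsigned int len, void *priv);

/* Walk zero runs in bitmap[0..max], reporting each through @fn. */
static int for_each_free_extent(const unsigned char *bitmap, unsigned int max,
				extent_fn fn, void *priv)
{
	unsigned int start = 0;

	while (start <= max) {
		unsigned int next;
		int err;

		while (start <= max && bitmap[start])
			start++;
		if (start > max)
			break;
		next = start;
		while (next <= max && !bitmap[next])
			next++;

		err = fn(start, next - start, priv);	/* report the run */
		if (err)
			return err;
		start = next + 1;
	}
	return 0;
}

static int print_extent(unsigned int start, unsigned int len, void *priv)
{
	unsigned long *total = priv;

	*total += len;
	printf("free extent: start=%u len=%u\n", start, len);
	return 0;
}

int main(void)
{
	unsigned char bitmap[8] = { 1, 0, 0, 1, 0, 0, 0, 1 };
	unsigned long total = 0;

	for_each_free_extent(bitmap, 7, print_extent, &total);
	printf("total free: %lu\n", total);	/* 5 */
	return 0;
}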
7c9fa399
KS
7053
7054#ifdef CONFIG_EXT4_KUNIT_TESTS
7055#include "mballoc-test.c"
7056#endif