Commit | Line | Data |
---|---|---|
7c9fa399 KS |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * KUnit test of ext4 multiblocks allocation. | |
4 | */ | |
5 | ||
6 | #include <kunit/test.h> | |
7 | #include <kunit/static_stub.h> | |
6c5e0c9c | 8 | #include <linux/random.h> |
7c9fa399 KS |
9 | |
10 | #include "ext4.h" | |
11 | ||
/* Per-group test state: in-memory block bitmap plus placeholder descriptor. */
struct mbt_grp_ctx {
	struct buffer_head bitmap_bh;
	/* desc and gd_bh are just the place holders for now */
	struct ext4_group_desc desc;
	struct buffer_head gd_bh;
};

/* Whole-"filesystem" test state: one mbt_grp_ctx per block group. */
struct mbt_ctx {
	struct mbt_grp_ctx *grp_ctx;
};

/* Fake super block with the test context embedded alongside it. */
struct mbt_ext4_super_block {
	struct super_block sb;
	struct mbt_ctx mbt_ctx;
};

/* Map a struct super_block back to the embedding test context / group. */
#define MBT_CTX(_sb) (&(container_of((_sb), struct mbt_ext4_super_block, sb)->mbt_ctx))
#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])
30 | ||
67d2a11b KS |
/* Minimal super_operations so the fake sb has a non-NULL s_op to install. */
static const struct super_operations mbt_sops = {
};
33 | ||
/*
 * Run ext4_mb_init() on the fake super block, faking just enough block
 * device state for it to succeed.  Returns 0 or a negative errno; on
 * failure the fake bdev/queue are freed again.
 */
static int mbt_mb_init(struct super_block *sb)
{
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend-> sbi->s_buddy_cache =
	 * new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	return 0;

err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}
67 | ||
68 | static void mbt_mb_release(struct super_block *sb) | |
69 | { | |
70 | ext4_mb_release(sb); | |
71 | kfree(sb->s_bdev->bd_queue); | |
72 | kfree(sb->s_bdev); | |
73 | } | |
74 | ||
7c9fa399 KS |
/*
 * Allocate the fake super block plus the ext4 sb_info / on-disk super
 * block structures the mballoc code reads.  Returns the embedded
 * struct super_block, or NULL with all partial allocations freed.
 */
static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct ext4_super_block *es = kzalloc(sizeof(*es), GFP_KERNEL);
	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	struct mbt_ext4_super_block *fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);

	if (fsb == NULL || sbi == NULL || es == NULL)
		goto out;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out;

	bgl_lock_init(sbi->s_blockgroup_lock);

	sbi->s_es = es;
	fsb->sb.s_fs_info = sbi;

	return &fsb->sb;

out:
	kfree(fsb);
	kfree(sbi);
	kfree(es);
	return NULL;
}
102 | ||
103 | static void mbt_ext4_free_super_block(struct super_block *sb) | |
104 | { | |
105 | struct mbt_ext4_super_block *fsb = | |
106 | container_of(sb, struct mbt_ext4_super_block, sb); | |
107 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
108 | ||
67d2a11b | 109 | kfree(sbi->s_blockgroup_lock); |
7c9fa399 KS |
110 | kfree(sbi->s_es); |
111 | kfree(sbi); | |
112 | kfree(fsb); | |
113 | } | |
114 | ||
/* Geometry one parameterized test instance runs with. */
struct mbt_ext4_block_layout {
	unsigned char blocksize_bits;	/* log2 of block size */
	unsigned int cluster_bits;	/* log2 of blocks per cluster */
	uint32_t blocks_per_group;
	ext4_group_t group_count;
	uint16_t desc_size;		/* group descriptor size in bytes */
};
122 | ||
/* Apply @layout to the fake super block; must run before mbt_ctx_init(). */
static void mbt_init_sb_layout(struct super_block *sb,
			       struct mbt_ext4_block_layout *layout)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sb->s_blocksize = 1UL << layout->blocksize_bits;
	sb->s_blocksize_bits = layout->blocksize_bits;

	sbi->s_groups_count = layout->group_count;
	sbi->s_blocks_per_group = layout->blocks_per_group;
	sbi->s_cluster_bits = layout->cluster_bits;
	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
	sbi->s_clusters_per_group = layout->blocks_per_group >>
				    layout->cluster_bits;
	sbi->s_desc_size = layout->desc_size;
	/* assumes desc_size is a power of two — TODO confirm for new layouts */
	sbi->s_desc_per_block_bits =
		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

	es->s_first_data_block = cpu_to_le32(0);
	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
					    layout->group_count);
}
147 | ||
/*
 * Allocate one group's block bitmap.  Bits past the per-group cluster
 * count are pre-set so allocation never strays beyond the group, and
 * the placeholder descriptor starts with all clusters free.
 */
static int mbt_grp_ctx_init(struct super_block *sb,
			    struct mbt_grp_ctx *grp_ctx)
{
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
	if (grp_ctx->bitmap_bh.b_data == NULL)
		return -ENOMEM;
	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

	return 0;
}
161 | ||
162 | static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx) | |
163 | { | |
164 | kfree(grp_ctx->bitmap_bh.b_data); | |
165 | grp_ctx->bitmap_bh.b_data = NULL; | |
166 | } | |
167 | ||
/* Mark clusters [start, start + len) of @group used in the test bitmap. */
static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
			      unsigned int start, unsigned int len)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}
175 | ||
6c5e0c9c KS |
/* Raw block-bitmap storage backing @group. */
static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	return grp_ctx->bitmap_bh.b_data;
}
182 | ||
7c9fa399 KS |
/* called after mbt_init_sb_layout */
static int mbt_ctx_init(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
			       GFP_KERNEL);
	if (ctx->grp_ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++)
		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
			goto out;

	/*
	 * first data block(first cluster in first group) is used by
	 * metadata, mark it used to avoid to alloc data block at first
	 * block which will fail ext4_sb_block_valid check.
	 */
	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

	return 0;
out:
	/* i is the first group that failed; unwind the ones before it */
	while (i-- > 0)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
	return -ENOMEM;
}
214 | ||
215 | static void mbt_ctx_release(struct super_block *sb) | |
216 | { | |
217 | struct mbt_ctx *ctx = MBT_CTX(sb); | |
218 | ext4_group_t i, ngroups = ext4_get_groups_count(sb); | |
219 | ||
220 | for (i = 0; i < ngroups; i++) | |
221 | mbt_grp_ctx_release(&ctx->grp_ctx[i]); | |
222 | kfree(ctx->grp_ctx); | |
223 | } | |
224 | ||
/* Stub: hand out the in-memory test bitmap instead of reading the disk. */
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
				   bool ignore_locked)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
	get_bh(&grp_ctx->bitmap_bh);
	return &grp_ctx->bitmap_bh;
}
235 | ||
/* Stub: no I/O to wait for — just set the flags downstream code checks. */
static int ext4_wait_block_bitmap_stub(struct super_block *sb,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	/*
	 * real ext4_wait_block_bitmap will set these flags and
	 * functions like ext4_mb_init_cache will verify the flags.
	 */
	set_buffer_uptodate(bh);
	set_bitmap_uptodate(bh);
	set_buffer_verified(bh);
	return 0;
}
249 | ||
250 | static struct ext4_group_desc * | |
251 | ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group, | |
252 | struct buffer_head **bh) | |
253 | { | |
254 | struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group); | |
255 | ||
256 | if (bh != NULL) | |
257 | *bh = &grp_ctx->gd_bh; | |
258 | ||
259 | return &grp_ctx->desc; | |
260 | } | |
261 | ||
262 | static int | |
263 | ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state, | |
264 | ext4_group_t group, ext4_grpblk_t blkoff, | |
265 | ext4_grpblk_t len, int flags, | |
266 | ext4_grpblk_t *ret_changed) | |
267 | { | |
268 | struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group); | |
269 | struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh; | |
270 | ||
271 | if (state) | |
272 | mb_set_bits(bitmap_bh->b_data, blkoff, len); | |
273 | else | |
274 | mb_clear_bits(bitmap_bh->b_data, blkoff, len); | |
275 | ||
276 | return 0; | |
277 | } | |
278 | ||
7c9fa399 KS |
#define TEST_GOAL_GROUP 1
/*
 * Per-test setup: build the fake super block from the parameterized
 * layout, install the static stubs, then bring up mballoc.  test->priv
 * carries the super block to the test bodies.
 */
static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
	kunit_activate_static_stub(test,
				   ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test,
				   ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test,
				   ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test,
				   ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);

	/* stub functions will be called in mbt_mb_init->ext4_mb_init */
	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}
322 | ||
323 | static void mbt_kunit_exit(struct kunit *test) | |
324 | { | |
325 | struct super_block *sb = (struct super_block *)test->priv; | |
326 | ||
67d2a11b | 327 | mbt_mb_release(sb); |
7c9fa399 KS |
328 | mbt_ctx_release(sb); |
329 | mbt_ext4_free_super_block(sb); | |
330 | } | |
331 | ||
332 | static void test_new_blocks_simple(struct kunit *test) | |
333 | { | |
334 | struct super_block *sb = (struct super_block *)test->priv; | |
d60c5369 | 335 | struct inode *inode; |
7c9fa399 KS |
336 | struct ext4_allocation_request ar; |
337 | ext4_group_t i, goal_group = TEST_GOAL_GROUP; | |
338 | int err = 0; | |
339 | ext4_fsblk_t found; | |
340 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
341 | ||
d60c5369 AB |
342 | inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL); |
343 | if (!inode) | |
344 | return; | |
345 | ||
346 | inode->i_sb = sb; | |
347 | ar.inode = inode; | |
7c9fa399 KS |
348 | |
349 | /* get block at goal */ | |
350 | ar.goal = ext4_group_first_block_no(sb, goal_group); | |
351 | found = ext4_mb_new_blocks_simple(&ar, &err); | |
352 | KUNIT_ASSERT_EQ_MSG(test, ar.goal, found, | |
353 | "failed to alloc block at goal, expected %llu found %llu", | |
354 | ar.goal, found); | |
355 | ||
356 | /* get block after goal in goal group */ | |
357 | ar.goal = ext4_group_first_block_no(sb, goal_group); | |
358 | found = ext4_mb_new_blocks_simple(&ar, &err); | |
359 | KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found, | |
360 | "failed to alloc block after goal in goal group, expected %llu found %llu", | |
361 | ar.goal + 1, found); | |
362 | ||
363 | /* get block after goal group */ | |
364 | mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb)); | |
365 | ar.goal = ext4_group_first_block_no(sb, goal_group); | |
366 | found = ext4_mb_new_blocks_simple(&ar, &err); | |
367 | KUNIT_ASSERT_EQ_MSG(test, | |
368 | ext4_group_first_block_no(sb, goal_group + 1), found, | |
369 | "failed to alloc block after goal group, expected %llu found %llu", | |
370 | ext4_group_first_block_no(sb, goal_group + 1), found); | |
371 | ||
372 | /* get block before goal group */ | |
373 | for (i = goal_group; i < ext4_get_groups_count(sb); i++) | |
374 | mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb)); | |
375 | ar.goal = ext4_group_first_block_no(sb, goal_group); | |
376 | found = ext4_mb_new_blocks_simple(&ar, &err); | |
377 | KUNIT_ASSERT_EQ_MSG(test, | |
378 | ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found, | |
379 | "failed to alloc block before goal group, expected %llu found %llu", | |
380 | ext4_group_first_block_no(sb, 0 + EXT4_C2B(sbi, 1)), found); | |
381 | ||
382 | /* no block available, fail to allocate block */ | |
383 | for (i = 0; i < ext4_get_groups_count(sb); i++) | |
384 | mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb)); | |
385 | ar.goal = ext4_group_first_block_no(sb, goal_group); | |
386 | found = ext4_mb_new_blocks_simple(&ar, &err); | |
387 | KUNIT_ASSERT_NE_MSG(test, err, 0, | |
388 | "unexpectedly get block when no block is available"); | |
389 | } | |
390 | ||
6c5e0c9c KS |
/* number of random, non-overlapping ranges generated per test run */
#define TEST_RANGE_COUNT 8

/* A [start, start + len) cluster range within one group; len may be 0. */
struct test_range {
	ext4_grpblk_t start;
	ext4_grpblk_t len;
};
397 | ||
/*
 * Split the group's clusters into @count equal slots and generate one
 * random range per slot, so the ranges never overlap each other.
 */
static void
mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
			 int count)
{
	ext4_grpblk_t start, len, max;
	int i;

	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
	for (i = 0; i < count; i++) {
		start = get_random_u32() % max;
		len = get_random_u32() % max;
		len = min(len, max - start);	/* clamp range to its slot */

		ranges[i].start = start + i * max;
		ranges[i].len = len;
	}
}
415 | ||
/*
 * Assert exactly [start, start + len) is free in @goal_group and every
 * other group is fully used.
 */
static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
			    ext4_group_t goal_group, ext4_grpblk_t start,
			    ext4_grpblk_t len)
{
	void *bitmap;
	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;

	for (i = 0; i < ext4_get_groups_count(sb); i++) {
		if (i == goal_group)
			continue;

		/* every non-goal group must have no free cluster at all */
		bitmap = mbt_ctx_bitmap(sb, i);
		bit = mb_find_next_zero_bit(bitmap, max, 0);
		KUNIT_ASSERT_EQ_MSG(test, bit, max,
				    "free block on unexpected group %d", i);
	}

	bitmap = mbt_ctx_bitmap(sb, goal_group);
	bit = mb_find_next_zero_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, bit, start);

	bit = mb_find_next_bit(bitmap, max, bit + 1);
	KUNIT_ASSERT_EQ(test, bit, start + len);
}
442 | ||
443 | static void | |
444 | test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group, | |
445 | ext4_grpblk_t start, ext4_grpblk_t len) | |
446 | { | |
447 | struct super_block *sb = (struct super_block *)test->priv; | |
448 | struct ext4_sb_info *sbi = EXT4_SB(sb); | |
d60c5369 | 449 | struct inode *inode; |
6c5e0c9c KS |
450 | ext4_fsblk_t block; |
451 | ||
d60c5369 AB |
452 | inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL); |
453 | if (!inode) | |
454 | return; | |
455 | inode->i_sb = sb; | |
456 | ||
6c5e0c9c KS |
457 | if (len == 0) |
458 | return; | |
459 | ||
460 | block = ext4_group_first_block_no(sb, goal_group) + | |
461 | EXT4_C2B(sbi, start); | |
d60c5369 | 462 | ext4_free_blocks_simple(inode, block, len); |
6c5e0c9c KS |
463 | validate_free_blocks_simple(test, sb, goal_group, start, len); |
464 | mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb)); | |
465 | } | |
466 | ||
467 | static void test_free_blocks_simple(struct kunit *test) | |
468 | { | |
469 | struct super_block *sb = (struct super_block *)test->priv; | |
470 | ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); | |
471 | ext4_group_t i; | |
472 | struct test_range ranges[TEST_RANGE_COUNT]; | |
473 | ||
474 | for (i = 0; i < ext4_get_groups_count(sb); i++) | |
475 | mbt_ctx_mark_used(sb, i, 0, max); | |
476 | ||
477 | mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT); | |
478 | for (i = 0; i < TEST_RANGE_COUNT; i++) | |
479 | test_free_blocks_simple_range(test, TEST_GOAL_GROUP, | |
480 | ranges[i].start, ranges[i].len); | |
481 | } | |
482 | ||
2b81493f KS |
/*
 * Run ext4_mb_mark_diskspace_used() for one found extent and assert the
 * goal group's bitmap ends up with exactly [start, start + len) set.
 */
static void
test_mark_diskspace_used_range(struct kunit *test,
			       struct ext4_allocation_context *ac,
			       ext4_grpblk_t start,
			       ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int ret;
	void *bitmap;
	ext4_grpblk_t i, max;

	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
	if (len == 0)
		return;

	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
	ac->ac_b_ex.fe_start = start;
	ac->ac_b_ex.fe_len = len;

	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
	memset(bitmap, 0, sb->s_blocksize);
	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* exactly one run of set bits: starts at @start, ends at start+len */
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	i = mb_find_next_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, i, start);
	i = mb_find_next_zero_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, i, start + len);
	i = mb_find_next_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, max, i);
}
515 | ||
/* Drive test_mark_diskspace_used_range() over random extents. */
static void test_mark_diskspace_used(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_context ac;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	/* minimal context: mark_diskspace_used requires a found extent */
	ac.ac_status = AC_STATUS_FOUND;
	ac.ac_sb = sb;
	ac.ac_inode = inode;
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
					       ranges[i].len);
}
538 | ||
67d2a11b KS |
/*
 * Reference buddy generator: build the buddy bitmap and the group
 * counters for @bitmap by hand, to compare against what
 * ext4_mb_generate_buddy() produces for the same input.
 */
static void mbt_generate_buddy(struct super_block *sb, void *buddy,
			       void *bitmap, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	uint32_t order, off;
	void *bb, *bb_h;
	int max;

	/* buddy starts all-used; grp counters start from zero */
	memset(buddy, 0xff, sb->s_blocksize);
	memset(grp, 0, offsetof(struct ext4_group_info,
				 bb_counters[MB_NUM_ORDERS(sb)]));

	/* pass 1: count free clusters, pair aligned ones into order 1 */
	bb = bitmap;
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	bb_h = buddy + sbi->s_mb_offsets[1];

	off = mb_find_next_zero_bit(bb, max, 0);
	grp->bb_first_free = off;
	while (off < max) {
		grp->bb_counters[0]++;
		grp->bb_free++;

		/* even offset with a free odd sibling -> one order-1 buddy */
		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
			grp->bb_free++;
			grp->bb_counters[0]--;
			mb_clear_bit(off >> 1, bb_h);
			grp->bb_counters[1]++;
			grp->bb_largest_free_order = 1;
			off++;
		}

		off = mb_find_next_zero_bit(bb, max, off + 1);
	}

	/* higher orders: keep promoting aligned free buddy pairs */
	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
		bb = buddy + sbi->s_mb_offsets[order];
		bb_h = buddy + sbi->s_mb_offsets[order + 1];
		max = max >> 1;
		off = mb_find_next_zero_bit(bb, max, 0);

		while (off < max) {
			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
				/* consume the pair at this order... */
				mb_set_bits(bb, off, 2);
				grp->bb_counters[order] -= 2;
				/* ...and surface it at the next order */
				mb_clear_bit(off >> 1, bb_h);
				grp->bb_counters[order + 1]++;
				grp->bb_largest_free_order = order + 1;
				off++;
			}

			off = mb_find_next_zero_bit(bb, max, off + 1);
		}
	}

	/* final pass: one fragment per run of consecutive free clusters */
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	off = mb_find_next_zero_bit(bitmap, max, 0);
	while (off < max) {
		grp->bb_fragments++;

		off = mb_find_next_bit(bitmap, max, off + 1);
		if (off + 1 >= max)
			break;

		off = mb_find_next_zero_bit(bitmap, max, off + 1);
	}
}
605 | ||
/* Assert two group_info structures carry identical buddy statistics. */
static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
			struct ext4_group_info *grp2)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
			grp2->bb_first_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
			grp2->bb_fragments);
	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
			grp2->bb_largest_free_order);

	/* order 0 is skipped: buddy counters start at order 1 comparison */
	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
				    grp2->bb_counters[i],
				    "bb_counters[%d] diffs, expected %d, generated %d",
				    i, grp1->bb_counters[i],
				    grp2->bb_counters[i]);
	}
}
629 | ||
630 | static void | |
631 | do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap, | |
632 | void *mbt_buddy, struct ext4_group_info *mbt_grp, | |
633 | void *ext4_buddy, struct ext4_group_info *ext4_grp) | |
634 | { | |
635 | int i; | |
636 | ||
637 | mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp); | |
638 | ||
639 | for (i = 0; i < MB_NUM_ORDERS(sb); i++) | |
640 | ext4_grp->bb_counters[i] = 0; | |
641 | /* needed by validation in ext4_mb_generate_buddy */ | |
642 | ext4_grp->bb_free = mbt_grp->bb_free; | |
643 | memset(ext4_buddy, 0xff, sb->s_blocksize); | |
644 | ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP, | |
645 | ext4_grp); | |
646 | ||
647 | KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize), | |
648 | 0); | |
649 | mbt_validate_group_info(test, mbt_grp, ext4_grp); | |
650 | } | |
651 | ||
/*
 * Progressively mark random ranges used in the bitmap and verify that
 * ext4_mb_generate_buddy() matches the reference generator each time.
 */
static void test_mb_generate_buddy(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *expected_bb, *generate_bb;
	struct ext4_group_info *expected_grp, *generate_grp;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				     bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
	KUNIT_ASSERT_NOT_NULL(test, generate_grp);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++) {
		/* ranges accumulate: bitmap gets fuller every iteration */
		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
		do_test_generate_buddy(test, sb, bitmap, expected_bb,
				       expected_grp, generate_bb, generate_grp);
	}
}
679 | ||
ac96b56a KS |
/*
 * Mark one extent used via mb_mark_used() and verify the buddy state
 * matches a freshly regenerated buddy for the mirrored bitmap.
 */
static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int i;

	/* mb_mark_used only accepts non-zero len */
	if (len == 0)
		return;

	ex.fe_start = start;
	ex.fe_len = len;
	ex.fe_group = TEST_GOAL_GROUP;
	mb_mark_used(e4b, &ex);

	/* mirror the change in the shadow bitmap and rebuild the buddy */
	mb_set_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free -= len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}
710 | ||
711 | static void test_mb_mark_used(struct kunit *test) | |
712 | { | |
713 | struct ext4_buddy e4b; | |
714 | struct super_block *sb = (struct super_block *)test->priv; | |
715 | void *bitmap, *buddy; | |
716 | struct ext4_group_info *grp; | |
717 | int ret; | |
718 | struct test_range ranges[TEST_RANGE_COUNT]; | |
719 | int i; | |
720 | ||
721 | /* buddy cache assumes that each page contains at least one block */ | |
722 | if (sb->s_blocksize > PAGE_SIZE) | |
723 | kunit_skip(test, "blocksize exceeds pagesize"); | |
724 | ||
725 | bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL); | |
726 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap); | |
727 | buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL); | |
728 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy); | |
729 | grp = kunit_kzalloc(test, offsetof(struct ext4_group_info, | |
730 | bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL); | |
731 | ||
732 | ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b); | |
733 | KUNIT_ASSERT_EQ(test, ret, 0); | |
734 | ||
735 | grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb); | |
736 | mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT); | |
737 | for (i = 0; i < TEST_RANGE_COUNT; i++) | |
738 | test_mb_mark_used_range(test, &e4b, ranges[i].start, | |
739 | ranges[i].len, bitmap, buddy, grp); | |
740 | ||
741 | ext4_mb_unload_buddy(&e4b); | |
742 | } | |
743 | ||
b7098e1f KS |
/*
 * Free one extent via mb_free_blocks() and verify the buddy state
 * matches a freshly regenerated buddy for the mirrored bitmap.
 */
static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			  void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	/* mb_free_blocks will WARN if len is 0 */
	if (len == 0)
		return;

	mb_free_blocks(NULL, e4b, start, len);

	/* mirror the change in the shadow bitmap and rebuild the buddy */
	mb_clear_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free += len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);

}
771 | ||
772 | static void test_mb_free_blocks(struct kunit *test) | |
773 | { | |
774 | struct ext4_buddy e4b; | |
775 | struct super_block *sb = (struct super_block *)test->priv; | |
776 | void *bitmap, *buddy; | |
777 | struct ext4_group_info *grp; | |
778 | struct ext4_free_extent ex; | |
779 | int ret; | |
780 | int i; | |
781 | struct test_range ranges[TEST_RANGE_COUNT]; | |
782 | ||
783 | /* buddy cache assumes that each page contains at least one block */ | |
784 | if (sb->s_blocksize > PAGE_SIZE) | |
785 | kunit_skip(test, "blocksize exceeds pagesize"); | |
786 | ||
787 | bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL); | |
788 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap); | |
789 | buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL); | |
790 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy); | |
791 | grp = kunit_kzalloc(test, offsetof(struct ext4_group_info, | |
792 | bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL); | |
793 | ||
794 | ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b); | |
795 | KUNIT_ASSERT_EQ(test, ret, 0); | |
796 | ||
797 | ex.fe_start = 0; | |
798 | ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb); | |
799 | ex.fe_group = TEST_GOAL_GROUP; | |
800 | mb_mark_used(&e4b, &ex); | |
801 | grp->bb_free = 0; | |
802 | memset(bitmap, 0xff, sb->s_blocksize); | |
803 | ||
804 | mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT); | |
805 | for (i = 0; i < TEST_RANGE_COUNT; i++) | |
806 | test_mb_free_blocks_range(test, &e4b, ranges[i].start, | |
807 | ranges[i].len, bitmap, buddy, grp); | |
808 | ||
809 | ext4_mb_unload_buddy(&e4b); | |
810 | } | |
811 | ||
28b95ee8 KS |
812 | static const struct mbt_ext4_block_layout mbt_test_layouts[] = { |
813 | { | |
814 | .blocksize_bits = 10, | |
815 | .cluster_bits = 3, | |
816 | .blocks_per_group = 8192, | |
817 | .group_count = 4, | |
818 | .desc_size = 64, | |
819 | }, | |
820 | { | |
821 | .blocksize_bits = 12, | |
822 | .cluster_bits = 3, | |
823 | .blocks_per_group = 8192, | |
824 | .group_count = 4, | |
825 | .desc_size = 64, | |
826 | }, | |
827 | { | |
828 | .blocksize_bits = 16, | |
829 | .cluster_bits = 3, | |
830 | .blocks_per_group = 8192, | |
831 | .group_count = 4, | |
832 | .desc_size = 64, | |
833 | }, | |
834 | }; | |
835 | ||
/* Render one layout as the KUnit parameter description string. */
static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
			    char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
		 "blocks_per_group=%d group_count=%d desc_size=%d\n",
		 layout->blocksize_bits, layout->cluster_bits,
		 layout->blocks_per_group, layout->group_count,
		 layout->desc_size);
}
KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);
7c9fa399 KS |
846 | |
/* Every case runs once per entry in mbt_test_layouts. */
static struct kunit_case mbt_test_cases[] = {
	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
	{}
};
856 | ||
/* Suite definition: init/exit run around every (case, layout) pair. */
static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};
863 | ||
/* Register the suite with the KUnit framework. */
kunit_test_suites(&mbt_test_suite);

MODULE_LICENSE("GPL");