/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);

/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
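
/*
 * Worked example (illustrative numbers, not from the original source):
 * on a filesystem with 4KiB blocks, s_first_data_block == 0, 32768
 * blocks per group and no bigalloc (s_cluster_bits == 0), block 40000
 * gives do_div(blocknr, 32768) -> quotient 1, remainder 7232, so the
 * block lives in group 1 at bitmap offset 7232.
 */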

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		actual_group =
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
			 block) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	return (actual_group == block_group) ? 1 : 0;
}
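
/*
 * Note on the shift above (illustrative): for the standard layout the
 * block bitmap is one block of 8 * blocksize bits, each covering one
 * cluster, so blocks-per-group is exactly
 * 1 << (EXT4_BLOCK_SIZE_BITS + EXT4_CLUSTER_BITS + 3); e.g. 4KiB
 * blocks without bigalloc give 1 << (12 + 0 + 3) = 32768 blocks per
 * group, letting the shift stand in for a division.
 */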

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}
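
/*
 * Illustration (assumes the typical layout, not taken from the original
 * source): without bigalloc the two bitmaps and the inode table usually
 * sit right after the superblock/gdt blocks, so each one lands in the
 * "c == num_clusters" case and the overhead simply grows by one cluster
 * per metadata block; the explicit block_cluster/inode_cluster tracking
 * only matters for unusual, discontiguous layouts.
 */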

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
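
/*
 * Example (hypothetical numbers): a 100000-block filesystem with 32768
 * blocks per group has four groups; the last group starts at block
 * 98304 and so contains only 100000 - 98304 = 1696 blocks, which
 * EXT4_NUM_B2C() rounds up to whole clusters.
 */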

/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * blocksize * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
}
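
/*
 * Example for the ext4_mark_bitmap_end() call above (illustrative):
 * with 4KiB blocks the bitmap holds 32768 bits, so a truncated last
 * group of, say, 1696 clusters gets bits 1696..32767 set here, which
 * keeps the allocator from ever handing out blocks past the end of
 * the filesystem.
 */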

/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap.
 */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
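
/*
 * Lookup example (assuming 32-byte descriptors, i.e. 128 per 4KiB
 * block): block_group 300 gives group_desc = 300 >> 7 = 2 and
 * offset = 300 & 127 = 44, so the descriptor sits 44 * 32 bytes into
 * the third cached descriptor block.
 */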

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    unsigned int block_group,
					    struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

void ext4_validate_block_bitmap(struct super_block *sb,
				struct ext4_group_desc *desc,
				unsigned int block_group,
				struct buffer_head *bh)
{
	ext4_fsblk_t blk;

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate the
 * bits for block/inode/inode tables are set in the bitmaps
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit if bh is uptodate,
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return bh;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return 0;
}
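
/*
 * The nowait/wait split lets a caller start several bitmap reads
 * before blocking on any of them. A minimal sketch (illustrative
 * only; bh[], n and first are hypothetical caller state):
 *
 *	for (i = 0; i < n; i++)
 *		bh[i] = ext4_read_block_bitmap_nowait(sb, first + i);
 *	for (i = 0; i < n; i++)
 *		if (bh[i] && ext4_wait_block_bitmap(sb, first + i, bh[i]))
 *			err = -EIO;
 *
 * so several reads can be in flight at once.
 */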

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed blocks
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, root_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);

	/*
	 * r_blocks_count should always be multiple of the cluster ratio so
	 * we are safe to do a plain bit shift only.
	 */
	root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;

	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= ((root_clusters + nclusters) + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}
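
/*
 * Why the two-step read above (illustrative note): the cheap
 * percpu_counter_read_positive() values can be stale by up to the
 * per-cpu batch on each CPU, so once the apparent headroom falls
 * below EXT4_FREECLUSTERS_WATERMARK the exact, more expensive
 * percpu_counter_sum_positive() is used before deciding ENOSPC.
 */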

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
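
/*
 * Typical caller pattern (sketch, not from this file): allocation
 * paths retry on ENOSPC for as long as this returns non-zero, e.g.
 *
 *	retries = 0;
 * retry:
 *	err = <allocate blocks>;
 *	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
 *		goto retry;
 */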

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing)
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:		error code
 *
 * Return the 1st allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is returned in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}
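
/*
 * Usage sketch (illustrative): to allocate a single metadata block
 * near "goal", a caller can do
 *
 *	unsigned long count = 1;
 *	newblock = ext4_new_meta_blocks(handle, inode, goal, 0,
 *					&count, &err);
 *
 * and gets the first allocated block back, with count updated to what
 * was actually allocated.
 */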

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_BLOCKS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
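
/*
 * Example (sparse_super layout): superblock backups land in groups 0,
 * 1 and in powers of 3, 5 and 7, i.e. 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For instance ext4_group_sparse(25) is true because 25 == 5 * 5,
 * while 15 is odd but not a pure power of 3, 5 or 7, so it holds no
 * backup.
 */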

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}
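
/*
 * META_BG layout example (assuming 128 descriptors per block): each
 * metagroup spans 128 block groups and owns a single descriptor block,
 * stored in its first group with backups in the second and last group
 * of the metagroup, hence the first/first + 1/last test in
 * ext4_bg_num_gdb_meta().
 */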

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}
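
/*
 * Example (hypothetical non-META_BG filesystem): a group holding a
 * sparse_super backup with s_gdb_count == 4 and 1024 reserved gdt
 * blocks has num = 1 + 4 + 1024 = 1029 metadata blocks, i.e. 1029
 * clusters at a cluster ratio of 1, while a group with no backup
 * contributes 0.
 */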

/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
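
/*
 * Colour example (illustrative): with 32768 blocks per group and a
 * full group ahead, a task with pid 1234 gets colour
 * (1234 % 16) * (32768 / 16) = 2 * 2048 = 4096, so its goal is 4096
 * blocks past bg_start; spreading goals by pid reduces contention
 * when several processes allocate at once.
 */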