// SPDX-License-Identifier: GPL-2.0
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 * This code builds two trees of free cluster extents.
 * Trees are sorted by start of extent and by length of extent.
 * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in trees.
 * In the extreme case, the code reads the on-disk bitmap to find free clusters.
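/*
 * Example (illustrative): a free run of 8 clusters starting at cluster 100 is
 * cached as one struct e_node with start.key == 100 and count.key == 8, linked
 * into both wnd->start_tree (sorted by start) and wnd->count_tree (by length).
 */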
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/nls.h>

 * Maximum number of extents in tree.
#define NTFS_MAX_WND_EXTENTS (32u * 1024u)

 * Tree is sorted by start (key).
	struct rb_node_key start; /* Tree sorted by start. */
	struct rb_node_key count; /* Tree sorted by len. */

static int wnd_rescan(struct wnd_bitmap *wnd);
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);

static struct kmem_cache *ntfs_enode_cachep;

int __init ntfs3_init_bitmap(void)
	ntfs_enode_cachep =
		kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
				  SLAB_RECLAIM_ACCOUNT, NULL);
	return ntfs_enode_cachep ? 0 : -ENOMEM;

void ntfs3_exit_bitmap(void)
	kmem_cache_destroy(ntfs_enode_cachep);
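/*
 * Illustrative pairing (assumed, not shown in this excerpt): the slab cache
 * above lives for the whole module lifetime, e.g.:
 *
 *	if (ntfs3_init_bitmap())
 *		return -ENOMEM;
 *	...
 *	ntfs3_exit_bitmap();
 */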
static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
	return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
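/*
 * Example (illustrative arithmetic): with a 4K block size each window holds
 * 4096 * 8 == 32768 bits, so a bitmap of 100000 bits spans four windows and
 * wnd_bits() returns 32768 for windows 0..2 and bits_last == 1696 for the
 * last one (100000 - 3 * 32768).
 */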
 * b_pos + b_len - biggest fragment.
 * Scan range [wpos, wbits) of window 'buf'.
 * Returns -1 if not found.
static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
		       size_t to_alloc, size_t *prev_tail, size_t *b_pos,
		u32 used = find_next_zero_bit(buf, wend, wpos);
		if (*b_len < *prev_tail) {
			*b_pos = wbit - *prev_tail;
		if (*b_len < *prev_tail) {
			*b_pos = wbit - *prev_tail;
		 * Now we have a fragment [wpos, wend) starting with 0.
		end = wpos + to_alloc - *prev_tail;
		free_bits = find_next_bit(buf, min(end, wend), wpos);
		free_len = *prev_tail + free_bits - wpos;
		if (*b_len < free_len) {
			*b_pos = wbit + wpos - *prev_tail;
		if (free_len >= to_alloc)
			return wbit + wpos - *prev_tail;
		if (free_bits >= wend) {
			*prev_tail += free_bits - wpos;
		wpos = free_bits + 1;
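/*
 * Example (illustrative): if the previous window ended with 5 free bits, the
 * caller carries them in *prev_tail; when this window starts with 3 free bits
 * followed by a used bit and to_alloc == 8, wnd_scan() computes
 * free_len == 5 + 3 and returns the fragment start wbit + wpos - 5, i.e. a
 * single free run stitched across the window boundary.
 */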
 * Frees all resources.
void wnd_close(struct wnd_bitmap *wnd)
	struct rb_node *node, *next;

	kfree(wnd->free_bits);
	run_close(&wnd->run);

	node = rb_first(&wnd->start_tree);
		next = rb_next(node);
		rb_erase(node, &wnd->start_tree);
		kmem_cache_free(ntfs_enode_cachep,
				rb_entry(node, struct e_node, start.node));

static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
	struct rb_node **p = &root->rb_node;
	struct rb_node *r = NULL;
		struct rb_node_key *k;

		k = rb_entry(*p, struct rb_node_key, node);
		} else if (v > k->key) {
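/*
 * Example (illustrative): rb_lookup() returns the node with the largest key
 * that is <= v, or NULL if every key is greater. With extents starting at 50
 * and 200 cached, rb_lookup(&wnd->start_tree, 100) yields the node whose
 * start.key == 50.
 */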
 * Helper function to insert a node into the special 'count' tree.
static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_ckey = e->count.key;
	size_t e_skey = e->start.key;
			rb_entry(parent = *p, struct e_node, count.node);
		if (e_ckey > k->count.key) {
		} else if (e_ckey < k->count.key) {
		} else if (e_skey < k->start.key) {
		} else if (e_skey > k->start.key) {
	rb_link_node(&e->count.node, parent, p);
	rb_insert_color(&e->count.node, root);
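/*
 * Example (illustrative ordering): larger counts go left, so
 * rb_first(&wnd->count_tree) is the biggest free extent and
 * rb_last(&wnd->count_tree) the smallest; equal counts are tie-broken by
 * start.key.
 */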
 * inline bool rb_insert_start
 * Helper function to insert a node into the special 'start' tree.
static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_skey = e->start.key;
		k = rb_entry(parent, struct e_node, start.node);
		if (e_skey < k->start.key) {
		} else if (e_skey > k->start.key) {
	rb_link_node(&e->start.node, parent, p);
	rb_insert_color(&e->start.node, root);

 * Adds a new extent of free space.
 * build == 1 when building the tree.
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
	struct e_node *e, *e0 = NULL;
	size_t ib, end_in = bit + len;

	/* Use extent_min to filter out too short extents. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
	    len <= wnd->extent_min) {

	/* Try to find the extent before 'bit'. */
	n = rb_lookup(&wnd->start_tree, bit);
		n = rb_first(&wnd->start_tree);
		e = rb_entry(n, struct e_node, start.node);
		if (e->start.key + e->count.key == bit) {
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
		e = rb_entry(n, struct e_node, start.node);
		next_end = e->start.key + e->count.key;
		if (e->start.key > end_in)
		len += next_end - end_in;
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		kmem_cache_free(ntfs_enode_cachep, e);

	if (wnd->uptodated != 1) {
		/* Check bits before 'bit'. */
		ib = wnd->zone_bit == wnd->zone_end ||
		while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
		/* Check bits after 'end_in'. */
		ib = wnd->zone_bit == wnd->zone_end ||
			     end_in > wnd->zone_bit
		while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {

	/* Insert new fragment. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		kmem_cache_free(ntfs_enode_cachep, e0);
		/* Compare with the smallest fragment. */
		n = rb_last(&wnd->count_tree);
		e = rb_entry(n, struct e_node, count.node);
		if (len <= e->count.key)
			goto out; /* Do not insert small fragments. */
			e2 = rb_entry(n, struct e_node, count.node);
			/* The smallest fragment will be 'e2->count.key'. */
			wnd->extent_min = e2->count.key;
		/* Replace the smallest fragment with the new one. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
	e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
	if (build && len <= wnd->extent_min)
		wnd->extent_min = len;
	if (len > wnd->extent_max)
		wnd->extent_max = len;
	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
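/*
 * Example (illustrative merging): with [100, 108) cached, adding the adjacent
 * run [108, 116) removes the old node from both trees and re-inserts a single
 * extent [100, 116) with count.key == 16.
 */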
 * wnd_remove_free_ext
 * Removes a run from the cached free space.
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
	struct rb_node *n, *n3;
	struct e_node *e, *e3;
	size_t end_in = bit + len;
	size_t end3, end, new_key, new_len, max_new_len;

	/* Try to find the extent before 'bit'. */
	n = rb_lookup(&wnd->start_tree, bit);
	e = rb_entry(n, struct e_node, start.node);
	end = e->start.key + e->count.key;
	new_key = new_len = 0;

	/* Range [bit, end_in) must be inside 'e' or outside 'e' and 'n'. */
	if (e->start.key > bit)
	else if (end_in <= end) {
		/* Range [bit, end_in) is inside 'e'. */
		new_len = end - end_in;
		len = bit - e->start.key;
	} else if (bit > end) {
		e3 = rb_entry(n3, struct e_node, start.node);
		if (e3->start.key >= end_in)
		if (e3->count.key == wnd->extent_max)
		end3 = e3->start.key + e3->count.key;
			e3->start.key = end_in;
			rb_erase(&e3->count.node, &wnd->count_tree);
			e3->count.key = end3 - end_in;
			rb_insert_count(&wnd->count_tree, e3);
			rb_erase(&e3->start.node, &wnd->start_tree);
			rb_erase(&e3->count.node, &wnd->count_tree);
			kmem_cache_free(ntfs_enode_cachep, e3);
		n3 = rb_first(&wnd->count_tree);
			n3 ? rb_entry(n3, struct e_node, count.node)->count.key
	if (e->count.key != wnd->extent_max) {
	} else if (rb_prev(&e->count.node)) {
		n3 = rb_next(&e->count.node);
		max_new_len = len > new_len ? len : new_len;
			wnd->extent_max = max_new_len;
			e3 = rb_entry(n3, struct e_node, count.node);
			wnd->extent_max = max(e3->count.key, max_new_len);
		e->start.key = new_key;
		rb_erase(&e->count.node, &wnd->count_tree);
		e->count.key = new_len;
		rb_insert_count(&wnd->count_tree, e);
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		kmem_cache_free(ntfs_enode_cachep, e);
		rb_erase(&e->count.node, &wnd->count_tree);
		rb_insert_count(&wnd->count_tree, e);

	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		/* Get the minimal extent. */
		e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
		if (e->count.key > new_len)
		/* Replace the minimum. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
	e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
	e->start.key = new_key;
	e->count.key = new_len;
	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
	if (!wnd->count && wnd->uptodated != 1)
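/*
 * Example (illustrative splitting): removing [40, 48) from a cached extent
 * [0, 100) shrinks the existing node to [0, 40) and inserts a new node
 * [48, 100), so one e_node becomes two in both trees.
 */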
 * Scan the whole bitmap. Used during initialization.
static int wnd_rescan(struct wnd_bitmap *wnd)
	size_t prev_tail = 0;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 wbits = 8 * sb->s_blocksize;
	size_t wpos, wbit, iw, vbo;
	struct buffer_head *bh = NULL;

	wnd->extent_min = MINUS_ONE_T;
	wnd->total_zeroes = 0;

	for (iw = 0; iw < wnd->nwnd; iw++) {
		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;
		if (!wnd->free_bits[iw]) {
			wnd_add_free_ext(wnd,
		if (wbits == wnd->free_bits[iw]) {
			wnd->total_zeroes += wbits;
		u32 off = vbo & sbi->cluster_mask;
		if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
				      &lcn, &clen, NULL)) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		buf = (ulong *)bh->b_data;
		used = __bitmap_weight(buf, wbits);
		wnd->free_bits[iw] = frb;
		wnd->total_zeroes += frb;
		if (wbit + wbits > wnd->nbits)
			wbits = wnd->nbits - wbit;
			used = find_next_zero_bit(buf, wbits, wpos);
			if (used > wpos && prev_tail) {
				wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
			frb = find_next_bit(buf, wbits, wpos);
			/* Keep the last free block. */
			prev_tail += frb - wpos;
				wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
						 frb + prev_tail - wpos, true);
			/* Skip the free block and the first '1'. */
			/* Reset the previous tail. */
		} while (wpos < wbits);
	wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);

	/*
	 * Before the init cycle, wnd->uptodated was 0.
	 * If any errors or limits occur during initialization, then
	 * wnd->uptodated will be -1.
	 * If 'uptodated' is still 0, then the tree is fully updated.
	 */

	if (wnd->zone_bit != wnd->zone_end) {
		size_t zlen = wnd->zone_end - wnd->zone_bit;
		wnd->zone_end = wnd->zone_bit;
		wnd_zone_set(wnd, wnd->zone_bit, zlen);

int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;

	init_rwsem(&wnd->rw_lock);
	wnd->total_zeroes = nbits;
	wnd->extent_max = MINUS_ONE_T;
	wnd->zone_bit = wnd->zone_end = 0;
	wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
	wnd->bits_last = nbits & (wbits - 1);
	if (!wnd->bits_last)
		wnd->bits_last = wbits;
	wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS);
	err = wnd_rescan(wnd);
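/*
 * Illustrative call site (assumed, not shown in this excerpt): mounting code
 * initializes the cluster bitmap for the whole volume, where 'total_clusters'
 * is a hypothetical name for the volume's cluster count:
 *
 *	err = wnd_init(&sbi->used.bitmap, sb, total_clusters);
 *	if (err)
 *		goto out;
 */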
 * Call ntfs_bread() for the requested window.
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi;
	struct buffer_head *bh;

	vbo = (u64)iw << sb->s_blocksize_bits;
	if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
		return ERR_PTR(-ENOENT);
	lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);
	bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
		return ERR_PTR(-EIO);
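/*
 * Example (illustrative arithmetic, 4K blocks and 4K clusters): for window
 * iw == 3, vbo == 3 << 12 == 12288, i.e. VCN 3 of the bitmap run. If the run
 * maps VCN 3 to lcn == 1000, then lbo == (1000 << 12) + (12288 & 4095)
 * == 4096000, and ntfs_bread() reads block lbo >> 12 == 1000.
 */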
 * Marks the bit range [bit, bit + bits) as free.
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
	struct super_block *sb = wnd->sb;
	u32 wbits = 8 * sb->s_blocksize;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;
		op = tail < bits ? tail : bits;
		bh = wnd_map(wnd, iw);
		buf = (ulong *)bh->b_data;
		__bitmap_clear(buf, wbit, op);
		wnd->free_bits[iw] += op;
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		wnd->total_zeroes += op;
	wnd_add_free_ext(wnd, bit, bits0, false);
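/*
 * Illustrative locking (assumed, not shown in this excerpt): callers keep the
 * on-disk window buffers, the per-window free_bits[] counters and the extent
 * trees consistent by taking wnd->rw_lock for write around state changes:
 *
 *	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
 *	err = wnd_set_used(wnd, lcn, clen);
 *	up_write(&wnd->rw_lock);
 */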
 * Marks the bit range [bit, bit + bits) as used.
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;
		op = tail < bits ? tail : bits;
		bh = wnd_map(wnd, iw);
		buf = (ulong *)bh->b_data;
		__bitmap_set(buf, wbit, op);
		wnd->free_bits[iw] -= op;
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		wnd->total_zeroes -= op;
	if (!RB_EMPTY_ROOT(&wnd->start_tree))
		wnd_remove_free_ext(wnd, bit, bits0);
 * Returns true if all clusters [bit, bit + bits) are free (bitmap only).
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);

	while (iw < wnd->nwnd && bits) {
		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;
		op = tail < bits ? tail : bits;
		if (wbits != wnd->free_bits[iw]) {
			struct buffer_head *bh = wnd_map(wnd, iw);
			ret = are_bits_clear((ulong *)bh->b_data, wbit, op);
 * Returns true if all clusters [bit, bit + bits) are free.
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
	if (RB_EMPTY_ROOT(&wnd->start_tree))
	n = rb_lookup(&wnd->start_tree, bit);
	e = rb_entry(n, struct e_node, start.node);
	end = e->start.key + e->count.key;
	if (bit < end && bit + bits <= end)
	ret = wnd_is_free_hlp(wnd, bit, bits);
 * Returns true if all clusters [bit, bit + bits) are used.
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);

	if (RB_EMPTY_ROOT(&wnd->start_tree))
	n = rb_lookup(&wnd->start_tree, end - 1);
	e = rb_entry(n, struct e_node, start.node);
	if (e->start.key + e->count.key > bit)

	while (iw < wnd->nwnd && bits) {
		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;
		op = tail < bits ? tail : bits;
		if (wnd->free_bits[iw]) {
			struct buffer_head *bh = wnd_map(wnd, iw);
			ret = are_bits_set((ulong *)bh->b_data, wbit, op);
 * - flags - BITMAP_FIND_XXX flags.
 * Looks for free space.
 * Returns 0 if not found.
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
		size_t flags, size_t *allocated)
	struct super_block *sb;
	u32 wbits, wpos, wzbit, wzend;
	size_t fnd, max_alloc, b_len, b_pos;
	size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
	size_t to_alloc0 = to_alloc;
	const struct e_node *e;
	const struct rb_node *pr, *cr;
	struct buffer_head *bh;

	/* Fast check for available free space. */
	if (flags & BITMAP_FIND_FULL) {
		size_t zeroes = wnd_zeroes(wnd);
		zeroes -= wnd->zone_end - wnd->zone_bit;
		if (zeroes < to_alloc0)
		if (to_alloc0 > wnd->extent_max)
	if (to_alloc > wnd->extent_max)
		to_alloc = wnd->extent_max;

	if (wnd->zone_bit <= hint && hint < wnd->zone_end)
		hint = wnd->zone_end;

	max_alloc = wnd->nbits;
	if (hint >= max_alloc)

	if (RB_EMPTY_ROOT(&wnd->start_tree)) {
		if (wnd->uptodated == 1) {
			/* Extents tree is up to date -> no free space. */
		goto allocate_biggest;

	/* Use hint: enumerate extents with start >= hint. */
	cr = wnd->start_tree.rb_node;
		e = rb_entry(cr, struct e_node, start.node);
		if (e->start.key == hint)
		if (e->start.key < hint) {
	e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
		goto allocate_biggest;
	if (e->start.key + e->count.key > hint) {
		/* We have found an extent with 'hint' inside. */
		size_t len = e->start.key + e->count.key - hint;
		if (len >= to_alloc && hint + to_alloc <= max_alloc) {
		if (!(flags & BITMAP_FIND_FULL)) {
			if (hint + len <= max_alloc) {

allocate_biggest:
	/* Allocate from the biggest free extent. */
	e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
	if (e->count.key != wnd->extent_max)
		wnd->extent_max = e->count.key;

	if (e->count.key < max_alloc) {
		if (e->count.key >= to_alloc) {
		} else if (flags & BITMAP_FIND_FULL) {
			if (e->count.key < to_alloc0) {
				/* Biggest free block is less than requested. */
			to_alloc = e->count.key;
		} else if (wnd->uptodated != -1) {
			to_alloc = e->count.key;
			/* Check if we can use more bits. */
			size_t op, max_check;
			struct rb_root start_tree;

			memcpy(&start_tree, &wnd->start_tree,
			       sizeof(struct rb_root));
			memset(&wnd->start_tree, 0, sizeof(struct rb_root));
			max_check = e->start.key + to_alloc;
			if (max_check > max_alloc)
				max_check = max_alloc;
			for (op = e->start.key + e->count.key; op < max_check;
				if (!wnd_is_free(wnd, op, 1))
			memcpy(&wnd->start_tree, &start_tree,
			       sizeof(struct rb_root));
			to_alloc = op - e->start.key;

		/* Prepare to return. */
		if (e->start.key + to_alloc > max_alloc)
			to_alloc = max_alloc - e->start.key;

	if (wnd->uptodated == 1) {
		/* Extents tree is up to date -> no free space. */
	b_len = e->count.key;
	b_pos = e->start.key;

	log2_bits = sb->s_blocksize_bits + 3;

	/* At most two ranges: [hint, max_alloc) + [0, hint). */
	/* TODO: optimize request for case nbits > wbits. */
	iw = hint >> log2_bits;
	wbits = sb->s_blocksize * 8;
	wpos = hint & (wbits - 1);
	if (max_alloc == wnd->nbits) {
		size_t t = max_alloc + wbits - 1;
		nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;

	/* Enumerate all windows. */
	for (; iw < nwnd; iw++) {
		wbit = iw << log2_bits;
		if (!wnd->free_bits[iw]) {
			if (prev_tail > b_len) {
				b_pos = wbit - prev_tail;
			/* Skip fully used window. */
		if (unlikely(iw + 1 == nwnd)) {
			if (max_alloc == wnd->nbits) {
				wbits = wnd->bits_last;
				size_t t = max_alloc & (wbits - 1);
				fbits_valid = false;

		if (wnd->zone_end > wnd->zone_bit) {
			ebit = wbit + wbits;
			zbit = max(wnd->zone_bit, wbit);
			zend = min(wnd->zone_end, ebit);

			/* Here we have a window [wbit, ebit) and zone [zbit, zend). */
				/* Zone does not overlap window. */
				wzbit = zbit - wbit;
				wzend = zend - wbit;

				/* Zone overlaps window. */
				if (wnd->free_bits[iw] == wzend - wzbit) {

				/* Scan two ranges of the window: [wbit, zbit) and [zend, ebit). */
				bh = wnd_map(wnd, iw);
				buf = (ulong *)bh->b_data;

				/* Scan range [wbit, zbit). */
					/* Scan range [wpos, zbit). */
					fnd = wnd_scan(buf, wbit, wpos, wzbit,
						       to_alloc, &prev_tail,
					if (fnd != MINUS_ONE_T) {

				/* Scan range [zend, ebit). */
				if (wzend < wbits) {
					fnd = wnd_scan(buf, wbit,
						       max(wzend, wpos), wbits,
						       to_alloc, &prev_tail,
					if (fnd != MINUS_ONE_T) {

		/* Current window does not overlap zone. */
		if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
			/* Window is empty. */
			if (prev_tail + wbits >= to_alloc) {
				fnd = wbit + wpos - prev_tail;
			/* Increase 'prev_tail' and process the next window. */

		bh = wnd_map(wnd, iw);
		buf = (ulong *)bh->b_data;

		/* Scan range [wpos, wbits). */
		fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
		if (fnd != MINUS_ONE_T)

	if (b_len < prev_tail) {
		/* The last fragment. */
		b_pos = max_alloc - prev_tail;

	/*
	 * We have scanned the range [hint, max_alloc).
	 * Prepare to scan the range [0, hint + to_alloc).
	 */
	size_t nextmax = hint + to_alloc;
	if (likely(nextmax >= hint) && nextmax < max_alloc)
		max_alloc = nextmax;

		wnd->extent_max = b_len;
	if (flags & BITMAP_FIND_FULL)

	if (flags & BITMAP_FIND_MARK_AS_USED) {
		/* TODO: optimize remove extent (pass 'e'?). */
		if (wnd_set_used(wnd, fnd, to_alloc))
	} else if (wnd->extent_max != MINUS_ONE_T &&
		   to_alloc > wnd->extent_max) {
		wnd->extent_max = to_alloc;
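/*
 * Illustrative call (assumed, not shown in this excerpt): allocate a run of
 * clusters near 'hint' and mark it used in one step. 'alcn' receives the
 * first allocated cluster; a return of 0 means nothing was found:
 *
 *	size_t alcn;
 *	size_t alen = wnd_find(wnd, len, hint,
 *			       BITMAP_FIND_MARK_AS_USED, &alcn);
 *	if (!alen)
 *		return -ENOSPC;
 */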
 * Extend bitmap ($MFT bitmap).
int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;
	size_t bits, iw, new_wnd;
	size_t old_bits = wnd->nbits;

	if (new_bits <= old_bits)

	/* Align to 8-byte boundary. */
	new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
	new_last = new_bits & (wbits - 1);

	if (new_wnd != wnd->nwnd) {
		new_free = kmalloc_array(new_wnd, sizeof(u16), GFP_NOFS);
		if (new_free != wnd->free_bits)
			memcpy(new_free, wnd->free_bits,
			       wnd->nwnd * sizeof(u16));
		memset(new_free + wnd->nwnd, 0,
		       (new_wnd - wnd->nwnd) * sizeof(u16));
		kfree(wnd->free_bits);
		wnd->free_bits = new_free;

	/* Zero bits [old_bits, new_bits). */
	bits = new_bits - old_bits;
	b0 = old_bits & (wbits - 1);

	for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
		u64 vbo, lbo, bytes;
		struct buffer_head *bh;

		if (iw + 1 == new_wnd)
		op = b0 + bits > wbits ? wbits - b0 : bits;
		vbo = (u64)iw * blocksize;
		err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		buf = (ulong *)bh->b_data;
		__bitmap_clear(buf, b0, blocksize * 8 - b0);
		frb = wbits - __bitmap_weight(buf, wbits);
		wnd->total_zeroes += frb - wnd->free_bits[iw];
		wnd->free_bits[iw] = frb;
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		/*err = sync_dirty_buffer(bh);*/

	wnd->nbits = new_bits;
	wnd->nwnd = new_wnd;
	wnd->bits_last = new_last;

	wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);

void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
	zlen = wnd->zone_end - wnd->zone_bit;
		wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
	if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
		wnd_remove_free_ext(wnd, lcn, len);
	wnd->zone_bit = lcn;
	wnd->zone_end = lcn + len;
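/*
 * Illustrative use (assumed, not shown in this excerpt): the MFT zone is
 * reserved so regular allocations skip it:
 *
 *	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
 *	wnd_zone_set(wnd, zone_lcn, zone_len);
 *	up_write(&wnd->rw_lock);
 *
 * wnd_find() then treats [zone_bit, zone_end) as unavailable and moves any
 * hint that falls inside the zone past zone_end.
 */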
int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	u32 wbits = 8 * sb->s_blocksize;
	CLST len = 0, lcn = 0, done = 0;
	CLST minlen = bytes_to_cluster(sbi, range->minlen);
	CLST lcn_from = bytes_to_cluster(sbi, range->start);
	size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
	u32 wbit = lcn_from & (wbits - 1);

	if (range->len == (u64)-1)
		lcn_to = wnd->nbits;
	else
		lcn_to = bytes_to_cluster(sbi, range->start + range->len);

	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);

	for (; iw < wnd->nwnd; iw++, wbit = 0) {
		CLST lcn_wnd = iw * wbits;
		struct buffer_head *bh;

		if (lcn_wnd > lcn_to)
		if (!wnd->free_bits[iw])
		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;
		if (lcn_wnd + wbits > lcn_to)
			wbits = lcn_to - lcn_wnd;
		bh = wnd_map(wnd, iw);
		buf = (ulong *)bh->b_data;
		for (; wbit < wbits; wbit++) {
			if (!test_bit(wbit, buf)) {
					lcn = lcn_wnd + wbit;
			if (len >= minlen) {
				err = ntfs_discard(sbi, lcn, len);

	/* Process the last fragment. */
	if (len >= minlen) {
		err = ntfs_discard(sbi, lcn, len);

	range->len = (u64)done << sbi->cluster_bits;
	up_read(&wnd->rw_lock);
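/*
 * Illustrative caller (assumed, not shown in this excerpt): ntfs_trim_fs()
 * typically backs the FITRIM ioctl:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (u64)-1,
 *		.minlen = 0,
 *	};
 *	err = ntfs_trim_fs(sbi, &range);
 *
 * On return, range.len holds the number of bytes submitted for discard.
 */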