Commit | Line | Data |
---|---|---|
c1d7c514 | 1 | // SPDX-License-Identifier: GPL-2.0 |
0f9dd46c JB | 2 | /* |
3 | * Copyright (C) 2008 Red Hat. All rights reserved. |
0f9dd46c JB | 4 | */ |
5 | |
96303081 | 6 | #include <linux/pagemap.h> |
0f9dd46c | 7 | #include <linux/sched.h> |
f361bf4a | 8 | #include <linux/sched/signal.h> |
5a0e3ad6 | 9 | #include <linux/slab.h> |
96303081 | 10 | #include <linux/math64.h> |
6ab60601 | 11 | #include <linux/ratelimit.h> |
540adea3 | 12 | #include <linux/error-injection.h> |
84de76a2 | 13 | #include <linux/sched/mm.h> |
18bb8bbf | 14 | #include "misc.h" |
0f9dd46c | 15 | #include "ctree.h" |
fa9c0d79 CM | 16 | #include "free-space-cache.h" |
17 | #include "transaction.h" |
0af3d00b | 18 | #include "disk-io.h" |
43be2146 | 19 | #include "extent_io.h" |
04216820 | 20 | #include "volumes.h" |
8719aaae | 21 | #include "space-info.h" |
86736342 | 22 | #include "delalloc-space.h" |
aac0023c | 23 | #include "block-group.h" |
b0643e59 | 24 | #include "discard.h" |
fa9c0d79 | 25 | |
0ef6447a | 26 | #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) |
5d90c5c7 DZ | 27 | #define MAX_CACHE_BYTES_PER_GIG SZ_64K |
28 | #define FORCE_EXTENT_THRESHOLD SZ_1M |
0f9dd46c | 29 | |
55507ce3 FM | 30 | struct btrfs_trim_range { |
31 | u64 start; |
32 | u64 bytes; |
33 | struct list_head list; |
34 | }; |
35 | |
34d52cb6 | 36 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
0cb59c99 | 37 | struct btrfs_free_space *info); |
cd023e7b JB | 38 | static void unlink_free_space(struct btrfs_free_space_ctl *ctl, |
39 | struct btrfs_free_space *info); |
cd79909b JB | 40 | static int search_bitmap(struct btrfs_free_space_ctl *ctl, |
41 | struct btrfs_free_space *bitmap_info, u64 *offset, |
42 | u64 *bytes, bool for_alloc); |
43 | static void free_bitmap(struct btrfs_free_space_ctl *ctl, |
44 | struct btrfs_free_space *bitmap_info); |
45 | static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, |
46 | struct btrfs_free_space *info, u64 offset, |
47 | u64 bytes); |
0cb59c99 | 48 | |
0414efae LZ | 49 | static struct inode *__lookup_free_space_inode(struct btrfs_root *root, |
50 | struct btrfs_path *path, |
51 | u64 offset) |
0af3d00b | 52 | { |
0b246afa | 53 | struct btrfs_fs_info *fs_info = root->fs_info; |
0af3d00b JB | 54 | struct btrfs_key key; |
55 | struct btrfs_key location; |
56 | struct btrfs_disk_key disk_key; |
57 | struct btrfs_free_space_header *header; |
58 | struct extent_buffer *leaf; |
59 | struct inode *inode = NULL; |
84de76a2 | 60 | unsigned nofs_flag; |
0af3d00b JB | 61 | int ret; |
62 | |
0af3d00b | 63 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
0414efae | 64 | key.offset = offset; |
0af3d00b JB | 65 | key.type = 0; |
66 | |
67 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
68 | if (ret < 0) | |
69 | return ERR_PTR(ret); | |
70 | if (ret > 0) { | |
b3b4aa74 | 71 | btrfs_release_path(path); |
0af3d00b JB | 72 | return ERR_PTR(-ENOENT); |
73 | } | |
74 | ||
75 | leaf = path->nodes[0]; | |
76 | header = btrfs_item_ptr(leaf, path->slots[0], | |
77 | struct btrfs_free_space_header); | |
78 | btrfs_free_space_key(leaf, header, &disk_key); | |
79 | btrfs_disk_key_to_cpu(&location, &disk_key); | |
b3b4aa74 | 80 | btrfs_release_path(path); |
0af3d00b | 81 | |
84de76a2 JB | 82 | /* |
83 | * We are often under a trans handle at this point, so we need to make | |
84 | * sure NOFS is set to keep us from deadlocking. | |
85 | */ | |
86 | nofs_flag = memalloc_nofs_save(); | |
0202e83f | 87 | inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path); |
4222ea71 | 88 | btrfs_release_path(path); |
84de76a2 | 89 | memalloc_nofs_restore(nofs_flag); |
0af3d00b JB | 90 | if (IS_ERR(inode)) |
91 | return inode; | |
0af3d00b | 92 | |
528c0327 | 93 | mapping_set_gfp_mask(inode->i_mapping, |
c62d2555 MH | 94 | mapping_gfp_constraint(inode->i_mapping, |
95 | ~(__GFP_FS | __GFP_HIGHMEM))); |
adae52b9 | 96 | |
0414efae LZ | 97 | return inode; |
98 | } | |
99 | ||
32da5386 | 100 | struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group, |
7949f339 | 101 | struct btrfs_path *path) |
0414efae | 102 | { |
7949f339 | 103 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
0414efae | 104 | struct inode *inode = NULL; |
5b0e95bf | 105 | u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; |
0414efae LZ |
106 | |
107 | spin_lock(&block_group->lock); | |
108 | if (block_group->inode) | |
109 | inode = igrab(block_group->inode); | |
110 | spin_unlock(&block_group->lock); | |
111 | if (inode) | |
112 | return inode; | |
113 | ||
77ab86bf | 114 | inode = __lookup_free_space_inode(fs_info->tree_root, path, |
b3470b5d | 115 | block_group->start); |
0414efae LZ |
116 | if (IS_ERR(inode)) |
117 | return inode; | |
118 | ||
0af3d00b | 119 | spin_lock(&block_group->lock); |
5b0e95bf | 120 | if (!((BTRFS_I(inode)->flags & flags) == flags)) { |
0b246afa | 121 | btrfs_info(fs_info, "Old style space inode found, converting."); |
5b0e95bf JB |
122 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | |
123 | BTRFS_INODE_NODATACOW; | |
2f356126 JB |
124 | block_group->disk_cache_state = BTRFS_DC_CLEAR; |
125 | } | |
126 | ||
300e4f8a | 127 | if (!block_group->iref) { |
0af3d00b JB |
128 | block_group->inode = igrab(inode); |
129 | block_group->iref = 1; | |
130 | } | |
131 | spin_unlock(&block_group->lock); | |
132 | ||
133 | return inode; | |
134 | } | |
135 | ||
48a3b636 ES |
136 | static int __create_free_space_inode(struct btrfs_root *root, |
137 | struct btrfs_trans_handle *trans, | |
138 | struct btrfs_path *path, | |
139 | u64 ino, u64 offset) | |
0af3d00b JB |
140 | { |
141 | struct btrfs_key key; | |
142 | struct btrfs_disk_key disk_key; | |
143 | struct btrfs_free_space_header *header; | |
144 | struct btrfs_inode_item *inode_item; | |
145 | struct extent_buffer *leaf; | |
f0d1219d NB |
146 | /* We inline CRCs for the free disk space cache */ |
147 | const u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC | | |
148 | BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; | |
0af3d00b JB |
149 | int ret; |
150 | ||
0414efae | 151 | ret = btrfs_insert_empty_inode(trans, root, path, ino); |
0af3d00b JB |
152 | if (ret) |
153 | return ret; | |
154 | ||
155 | leaf = path->nodes[0]; | |
156 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
157 | struct btrfs_inode_item); | |
158 | btrfs_item_key(leaf, &disk_key, path->slots[0]); | |
b159fa28 | 159 | memzero_extent_buffer(leaf, (unsigned long)inode_item, |
0af3d00b JB |
160 | sizeof(*inode_item)); |
161 | btrfs_set_inode_generation(leaf, inode_item, trans->transid); | |
162 | btrfs_set_inode_size(leaf, inode_item, 0); | |
163 | btrfs_set_inode_nbytes(leaf, inode_item, 0); | |
164 | btrfs_set_inode_uid(leaf, inode_item, 0); | |
165 | btrfs_set_inode_gid(leaf, inode_item, 0); | |
166 | btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); | |
5b0e95bf | 167 | btrfs_set_inode_flags(leaf, inode_item, flags); |
0af3d00b JB |
168 | btrfs_set_inode_nlink(leaf, inode_item, 1); |
169 | btrfs_set_inode_transid(leaf, inode_item, trans->transid); | |
0414efae | 170 | btrfs_set_inode_block_group(leaf, inode_item, offset); |
0af3d00b | 171 | btrfs_mark_buffer_dirty(leaf); |
b3b4aa74 | 172 | btrfs_release_path(path); |
0af3d00b JB |
173 | |
174 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | |
0414efae | 175 | key.offset = offset; |
0af3d00b | 176 | key.type = 0; |
0af3d00b JB |
177 | ret = btrfs_insert_empty_item(trans, root, path, &key, |
178 | sizeof(struct btrfs_free_space_header)); | |
179 | if (ret < 0) { | |
b3b4aa74 | 180 | btrfs_release_path(path); |
0af3d00b JB |
181 | return ret; |
182 | } | |
c9dc4c65 | 183 | |
0af3d00b JB |
184 | leaf = path->nodes[0]; |
185 | header = btrfs_item_ptr(leaf, path->slots[0], | |
186 | struct btrfs_free_space_header); | |
b159fa28 | 187 | memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header)); |
0af3d00b JB |
188 | btrfs_set_free_space_key(leaf, header, &disk_key); |
189 | btrfs_mark_buffer_dirty(leaf); | |
b3b4aa74 | 190 | btrfs_release_path(path); |
0af3d00b JB |
191 | |
192 | return 0; | |
193 | } | |
194 | ||
4ca75f1b | 195 | int create_free_space_inode(struct btrfs_trans_handle *trans, |
32da5386 | 196 | struct btrfs_block_group *block_group, |
0414efae LZ |
197 | struct btrfs_path *path) |
198 | { | |
199 | int ret; | |
200 | u64 ino; | |
201 | ||
543068a2 | 202 | ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino); |
0414efae LZ |
203 | if (ret < 0) |
204 | return ret; | |
205 | ||
4ca75f1b | 206 | return __create_free_space_inode(trans->fs_info->tree_root, trans, path, |
b3470b5d | 207 | ino, block_group->start); |
0414efae LZ |
208 | } |
209 | ||
36b216c8 BB |
210 | /* |
211 | * inode is an optional sink: if it is NULL, btrfs_remove_free_space_inode | |
212 | * handles lookup, otherwise it takes ownership and iputs the inode. | |
213 | * Don't reuse an inode pointer after passing it into this function. | |
214 | */ | |
215 | int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans, | |
216 | struct inode *inode, | |
217 | struct btrfs_block_group *block_group) | |
218 | { | |
219 | struct btrfs_path *path; | |
220 | struct btrfs_key key; | |
221 | int ret = 0; | |
222 | ||
223 | path = btrfs_alloc_path(); | |
224 | if (!path) | |
225 | return -ENOMEM; | |
226 | ||
227 | if (!inode) | |
228 | inode = lookup_free_space_inode(block_group, path); | |
229 | if (IS_ERR(inode)) { | |
230 | if (PTR_ERR(inode) != -ENOENT) | |
231 | ret = PTR_ERR(inode); | |
232 | goto out; | |
233 | } | |
234 | ret = btrfs_orphan_add(trans, BTRFS_I(inode)); | |
235 | if (ret) { | |
236 | btrfs_add_delayed_iput(inode); | |
237 | goto out; | |
238 | } | |
239 | clear_nlink(inode); | |
240 | /* One for the block groups ref */ | |
241 | spin_lock(&block_group->lock); | |
242 | if (block_group->iref) { | |
243 | block_group->iref = 0; | |
244 | block_group->inode = NULL; | |
245 | spin_unlock(&block_group->lock); | |
246 | iput(inode); | |
247 | } else { | |
248 | spin_unlock(&block_group->lock); | |
249 | } | |
250 | /* One for the lookup ref */ | |
251 | btrfs_add_delayed_iput(inode); | |
252 | ||
253 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | |
254 | key.type = 0; | |
255 | key.offset = block_group->start; | |
256 | ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path, | |
257 | -1, 1); | |
258 | if (ret) { | |
259 | if (ret > 0) | |
260 | ret = 0; | |
261 | goto out; | |
262 | } | |
263 | ret = btrfs_del_item(trans, trans->fs_info->tree_root, path); | |
264 | out: | |
265 | btrfs_free_path(path); | |
266 | return ret; | |
267 | } | |
268 | ||
2ff7e61e | 269 | int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info, |
7b61cd92 | 270 | struct btrfs_block_rsv *rsv) |
0af3d00b | 271 | { |
c8174313 | 272 | u64 needed_bytes; |
7b61cd92 | 273 | int ret; |
c8174313 JB |
274 | |
275 | /* 1 for slack space, 1 for updating the inode */ | |
2bd36e7b JB |
276 | needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) + |
277 | btrfs_calc_metadata_size(fs_info, 1); | |
c8174313 | 278 | |
7b61cd92 MX |
279 | spin_lock(&rsv->lock); |
280 | if (rsv->reserved < needed_bytes) | |
281 | ret = -ENOSPC; | |
282 | else | |
283 | ret = 0; | |
284 | spin_unlock(&rsv->lock); | |
4b286cd1 | 285 | return ret; |
7b61cd92 MX | 286 | } |
287 | ||
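btrfs_check_trunc_cache_free_space() above sizes the reservation as one tree insertion's worth of slack plus one inode update. Below is a minimal userspace sketch of that arithmetic, assuming btrfs_calc_insert_metadata_size() works out to nodesize * BTRFS_MAX_LEVEL * 2 * num_items and btrfs_calc_metadata_size() to nodesize * BTRFS_MAX_LEVEL * num_items; those formulas are an assumption about the helpers, not something this file states.

```c
/* Illustrative userspace model of the reservation check above (assumed formulas). */
#include <stdint.h>
#include <stdio.h>

#define BTRFS_MAX_LEVEL 8

/* Assumed to mirror btrfs_calc_insert_metadata_size(): COW a full path, twice. */
static uint64_t calc_insert_metadata_size(uint32_t nodesize, unsigned num_items)
{
	return (uint64_t)nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

/* Assumed to mirror btrfs_calc_metadata_size(): COW a full path, once. */
static uint64_t calc_metadata_size(uint32_t nodesize, unsigned num_items)
{
	return (uint64_t)nodesize * BTRFS_MAX_LEVEL * num_items;
}

int main(void)
{
	uint32_t nodesize = 16384;	/* common default nodesize */
	uint64_t reserved = 512 * 1024;	/* pretend rsv->reserved */
	uint64_t needed = calc_insert_metadata_size(nodesize, 1) +
			  calc_metadata_size(nodesize, 1);

	/* 16 KiB * 8 * 2 + 16 KiB * 8 = 384 KiB with these assumptions */
	printf("needed %llu bytes: %s\n", (unsigned long long)needed,
	       reserved < needed ? "-ENOSPC" : "ok");
	return 0;
}
```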
77ab86bf | 288 | int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, |
32da5386 | 289 | struct btrfs_block_group *block_group, |
7b61cd92 MX |
290 | struct inode *inode) |
291 | { | |
77ab86bf | 292 | struct btrfs_root *root = BTRFS_I(inode)->root; |
7b61cd92 | 293 | int ret = 0; |
35c76642 | 294 | bool locked = false; |
1bbc621e | 295 | |
1bbc621e | 296 | if (block_group) { |
21e75ffe JM |
297 | struct btrfs_path *path = btrfs_alloc_path(); |
298 | ||
299 | if (!path) { | |
300 | ret = -ENOMEM; | |
301 | goto fail; | |
302 | } | |
35c76642 | 303 | locked = true; |
1bbc621e CM |
304 | mutex_lock(&trans->transaction->cache_write_mutex); |
305 | if (!list_empty(&block_group->io_list)) { | |
306 | list_del_init(&block_group->io_list); | |
307 | ||
afdb5718 | 308 | btrfs_wait_cache_io(trans, block_group, path); |
1bbc621e CM |
309 | btrfs_put_block_group(block_group); |
310 | } | |
311 | ||
312 | /* | |
313 | * Now that we've truncated the cache away, it's no longer |
314 | * set up or written. |
315 | */ | |
316 | spin_lock(&block_group->lock); | |
317 | block_group->disk_cache_state = BTRFS_DC_CLEAR; | |
318 | spin_unlock(&block_group->lock); | |
21e75ffe | 319 | btrfs_free_path(path); |
1bbc621e | 320 | } |
0af3d00b | 321 | |
6ef06d27 | 322 | btrfs_i_size_write(BTRFS_I(inode), 0); |
7caef267 | 323 | truncate_pagecache(inode, 0); |
0af3d00b JB |
324 | |
325 | /* | |
f7e9e8fc OS |
326 | * We skip the throttling logic for free space cache inodes, so we don't |
327 | * need to check for -EAGAIN. | |
0af3d00b | 328 | */ |
50743398 | 329 | ret = btrfs_truncate_inode_items(trans, root, BTRFS_I(inode), |
0d7d3165 | 330 | 0, BTRFS_EXTENT_DATA_KEY, NULL); |
35c76642 FM |
331 | if (ret) |
332 | goto fail; | |
0af3d00b | 333 | |
9a56fcd1 | 334 | ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); |
1bbc621e | 335 | |
1bbc621e | 336 | fail: |
35c76642 FM |
337 | if (locked) |
338 | mutex_unlock(&trans->transaction->cache_write_mutex); | |
79787eaa | 339 | if (ret) |
66642832 | 340 | btrfs_abort_transaction(trans, ret); |
c8174313 | 341 | |
82d5902d | 342 | return ret; |
0af3d00b JB |
343 | } |
344 | ||
1d480538 | 345 | static void readahead_cache(struct inode *inode) |
9d66e233 | 346 | { |
98caf953 | 347 | struct file_ra_state ra; |
9d66e233 JB |
348 | unsigned long last_index; |
349 | ||
98caf953 | 350 | file_ra_state_init(&ra, inode->i_mapping); |
09cbfeaf | 351 | last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; |
9d66e233 | 352 | |
98caf953 | 353 | page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index); |
9d66e233 JB |
354 | } |
355 | ||
4c6d1d85 | 356 | static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode, |
f15376df | 357 | int write) |
a67509c3 | 358 | { |
5349d6c3 | 359 | int num_pages; |
5349d6c3 | 360 | |
09cbfeaf | 361 | num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
5349d6c3 | 362 | |
8f6c72a9 | 363 | /* Make sure we can fit our crcs and generation into the first page */ |
7dbdb443 | 364 | if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE) |
5349d6c3 MX |
365 | return -ENOSPC; |
366 | ||
4c6d1d85 | 367 | memset(io_ctl, 0, sizeof(struct btrfs_io_ctl)); |
5349d6c3 | 368 | |
31e818fe | 369 | io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); |
a67509c3 JB |
370 | if (!io_ctl->pages) |
371 | return -ENOMEM; | |
5349d6c3 MX |
372 | |
373 | io_ctl->num_pages = num_pages; | |
f15376df | 374 | io_ctl->fs_info = btrfs_sb(inode->i_sb); |
c9dc4c65 | 375 | io_ctl->inode = inode; |
5349d6c3 | 376 | |
a67509c3 JB | 377 | return 0; |
378 | } | |
663faf9f | 379 | ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO); |
a67509c3 | 380 | |
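io_ctl_init() rejects a writable cache whose page count would overflow the first page, since that page must begin with one u32 checksum per page followed by a u64 generation before any entries. A standalone sketch of the same bound, assuming 4 KiB pages (illustrative values only):

```c
/* Userspace sketch of the "crcs + generation must fit in page 0" check. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Same test as io_ctl_init(): one u32 crc per page plus a u64 generation. */
static int header_fits(unsigned long num_pages)
{
	return num_pages * sizeof(uint32_t) + sizeof(uint64_t) <= PAGE_SIZE;
}

int main(void)
{
	/* With 4 KiB pages the limit is (4096 - 8) / 4 = 1022 pages, about 4 MiB of cache. */
	printf("1022 pages: %s\n", header_fits(1022) ? "fits" : "-ENOSPC");
	printf("1023 pages: %s\n", header_fits(1023) ? "fits" : "-ENOSPC");
	return 0;
}
```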
4c6d1d85 | 381 | static void io_ctl_free(struct btrfs_io_ctl *io_ctl) |
a67509c3 JB |
382 | { |
383 | kfree(io_ctl->pages); | |
c9dc4c65 | 384 | io_ctl->pages = NULL; |
a67509c3 JB |
385 | } |
386 | ||
4c6d1d85 | 387 | static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl) |
a67509c3 JB |
388 | { |
389 | if (io_ctl->cur) { | |
a67509c3 JB |
390 | io_ctl->cur = NULL; |
391 | io_ctl->orig = NULL; | |
392 | } | |
393 | } | |
394 | ||
4c6d1d85 | 395 | static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear) |
a67509c3 | 396 | { |
b12d6869 | 397 | ASSERT(io_ctl->index < io_ctl->num_pages); |
a67509c3 | 398 | io_ctl->page = io_ctl->pages[io_ctl->index++]; |
2b108268 | 399 | io_ctl->cur = page_address(io_ctl->page); |
a67509c3 | 400 | io_ctl->orig = io_ctl->cur; |
09cbfeaf | 401 | io_ctl->size = PAGE_SIZE; |
a67509c3 | 402 | if (clear) |
619a9742 | 403 | clear_page(io_ctl->cur); |
a67509c3 JB |
404 | } |
405 | ||
4c6d1d85 | 406 | static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) |
a67509c3 JB |
407 | { |
408 | int i; | |
409 | ||
410 | io_ctl_unmap_page(io_ctl); | |
411 | ||
412 | for (i = 0; i < io_ctl->num_pages; i++) { | |
a1ee5a45 LZ |
413 | if (io_ctl->pages[i]) { |
414 | ClearPageChecked(io_ctl->pages[i]); | |
415 | unlock_page(io_ctl->pages[i]); | |
09cbfeaf | 416 | put_page(io_ctl->pages[i]); |
a1ee5a45 | 417 | } |
a67509c3 JB |
418 | } |
419 | } | |
420 | ||
7a195f6d | 421 | static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate) |
a67509c3 JB |
422 | { |
423 | struct page *page; | |
831fa14f | 424 | struct inode *inode = io_ctl->inode; |
a67509c3 JB |
425 | gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); |
426 | int i; | |
427 | ||
428 | for (i = 0; i < io_ctl->num_pages; i++) { | |
32443de3 QW |
429 | int ret; |
430 | ||
a67509c3 JB |
431 | page = find_or_create_page(inode->i_mapping, i, mask); |
432 | if (!page) { | |
433 | io_ctl_drop_pages(io_ctl); | |
434 | return -ENOMEM; | |
435 | } | |
32443de3 QW |
436 | |
437 | ret = set_page_extent_mapped(page); | |
438 | if (ret < 0) { | |
439 | unlock_page(page); | |
440 | put_page(page); | |
441 | io_ctl_drop_pages(io_ctl); | |
442 | return ret; | |
443 | } | |
444 | ||
a67509c3 JB |
445 | io_ctl->pages[i] = page; |
446 | if (uptodate && !PageUptodate(page)) { | |
447 | btrfs_readpage(NULL, page); | |
448 | lock_page(page); | |
3797136b JB |
449 | if (page->mapping != inode->i_mapping) { |
450 | btrfs_err(BTRFS_I(inode)->root->fs_info, | |
451 | "free space cache page truncated"); | |
452 | io_ctl_drop_pages(io_ctl); | |
453 | return -EIO; | |
454 | } | |
a67509c3 | 455 | if (!PageUptodate(page)) { |
efe120a0 FH |
456 | btrfs_err(BTRFS_I(inode)->root->fs_info, |
457 | "error reading free space cache"); | |
a67509c3 JB |
458 | io_ctl_drop_pages(io_ctl); |
459 | return -EIO; | |
460 | } | |
461 | } | |
462 | } | |
463 | ||
32443de3 | 464 | for (i = 0; i < io_ctl->num_pages; i++) |
f7d61dcd | 465 | clear_page_dirty_for_io(io_ctl->pages[i]); |
f7d61dcd | 466 | |
a67509c3 JB |
467 | return 0; |
468 | } | |
469 | ||
4c6d1d85 | 470 | static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation) |
a67509c3 | 471 | { |
a67509c3 JB |
472 | io_ctl_map_page(io_ctl, 1); |
473 | ||
474 | /* | |
5b0e95bf JB |
475 | * Skip the csum areas. If we don't check crcs then we just have a |
476 | * 64bit chunk at the front of the first page. | |
a67509c3 | 477 | */ |
7dbdb443 NB |
478 | io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); |
479 | io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); | |
a67509c3 | 480 | |
6994ca36 | 481 | put_unaligned_le64(generation, io_ctl->cur); |
a67509c3 | 482 | io_ctl->cur += sizeof(u64); |
a67509c3 JB |
483 | } |
484 | ||
4c6d1d85 | 485 | static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation) |
a67509c3 | 486 | { |
6994ca36 | 487 | u64 cache_gen; |
a67509c3 | 488 | |
5b0e95bf JB |
489 | /* |
490 | * Skip the crc area. If we don't check crcs then we just have a 64bit | |
491 | * chunk at the front of the first page. | |
492 | */ | |
7dbdb443 NB |
493 | io_ctl->cur += sizeof(u32) * io_ctl->num_pages; |
494 | io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); | |
a67509c3 | 495 | |
6994ca36 DS |
496 | cache_gen = get_unaligned_le64(io_ctl->cur); |
497 | if (cache_gen != generation) { | |
f15376df | 498 | btrfs_err_rl(io_ctl->fs_info, |
94647322 | 499 | "space cache generation (%llu) does not match inode (%llu)", |
6994ca36 | 500 | cache_gen, generation); |
a67509c3 JB |
501 | io_ctl_unmap_page(io_ctl); |
502 | return -EIO; | |
503 | } | |
504 | io_ctl->cur += sizeof(u64); | |
5b0e95bf JB |
505 | return 0; |
506 | } | |
507 | ||
4c6d1d85 | 508 | static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) |
5b0e95bf JB |
509 | { |
510 | u32 *tmp; | |
511 | u32 crc = ~(u32)0; | |
512 | unsigned offset = 0; | |
513 | ||
5b0e95bf | 514 | if (index == 0) |
cb54f257 | 515 | offset = sizeof(u32) * io_ctl->num_pages; |
5b0e95bf | 516 | |
4bb3c2e2 JT |
517 | crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); |
518 | btrfs_crc32c_final(crc, (u8 *)&crc); | |
5b0e95bf | 519 | io_ctl_unmap_page(io_ctl); |
2b108268 | 520 | tmp = page_address(io_ctl->pages[0]); |
5b0e95bf JB |
521 | tmp += index; |
522 | *tmp = crc; | |
5b0e95bf JB |
523 | } |
524 | ||
4c6d1d85 | 525 | static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) |
5b0e95bf JB |
526 | { |
527 | u32 *tmp, val; | |
528 | u32 crc = ~(u32)0; | |
529 | unsigned offset = 0; | |
530 | ||
5b0e95bf JB |
531 | if (index == 0) |
532 | offset = sizeof(u32) * io_ctl->num_pages; | |
533 | ||
2b108268 | 534 | tmp = page_address(io_ctl->pages[0]); |
5b0e95bf JB |
535 | tmp += index; |
536 | val = *tmp; | |
5b0e95bf JB |
537 | |
538 | io_ctl_map_page(io_ctl, 0); | |
4bb3c2e2 JT |
539 | crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); |
540 | btrfs_crc32c_final(crc, (u8 *)&crc); | |
5b0e95bf | 541 | if (val != crc) { |
f15376df | 542 | btrfs_err_rl(io_ctl->fs_info, |
94647322 | 543 | "csum mismatch on free space cache"); |
5b0e95bf JB |
544 | io_ctl_unmap_page(io_ctl); |
545 | return -EIO; | |
546 | } | |
547 | ||
a67509c3 JB | 548 | return 0; |
549 | } | |
550 | ||
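io_ctl_set_crc() and io_ctl_check_crc() checksum one page at a time; on page 0 they skip the region holding the crc array itself so the stored checksums never feed back into the computation, while the generation that follows is still covered. A small sketch of the byte range each page's crc covers, assuming 4 KiB pages:

```c
/* Sketch: which bytes of each page the space cache crc covers. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Page 0 starts with num_pages u32 crcs; those bytes are excluded from its own crc. */
static void crc_range(int index, unsigned long num_pages,
		      size_t *offset, size_t *len)
{
	*offset = (index == 0) ? sizeof(uint32_t) * num_pages : 0;
	*len = PAGE_SIZE - *offset;
}

int main(void)
{
	unsigned long num_pages = 4;
	size_t off, len;
	int i;

	for (i = 0; i < (int)num_pages; i++) {
		crc_range(i, num_pages, &off, &len);
		printf("page %d: crc covers %zu bytes starting at offset %zu\n",
		       i, len, off);
	}
	return 0;
}
```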
4c6d1d85 | 551 | static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes, |
a67509c3 JB |
552 | void *bitmap) |
553 | { | |
554 | struct btrfs_free_space_entry *entry; | |
555 | ||
556 | if (!io_ctl->cur) | |
557 | return -ENOSPC; | |
558 | ||
559 | entry = io_ctl->cur; | |
6994ca36 DS |
560 | put_unaligned_le64(offset, &entry->offset); |
561 | put_unaligned_le64(bytes, &entry->bytes); | |
a67509c3 JB |
562 | entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : |
563 | BTRFS_FREE_SPACE_EXTENT; | |
564 | io_ctl->cur += sizeof(struct btrfs_free_space_entry); | |
565 | io_ctl->size -= sizeof(struct btrfs_free_space_entry); | |
566 | ||
567 | if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) | |
568 | return 0; | |
569 | ||
5b0e95bf | 570 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); |
a67509c3 JB |
571 | |
572 | /* No more pages to map */ | |
573 | if (io_ctl->index >= io_ctl->num_pages) | |
574 | return 0; | |
575 | ||
576 | /* map the next page */ | |
577 | io_ctl_map_page(io_ctl, 1); | |
578 | return 0; | |
579 | } | |
580 | ||
4c6d1d85 | 581 | static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap) |
a67509c3 JB |
582 | { |
583 | if (!io_ctl->cur) | |
584 | return -ENOSPC; | |
585 | ||
586 | /* | |
587 | * If we aren't at the start of the current page, unmap this one and | |
588 | * map the next one if there is any left. | |
589 | */ | |
590 | if (io_ctl->cur != io_ctl->orig) { | |
5b0e95bf | 591 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); |
a67509c3 JB |
592 | if (io_ctl->index >= io_ctl->num_pages) |
593 | return -ENOSPC; | |
594 | io_ctl_map_page(io_ctl, 0); | |
595 | } | |
596 | ||
69d24804 | 597 | copy_page(io_ctl->cur, bitmap); |
5b0e95bf | 598 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); |
a67509c3 JB |
599 | if (io_ctl->index < io_ctl->num_pages) |
600 | io_ctl_map_page(io_ctl, 0); | |
601 | return 0; | |
602 | } | |
603 | ||
4c6d1d85 | 604 | static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl) |
a67509c3 | 605 | { |
5b0e95bf JB |
606 | /* |
607 | * If we're not on the boundary we know we've modified the page and we | |
608 | * need to crc the page. | |
609 | */ | |
610 | if (io_ctl->cur != io_ctl->orig) | |
611 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); | |
612 | else | |
613 | io_ctl_unmap_page(io_ctl); | |
a67509c3 JB |
614 | |
615 | while (io_ctl->index < io_ctl->num_pages) { | |
616 | io_ctl_map_page(io_ctl, 1); | |
5b0e95bf | 617 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); |
a67509c3 JB |
618 | } |
619 | } | |
620 | ||
4c6d1d85 | 621 | static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl, |
5b0e95bf | 622 | struct btrfs_free_space *entry, u8 *type) |
a67509c3 JB |
623 | { |
624 | struct btrfs_free_space_entry *e; | |
2f120c05 JB |
625 | int ret; |
626 | ||
627 | if (!io_ctl->cur) { | |
628 | ret = io_ctl_check_crc(io_ctl, io_ctl->index); | |
629 | if (ret) | |
630 | return ret; | |
631 | } | |
a67509c3 JB |
632 | |
633 | e = io_ctl->cur; | |
6994ca36 DS |
634 | entry->offset = get_unaligned_le64(&e->offset); |
635 | entry->bytes = get_unaligned_le64(&e->bytes); | |
5b0e95bf | 636 | *type = e->type; |
a67509c3 JB |
637 | io_ctl->cur += sizeof(struct btrfs_free_space_entry); |
638 | io_ctl->size -= sizeof(struct btrfs_free_space_entry); | |
639 | ||
640 | if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) | |
5b0e95bf | 641 | return 0; |
a67509c3 JB |
642 | |
643 | io_ctl_unmap_page(io_ctl); | |
644 | ||
2f120c05 | 645 | return 0; |
a67509c3 JB |
646 | } |
647 | ||
4c6d1d85 | 648 | static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl, |
5b0e95bf | 649 | struct btrfs_free_space *entry) |
a67509c3 | 650 | { |
5b0e95bf JB |
651 | int ret; |
652 | ||
5b0e95bf JB |
653 | ret = io_ctl_check_crc(io_ctl, io_ctl->index); |
654 | if (ret) | |
655 | return ret; | |
656 | ||
69d24804 | 657 | copy_page(entry->bitmap, io_ctl->cur); |
a67509c3 | 658 | io_ctl_unmap_page(io_ctl); |
5b0e95bf JB |
659 | |
660 | return 0; | |
a67509c3 JB |
661 | } |
662 | ||
fa598b06 DS |
663 | static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) |
664 | { | |
665 | struct btrfs_block_group *block_group = ctl->private; | |
666 | u64 max_bytes; | |
667 | u64 bitmap_bytes; | |
668 | u64 extent_bytes; | |
669 | u64 size = block_group->length; | |
670 | u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; | |
671 | u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); | |
672 | ||
673 | max_bitmaps = max_t(u64, max_bitmaps, 1); | |
674 | ||
675 | ASSERT(ctl->total_bitmaps <= max_bitmaps); | |
676 | ||
677 | /* | |
678 | * We are trying to keep the total amount of memory used per 1GiB of | |
679 | * space to be MAX_CACHE_BYTES_PER_GIG. However, with a reclamation | |
680 | * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of | |
681 | * bitmaps, we may end up using more memory than this. | |
682 | */ | |
683 | if (size < SZ_1G) | |
684 | max_bytes = MAX_CACHE_BYTES_PER_GIG; | |
685 | else | |
686 | max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G); | |
687 | ||
688 | bitmap_bytes = ctl->total_bitmaps * ctl->unit; | |
689 | ||
690 | /* | |
691 | * We want the extent entry threshold to be at most half the maximum |
692 | * bytes we can use, or whatever is left after the bitmaps if that is less. |
693 | */ | |
694 | extent_bytes = max_bytes - bitmap_bytes; | |
695 | extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1); | |
696 | ||
697 | ctl->extents_thresh = | |
698 | div_u64(extent_bytes, sizeof(struct btrfs_free_space)); | |
699 | } | |
700 | ||
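The comment in recalculate_thresholds() describes a budget of MAX_CACHE_BYTES_PER_GIG per 1 GiB of block group, with extent entries getting whatever the bitmaps leave over, capped at half the budget. A worked userspace model of that arithmetic, assuming a 4 KiB unit and a 48-byte in-memory entry (both illustrative sizes):

```c
/* Userspace model of recalculate_thresholds() (illustrative sizes). */
#include <stdint.h>
#include <stdio.h>

#define SZ_1G (1024ULL * 1024 * 1024)
#define MAX_CACHE_BYTES_PER_GIG (64ULL * 1024)	/* SZ_64K */

static uint64_t extents_thresh(uint64_t size, uint64_t unit,
			       uint64_t total_bitmaps, uint64_t entry_size)
{
	uint64_t max_bytes, bitmap_bytes, extent_bytes;

	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * (size / SZ_1G);

	bitmap_bytes = total_bitmaps * unit;

	/* Extent entries get what the bitmaps left over, at most half the budget. */
	extent_bytes = max_bytes - bitmap_bytes;
	if (extent_bytes > max_bytes / 2)
		extent_bytes = max_bytes / 2;

	return extent_bytes / entry_size;
}

int main(void)
{
	/* e.g. a 1 GiB block group with two bitmaps, 4 KiB unit, ~48-byte entries */
	printf("extents_thresh = %llu\n",
	       (unsigned long long)extents_thresh(SZ_1G, 4096, 2, 48));
	return 0;
}
```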
48a3b636 ES | 701 | static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, |
702 | struct btrfs_free_space_ctl *ctl, | |
703 | struct btrfs_path *path, u64 offset) | |
9d66e233 | 704 | { |
3ffbd68c | 705 | struct btrfs_fs_info *fs_info = root->fs_info; |
9d66e233 JB |
706 | struct btrfs_free_space_header *header; |
707 | struct extent_buffer *leaf; | |
4c6d1d85 | 708 | struct btrfs_io_ctl io_ctl; |
9d66e233 | 709 | struct btrfs_key key; |
a67509c3 | 710 | struct btrfs_free_space *e, *n; |
b76808fc | 711 | LIST_HEAD(bitmaps); |
9d66e233 JB |
712 | u64 num_entries; |
713 | u64 num_bitmaps; | |
714 | u64 generation; | |
a67509c3 | 715 | u8 type; |
f6a39829 | 716 | int ret = 0; |
9d66e233 | 717 | |
9d66e233 | 718 | /* Nothing in the space cache, goodbye */ |
0414efae | 719 | if (!i_size_read(inode)) |
a67509c3 | 720 | return 0; |
9d66e233 JB |
721 | |
722 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | |
0414efae | 723 | key.offset = offset; |
9d66e233 JB |
724 | key.type = 0; |
725 | ||
726 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
0414efae | 727 | if (ret < 0) |
a67509c3 | 728 | return 0; |
0414efae | 729 | else if (ret > 0) { |
945d8962 | 730 | btrfs_release_path(path); |
a67509c3 | 731 | return 0; |
9d66e233 JB |
732 | } |
733 | ||
0414efae LZ |
734 | ret = -1; |
735 | ||
9d66e233 JB |
736 | leaf = path->nodes[0]; |
737 | header = btrfs_item_ptr(leaf, path->slots[0], | |
738 | struct btrfs_free_space_header); | |
739 | num_entries = btrfs_free_space_entries(leaf, header); | |
740 | num_bitmaps = btrfs_free_space_bitmaps(leaf, header); | |
741 | generation = btrfs_free_space_generation(leaf, header); | |
945d8962 | 742 | btrfs_release_path(path); |
9d66e233 | 743 | |
e570fd27 | 744 | if (!BTRFS_I(inode)->generation) { |
0b246afa | 745 | btrfs_info(fs_info, |
913e1535 | 746 | "the free space cache file (%llu) is invalid, skip it", |
e570fd27 MX |
747 | offset); |
748 | return 0; | |
749 | } | |
750 | ||
9d66e233 | 751 | if (BTRFS_I(inode)->generation != generation) { |
0b246afa JM |
752 | btrfs_err(fs_info, |
753 | "free space inode generation (%llu) did not match free space cache generation (%llu)", | |
754 | BTRFS_I(inode)->generation, generation); | |
a67509c3 | 755 | return 0; |
9d66e233 JB |
756 | } |
757 | ||
758 | if (!num_entries) | |
a67509c3 | 759 | return 0; |
9d66e233 | 760 | |
f15376df | 761 | ret = io_ctl_init(&io_ctl, inode, 0); |
706efc66 LZ |
762 | if (ret) |
763 | return ret; | |
764 | ||
1d480538 | 765 | readahead_cache(inode); |
9d66e233 | 766 | |
7a195f6d | 767 | ret = io_ctl_prepare_pages(&io_ctl, true); |
a67509c3 JB |
768 | if (ret) |
769 | goto out; | |
9d66e233 | 770 | |
5b0e95bf JB |
771 | ret = io_ctl_check_crc(&io_ctl, 0); |
772 | if (ret) | |
773 | goto free_cache; | |
774 | ||
a67509c3 JB |
775 | ret = io_ctl_check_generation(&io_ctl, generation); |
776 | if (ret) | |
777 | goto free_cache; | |
9d66e233 | 778 | |
a67509c3 JB |
779 | while (num_entries) { |
780 | e = kmem_cache_zalloc(btrfs_free_space_cachep, | |
781 | GFP_NOFS); | |
3cc64e7e ZC |
782 | if (!e) { |
783 | ret = -ENOMEM; | |
9d66e233 | 784 | goto free_cache; |
3cc64e7e | 785 | } |
9d66e233 | 786 | |
5b0e95bf JB |
787 | ret = io_ctl_read_entry(&io_ctl, e, &type); |
788 | if (ret) { | |
789 | kmem_cache_free(btrfs_free_space_cachep, e); | |
790 | goto free_cache; | |
791 | } | |
792 | ||
a67509c3 | 793 | if (!e->bytes) { |
3cc64e7e | 794 | ret = -1; |
a67509c3 JB |
795 | kmem_cache_free(btrfs_free_space_cachep, e); |
796 | goto free_cache; | |
9d66e233 | 797 | } |
a67509c3 JB |
798 | |
799 | if (type == BTRFS_FREE_SPACE_EXTENT) { | |
800 | spin_lock(&ctl->tree_lock); | |
801 | ret = link_free_space(ctl, e); | |
802 | spin_unlock(&ctl->tree_lock); | |
803 | if (ret) { | |
0b246afa | 804 | btrfs_err(fs_info, |
c2cf52eb | 805 | "Duplicate entries in free space cache, dumping"); |
a67509c3 | 806 | kmem_cache_free(btrfs_free_space_cachep, e); |
9d66e233 JB |
807 | goto free_cache; |
808 | } | |
a67509c3 | 809 | } else { |
b12d6869 | 810 | ASSERT(num_bitmaps); |
a67509c3 | 811 | num_bitmaps--; |
3acd4850 CL |
812 | e->bitmap = kmem_cache_zalloc( |
813 | btrfs_free_space_bitmap_cachep, GFP_NOFS); | |
a67509c3 | 814 | if (!e->bitmap) { |
3cc64e7e | 815 | ret = -ENOMEM; |
a67509c3 JB |
816 | kmem_cache_free( |
817 | btrfs_free_space_cachep, e); | |
9d66e233 JB |
818 | goto free_cache; |
819 | } | |
a67509c3 JB |
820 | spin_lock(&ctl->tree_lock); |
821 | ret = link_free_space(ctl, e); | |
822 | ctl->total_bitmaps++; | |
fa598b06 | 823 | recalculate_thresholds(ctl); |
a67509c3 JB |
824 | spin_unlock(&ctl->tree_lock); |
825 | if (ret) { | |
0b246afa | 826 | btrfs_err(fs_info, |
c2cf52eb | 827 | "Duplicate entries in free space cache, dumping"); |
dc89e982 | 828 | kmem_cache_free(btrfs_free_space_cachep, e); |
9d66e233 JB |
829 | goto free_cache; |
830 | } | |
a67509c3 | 831 | list_add_tail(&e->list, &bitmaps); |
9d66e233 JB |
832 | } |
833 | ||
a67509c3 JB |
834 | num_entries--; |
835 | } | |
9d66e233 | 836 | |
2f120c05 JB |
837 | io_ctl_unmap_page(&io_ctl); |
838 | ||
a67509c3 JB |
839 | /* |
840 | * We add the bitmaps at the end of the entries in order that | |
841 | * the bitmap entries are added to the cache. | |
842 | */ | |
843 | list_for_each_entry_safe(e, n, &bitmaps, list) { | |
9d66e233 | 844 | list_del_init(&e->list); |
5b0e95bf JB |
845 | ret = io_ctl_read_bitmap(&io_ctl, e); |
846 | if (ret) | |
847 | goto free_cache; | |
9d66e233 JB |
848 | } |
849 | ||
a67509c3 | 850 | io_ctl_drop_pages(&io_ctl); |
9d66e233 JB |
851 | ret = 1; |
852 | out: | |
a67509c3 | 853 | io_ctl_free(&io_ctl); |
9d66e233 | 854 | return ret; |
9d66e233 | 855 | free_cache: |
a67509c3 | 856 | io_ctl_drop_pages(&io_ctl); |
0414efae | 857 | __btrfs_remove_free_space_cache(ctl); |
9d66e233 JB | 858 | goto out; |
859 | } | |
860 | ||
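__load_free_space_cache() reads back num_entries fixed-size records and then the bitmap pages referenced by the bitmap-typed ones. A sketch of that record as io_ctl_add_entry()/io_ctl_read_entry() handle it, two little-endian u64s plus a type byte; the numeric values of the type constants are assumed here, only the names come from this file:

```c
/* Sketch of the packed record written by io_ctl_add_entry() and read back above. */
#include <stdint.h>
#include <stdio.h>

/* Names match this file; the numeric values are assumed for illustration. */
#define BTRFS_FREE_SPACE_EXTENT	1
#define BTRFS_FREE_SPACE_BITMAP	2

struct free_space_entry {
	uint64_t offset;	/* stored little-endian on disk */
	uint64_t bytes;		/* stored little-endian on disk */
	uint8_t type;		/* plain extent run, or a reference to a bitmap page */
} __attribute__((packed));

static uint64_t get_le64(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* offset = 0x1000, bytes = 0x2000, type = extent */
	unsigned char raw[17] = { 0x00, 0x10, 0, 0, 0, 0, 0, 0,
				  0x00, 0x20, 0, 0, 0, 0, 0, 0,
				  BTRFS_FREE_SPACE_EXTENT };

	printf("record is %zu bytes: offset %llu, bytes %llu, type %u\n",
	       sizeof(struct free_space_entry),
	       (unsigned long long)get_le64(raw),
	       (unsigned long long)get_le64(raw + 8), raw[16]);
	return 0;
}
```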
cd79909b JB | 861 | static int copy_free_space_cache(struct btrfs_block_group *block_group, |
862 | struct btrfs_free_space_ctl *ctl) | |
863 | { | |
864 | struct btrfs_free_space *info; | |
865 | struct rb_node *n; | |
866 | int ret = 0; | |
867 | ||
868 | while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) { | |
869 | info = rb_entry(n, struct btrfs_free_space, offset_index); | |
870 | if (!info->bitmap) { | |
871 | unlink_free_space(ctl, info); | |
872 | ret = btrfs_add_free_space(block_group, info->offset, | |
873 | info->bytes); | |
874 | kmem_cache_free(btrfs_free_space_cachep, info); | |
875 | } else { | |
876 | u64 offset = info->offset; | |
877 | u64 bytes = ctl->unit; | |
878 | ||
879 | while (search_bitmap(ctl, info, &offset, &bytes, | |
880 | false) == 0) { | |
881 | ret = btrfs_add_free_space(block_group, offset, | |
882 | bytes); | |
883 | if (ret) | |
884 | break; | |
885 | bitmap_clear_bits(ctl, info, offset, bytes); | |
886 | offset = info->offset; | |
887 | bytes = ctl->unit; | |
888 | } | |
889 | free_bitmap(ctl, info); | |
890 | } | |
891 | cond_resched(); | |
892 | } | |
893 | return ret; | |
894 | } | |
895 | ||
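copy_free_space_cache() drains each loaded bitmap by repeatedly searching for the next free run starting from the bitmap's base offset, handing it to btrfs_add_free_space(), and clearing those bits. A standalone sketch of that drain loop over a plain 64-bit bitmap (a userspace model, not the kernel helpers):

```c
/* Userspace model of draining a free-space bitmap run by run (set bit = free). */
#include <stdio.h>

#define NBITS 64

/* Find the next run of set bits at or after *pos; returns 0 if one was found. */
static int search_bitmap(const unsigned long *bm, unsigned *pos, unsigned *len)
{
	unsigned i = *pos;

	while (i < NBITS && !((*bm >> i) & 1UL))
		i++;
	if (i == NBITS)
		return -1;
	*pos = i;
	*len = 0;
	while (i < NBITS && ((*bm >> i) & 1UL)) {
		i++;
		(*len)++;
	}
	return 0;
}

static void clear_bits(unsigned long *bm, unsigned pos, unsigned len)
{
	while (len--)
		*bm &= ~(1UL << pos++);
}

int main(void)
{
	unsigned long bm = 0xf00f0UL;	/* two free runs */
	unsigned pos = 0, len;

	while (search_bitmap(&bm, &pos, &len) == 0) {
		printf("free run: bit %u, %u bits\n", pos, len);	/* would go to btrfs_add_free_space() */
		clear_bits(&bm, pos, len);
		pos = 0;	/* restart from the bitmap's base, as the kernel loop does */
	}
	return 0;
}
```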
32da5386 | 896 | int load_free_space_cache(struct btrfs_block_group *block_group) |
0cb59c99 | 897 | { |
bb6cb1c5 | 898 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
34d52cb6 | 899 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
cd79909b | 900 | struct btrfs_free_space_ctl tmp_ctl = {}; |
0414efae LZ |
901 | struct inode *inode; |
902 | struct btrfs_path *path; | |
5b0e95bf | 903 | int ret = 0; |
0414efae | 904 | bool matched; |
bf38be65 | 905 | u64 used = block_group->used; |
0414efae | 906 | |
cd79909b JB |
907 | /* |
908 | * Because we could potentially discard our loaded free space, we want | |
909 | * to load everything into a temporary structure first, and then if it's | |
910 | * valid copy it all into the actual free space ctl. | |
911 | */ | |
912 | btrfs_init_free_space_ctl(block_group, &tmp_ctl); | |
913 | ||
0414efae LZ |
914 | /* |
915 | * If this block group has been marked to be cleared for one reason or | |
916 | * another then we can't trust the on disk cache, so just return. | |
917 | */ | |
9d66e233 | 918 | spin_lock(&block_group->lock); |
0414efae LZ |
919 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { |
920 | spin_unlock(&block_group->lock); | |
921 | return 0; | |
922 | } | |
9d66e233 | 923 | spin_unlock(&block_group->lock); |
0414efae LZ |
924 | |
925 | path = btrfs_alloc_path(); | |
926 | if (!path) | |
927 | return 0; | |
d53ba474 JB |
928 | path->search_commit_root = 1; |
929 | path->skip_locking = 1; | |
0414efae | 930 | |
4222ea71 FM |
931 | /* |
932 | * We must pass a path with search_commit_root set to btrfs_iget in | |
933 | * order to avoid a deadlock when allocating extents for the tree root. | |
934 | * | |
935 | * When we are COWing an extent buffer from the tree root, when looking | |
936 | * for a free extent, at extent-tree.c:find_free_extent(), we can find | |
937 | * block group without its free space cache loaded. When we find one | |
938 | * we must load its space cache which requires reading its free space | |
939 | * cache's inode item from the root tree. If this inode item is located | |
940 | * in the same leaf that we started COWing before, then we end up in | |
941 | * deadlock on the extent buffer (trying to read lock it when we | |
942 | * previously write locked it). | |
943 | * | |
944 | * It's safe to read the inode item using the commit root because | |
945 | * block groups, once loaded, stay in memory forever (until they are | |
946 | * removed) as well as their space caches once loaded. New block groups | |
947 | * once created get their ->cached field set to BTRFS_CACHE_FINISHED so | |
948 | * we will never try to read their inode item while the fs is mounted. | |
949 | */ | |
7949f339 | 950 | inode = lookup_free_space_inode(block_group, path); |
0414efae LZ |
951 | if (IS_ERR(inode)) { |
952 | btrfs_free_path(path); | |
953 | return 0; | |
954 | } | |
955 | ||
5b0e95bf JB |
956 | /* We may have converted the inode and made the cache invalid. */ |
957 | spin_lock(&block_group->lock); | |
958 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { | |
959 | spin_unlock(&block_group->lock); | |
a7e221e9 | 960 | btrfs_free_path(path); |
5b0e95bf JB |
961 | goto out; |
962 | } | |
963 | spin_unlock(&block_group->lock); | |
964 | ||
cd79909b | 965 | ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl, |
b3470b5d | 966 | path, block_group->start); |
0414efae LZ |
967 | btrfs_free_path(path); |
968 | if (ret <= 0) | |
969 | goto out; | |
970 | ||
cd79909b JB |
971 | matched = (tmp_ctl.free_space == (block_group->length - used - |
972 | block_group->bytes_super)); | |
0414efae | 973 | |
cd79909b JB |
974 | if (matched) { |
975 | ret = copy_free_space_cache(block_group, &tmp_ctl); | |
976 | /* | |
977 | * ret == 1 means we successfully loaded the free space cache, | |
978 | * so we need to re-set it here. | |
979 | */ | |
980 | if (ret == 0) | |
981 | ret = 1; | |
982 | } else { | |
983 | __btrfs_remove_free_space_cache(&tmp_ctl); | |
5d163e0e JM |
984 | btrfs_warn(fs_info, |
985 | "block group %llu has wrong amount of free space", | |
b3470b5d | 986 | block_group->start); |
0414efae LZ |
987 | ret = -1; |
988 | } | |
989 | out: | |
990 | if (ret < 0) { | |
991 | /* This cache is bogus, make sure it gets cleared */ | |
992 | spin_lock(&block_group->lock); | |
993 | block_group->disk_cache_state = BTRFS_DC_CLEAR; | |
994 | spin_unlock(&block_group->lock); | |
82d5902d | 995 | ret = 0; |
0414efae | 996 | |
5d163e0e JM |
997 | btrfs_warn(fs_info, |
998 | "failed to load free space cache for block group %llu, rebuilding it now", | |
b3470b5d | 999 | block_group->start); |
0414efae LZ |
1000 | } |
1001 | ||
66b53bae JB |
1002 | spin_lock(&ctl->tree_lock); |
1003 | btrfs_discard_update_discardable(block_group); | |
1004 | spin_unlock(&ctl->tree_lock); | |
0414efae LZ |
1005 | iput(inode); |
1006 | return ret; | |
9d66e233 JB |
1007 | } |
1008 | ||
d4452bc5 | 1009 | static noinline_for_stack |
4c6d1d85 | 1010 | int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl, |
d4452bc5 | 1011 | struct btrfs_free_space_ctl *ctl, |
32da5386 | 1012 | struct btrfs_block_group *block_group, |
d4452bc5 CM |
1013 | int *entries, int *bitmaps, |
1014 | struct list_head *bitmap_list) | |
0cb59c99 | 1015 | { |
c09544e0 | 1016 | int ret; |
d4452bc5 | 1017 | struct btrfs_free_cluster *cluster = NULL; |
1bbc621e | 1018 | struct btrfs_free_cluster *cluster_locked = NULL; |
d4452bc5 | 1019 | struct rb_node *node = rb_first(&ctl->free_space_offset); |
55507ce3 | 1020 | struct btrfs_trim_range *trim_entry; |
be1a12a0 | 1021 | |
43be2146 | 1022 | /* Get the cluster for this block_group if it exists */ |
d4452bc5 | 1023 | if (block_group && !list_empty(&block_group->cluster_list)) { |
43be2146 JB |
1024 | cluster = list_entry(block_group->cluster_list.next, |
1025 | struct btrfs_free_cluster, | |
1026 | block_group_list); | |
d4452bc5 | 1027 | } |
43be2146 | 1028 | |
f75b130e | 1029 | if (!node && cluster) { |
1bbc621e CM |
1030 | cluster_locked = cluster; |
1031 | spin_lock(&cluster_locked->lock); | |
f75b130e JB |
1032 | node = rb_first(&cluster->root); |
1033 | cluster = NULL; | |
1034 | } | |
1035 | ||
a67509c3 JB |
1036 | /* Write out the extent entries */ |
1037 | while (node) { | |
1038 | struct btrfs_free_space *e; | |
0cb59c99 | 1039 | |
a67509c3 | 1040 | e = rb_entry(node, struct btrfs_free_space, offset_index); |
d4452bc5 | 1041 | *entries += 1; |
0cb59c99 | 1042 | |
d4452bc5 | 1043 | ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes, |
a67509c3 JB |
1044 | e->bitmap); |
1045 | if (ret) | |
d4452bc5 | 1046 | goto fail; |
2f356126 | 1047 | |
a67509c3 | 1048 | if (e->bitmap) { |
d4452bc5 CM |
1049 | list_add_tail(&e->list, bitmap_list); |
1050 | *bitmaps += 1; | |
2f356126 | 1051 | } |
a67509c3 JB |
1052 | node = rb_next(node); |
1053 | if (!node && cluster) { | |
1054 | node = rb_first(&cluster->root); | |
1bbc621e CM |
1055 | cluster_locked = cluster; |
1056 | spin_lock(&cluster_locked->lock); | |
a67509c3 | 1057 | cluster = NULL; |
43be2146 | 1058 | } |
a67509c3 | 1059 | } |
1bbc621e CM |
1060 | if (cluster_locked) { |
1061 | spin_unlock(&cluster_locked->lock); | |
1062 | cluster_locked = NULL; | |
1063 | } | |
55507ce3 FM |
1064 | |
1065 | /* | |
1066 | * Make sure we don't miss any range that was removed from our rbtree | |
1067 | * because trimming is running. Otherwise after a umount+mount (or crash | |
1068 | * after committing the transaction) we would leak free space and get | |
1069 | * an inconsistent free space cache report from fsck. | |
1070 | */ | |
1071 | list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) { | |
1072 | ret = io_ctl_add_entry(io_ctl, trim_entry->start, | |
1073 | trim_entry->bytes, NULL); | |
1074 | if (ret) | |
1075 | goto fail; | |
1076 | *entries += 1; | |
1077 | } | |
1078 | ||
d4452bc5 CM |
1079 | return 0; |
1080 | fail: | |
1bbc621e CM |
1081 | if (cluster_locked) |
1082 | spin_unlock(&cluster_locked->lock); | |
d4452bc5 CM |
1083 | return -ENOSPC; |
1084 | } | |
1085 | ||
1086 | static noinline_for_stack int | |
1087 | update_cache_item(struct btrfs_trans_handle *trans, | |
1088 | struct btrfs_root *root, | |
1089 | struct inode *inode, | |
1090 | struct btrfs_path *path, u64 offset, | |
1091 | int entries, int bitmaps) | |
1092 | { | |
1093 | struct btrfs_key key; | |
1094 | struct btrfs_free_space_header *header; | |
1095 | struct extent_buffer *leaf; | |
1096 | int ret; | |
1097 | ||
1098 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | |
1099 | key.offset = offset; | |
1100 | key.type = 0; | |
1101 | ||
1102 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); | |
1103 | if (ret < 0) { | |
1104 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, | |
e182163d | 1105 | EXTENT_DELALLOC, 0, 0, NULL); |
d4452bc5 CM |
1106 | goto fail; |
1107 | } | |
1108 | leaf = path->nodes[0]; | |
1109 | if (ret > 0) { | |
1110 | struct btrfs_key found_key; | |
1111 | ASSERT(path->slots[0]); | |
1112 | path->slots[0]--; | |
1113 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
1114 | if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || | |
1115 | found_key.offset != offset) { | |
1116 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, | |
e182163d OS |
1117 | inode->i_size - 1, EXTENT_DELALLOC, 0, |
1118 | 0, NULL); | |
d4452bc5 CM |
1119 | btrfs_release_path(path); |
1120 | goto fail; | |
1121 | } | |
1122 | } | |
1123 | ||
1124 | BTRFS_I(inode)->generation = trans->transid; | |
1125 | header = btrfs_item_ptr(leaf, path->slots[0], | |
1126 | struct btrfs_free_space_header); | |
1127 | btrfs_set_free_space_entries(leaf, header, entries); | |
1128 | btrfs_set_free_space_bitmaps(leaf, header, bitmaps); | |
1129 | btrfs_set_free_space_generation(leaf, header, trans->transid); | |
1130 | btrfs_mark_buffer_dirty(leaf); | |
1131 | btrfs_release_path(path); | |
1132 | ||
1133 | return 0; | |
1134 | ||
1135 | fail: | |
1136 | return -1; | |
1137 | } | |
1138 | ||
6701bdb3 | 1139 | static noinline_for_stack int write_pinned_extent_entries( |
6b45f641 | 1140 | struct btrfs_trans_handle *trans, |
32da5386 | 1141 | struct btrfs_block_group *block_group, |
4c6d1d85 | 1142 | struct btrfs_io_ctl *io_ctl, |
5349d6c3 | 1143 | int *entries) |
d4452bc5 CM |
1144 | { |
1145 | u64 start, extent_start, extent_end, len; | |
d4452bc5 CM |
1146 | struct extent_io_tree *unpin = NULL; |
1147 | int ret; | |
43be2146 | 1148 | |
5349d6c3 MX |
1149 | if (!block_group) |
1150 | return 0; | |
1151 | ||
a67509c3 JB |
1152 | /* |
1153 | * We want to add any pinned extents to our free space cache | |
1154 | * so we don't leak the space | |
d4452bc5 | 1155 | * |
db804f23 LZ |
1156 | * We shouldn't have switched the pinned extents yet so this is the |
1157 | * right one | |
1158 | */ | |
fe119a6e | 1159 | unpin = &trans->transaction->pinned_extents; |
db804f23 | 1160 | |
b3470b5d | 1161 | start = block_group->start; |
db804f23 | 1162 | |
b3470b5d | 1163 | while (start < block_group->start + block_group->length) { |
db804f23 LZ |
1164 | ret = find_first_extent_bit(unpin, start, |
1165 | &extent_start, &extent_end, | |
e6138876 | 1166 | EXTENT_DIRTY, NULL); |
5349d6c3 MX |
1167 | if (ret) |
1168 | return 0; | |
0cb59c99 | 1169 | |
a67509c3 | 1170 | /* This pinned extent is out of our range */ |
b3470b5d | 1171 | if (extent_start >= block_group->start + block_group->length) |
5349d6c3 | 1172 | return 0; |
2f356126 | 1173 | |
db804f23 | 1174 | extent_start = max(extent_start, start); |
b3470b5d DS |
1175 | extent_end = min(block_group->start + block_group->length, |
1176 | extent_end + 1); | |
db804f23 | 1177 | len = extent_end - extent_start; |
0cb59c99 | 1178 | |
d4452bc5 CM |
1179 | *entries += 1; |
1180 | ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL); | |
a67509c3 | 1181 | if (ret) |
5349d6c3 | 1182 | return -ENOSPC; |
0cb59c99 | 1183 | |
db804f23 | 1184 | start = extent_end; |
a67509c3 | 1185 | } |
0cb59c99 | 1186 | |
5349d6c3 MX |
1187 | return 0; |
1188 | } | |
1189 | ||
1190 | static noinline_for_stack int | |
4c6d1d85 | 1191 | write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list) |
5349d6c3 | 1192 | { |
7ae1681e | 1193 | struct btrfs_free_space *entry, *next; |
5349d6c3 MX |
1194 | int ret; |
1195 | ||
0cb59c99 | 1196 | /* Write out the bitmaps */ |
7ae1681e | 1197 | list_for_each_entry_safe(entry, next, bitmap_list, list) { |
d4452bc5 | 1198 | ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); |
a67509c3 | 1199 | if (ret) |
5349d6c3 | 1200 | return -ENOSPC; |
0cb59c99 | 1201 | list_del_init(&entry->list); |
be1a12a0 JB |
1202 | } |
1203 | ||
5349d6c3 MX |
1204 | return 0; |
1205 | } | |
0cb59c99 | 1206 | |
5349d6c3 MX |
1207 | static int flush_dirty_cache(struct inode *inode) |
1208 | { | |
1209 | int ret; | |
be1a12a0 | 1210 | |
0ef8b726 | 1211 | ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); |
5349d6c3 | 1212 | if (ret) |
0ef8b726 | 1213 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, |
e182163d | 1214 | EXTENT_DELALLOC, 0, 0, NULL); |
0cb59c99 | 1215 | |
5349d6c3 | 1216 | return ret; |
d4452bc5 CM |
1217 | } |
1218 | ||
1219 | static void noinline_for_stack | |
a3bdccc4 | 1220 | cleanup_bitmap_list(struct list_head *bitmap_list) |
d4452bc5 | 1221 | { |
7ae1681e | 1222 | struct btrfs_free_space *entry, *next; |
5349d6c3 | 1223 | |
7ae1681e | 1224 | list_for_each_entry_safe(entry, next, bitmap_list, list) |
d4452bc5 | 1225 | list_del_init(&entry->list); |
a3bdccc4 CM |
1226 | } |
1227 | ||
1228 | static void noinline_for_stack | |
1229 | cleanup_write_cache_enospc(struct inode *inode, | |
1230 | struct btrfs_io_ctl *io_ctl, | |
7bf1a159 | 1231 | struct extent_state **cached_state) |
a3bdccc4 | 1232 | { |
d4452bc5 CM |
1233 | io_ctl_drop_pages(io_ctl); |
1234 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | |
e43bbe5e | 1235 | i_size_read(inode) - 1, cached_state); |
d4452bc5 | 1236 | } |
549b4fdb | 1237 | |
afdb5718 JM |
1238 | static int __btrfs_wait_cache_io(struct btrfs_root *root, |
1239 | struct btrfs_trans_handle *trans, | |
32da5386 | 1240 | struct btrfs_block_group *block_group, |
afdb5718 JM |
1241 | struct btrfs_io_ctl *io_ctl, |
1242 | struct btrfs_path *path, u64 offset) | |
c9dc4c65 CM |
1243 | { |
1244 | int ret; | |
1245 | struct inode *inode = io_ctl->inode; | |
1246 | ||
1bbc621e CM |
1247 | if (!inode) |
1248 | return 0; | |
1249 | ||
c9dc4c65 CM |
1250 | /* Flush the dirty pages in the cache file. */ |
1251 | ret = flush_dirty_cache(inode); | |
1252 | if (ret) | |
1253 | goto out; | |
1254 | ||
1255 | /* Update the cache item to tell everyone this cache file is valid. */ | |
1256 | ret = update_cache_item(trans, root, inode, path, offset, | |
1257 | io_ctl->entries, io_ctl->bitmaps); | |
1258 | out: | |
c9dc4c65 CM |
1259 | if (ret) { |
1260 | invalidate_inode_pages2(inode->i_mapping); | |
1261 | BTRFS_I(inode)->generation = 0; | |
bbcd1f4d FM |
1262 | if (block_group) |
1263 | btrfs_debug(root->fs_info, | |
2e69a7a6 FM |
1264 | "failed to write free space cache for block group %llu error %d", |
1265 | block_group->start, ret); | |
c9dc4c65 | 1266 | } |
9a56fcd1 | 1267 | btrfs_update_inode(trans, root, BTRFS_I(inode)); |
c9dc4c65 CM |
1268 | |
1269 | if (block_group) { | |
1bbc621e CM |
1270 | /* the dirty list is protected by the dirty_bgs_lock */ |
1271 | spin_lock(&trans->transaction->dirty_bgs_lock); | |
1272 | ||
1273 | /* the disk_cache_state is protected by the block group lock */ | |
c9dc4c65 CM |
1274 | spin_lock(&block_group->lock); |
1275 | ||
1276 | /* | |
1277 | * only mark this as written if we didn't get put back on | |
1bbc621e CM |
1278 | * the dirty list while waiting for IO. Otherwise our |
1279 | * cache state won't be right, and we won't get written again | |
c9dc4c65 CM |
1280 | */ |
1281 | if (!ret && list_empty(&block_group->dirty_list)) | |
1282 | block_group->disk_cache_state = BTRFS_DC_WRITTEN; | |
1283 | else if (ret) | |
1284 | block_group->disk_cache_state = BTRFS_DC_ERROR; | |
1285 | ||
1286 | spin_unlock(&block_group->lock); | |
1bbc621e | 1287 | spin_unlock(&trans->transaction->dirty_bgs_lock); |
c9dc4c65 CM |
1288 | io_ctl->inode = NULL; |
1289 | iput(inode); | |
1290 | } | |
1291 | ||
1292 | return ret; | |
1293 | ||
1294 | } | |
1295 | ||
afdb5718 | 1296 | int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, |
32da5386 | 1297 | struct btrfs_block_group *block_group, |
afdb5718 JM |
1298 | struct btrfs_path *path) |
1299 | { | |
1300 | return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, | |
1301 | block_group, &block_group->io_ctl, | |
b3470b5d | 1302 | path, block_group->start); |
afdb5718 JM |
1303 | } |
1304 | ||
d4452bc5 | 1305 | /** |
f092cf3c NB |
1306 | * Write out cached info to an inode |
1307 | * | |
1308 | * @root: root the inode belongs to | |
1309 | * @inode: freespace inode we are writing out | |
1310 | * @ctl: free space cache we are going to write out | |
1311 | * @block_group: block_group for this cache if it belongs to a block_group | |
1312 | * @io_ctl: holds context for the io | |
1313 | * @trans: the trans handle | |
d4452bc5 CM |
1314 | * |
1315 | * This function writes out a free space cache struct to disk for quick recovery | |
8cd1e731 | 1316 | * on mount. This will return 0 if it was successful in writing the cache out, |
b8605454 | 1317 | * or an errno if it was not. |
d4452bc5 CM |
1318 | */ |
1319 | static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |
1320 | struct btrfs_free_space_ctl *ctl, | |
32da5386 | 1321 | struct btrfs_block_group *block_group, |
c9dc4c65 | 1322 | struct btrfs_io_ctl *io_ctl, |
0e8d931a | 1323 | struct btrfs_trans_handle *trans) |
d4452bc5 CM |
1324 | { |
1325 | struct extent_state *cached_state = NULL; | |
5349d6c3 | 1326 | LIST_HEAD(bitmap_list); |
d4452bc5 CM |
1327 | int entries = 0; |
1328 | int bitmaps = 0; | |
1329 | int ret; | |
c9dc4c65 | 1330 | int must_iput = 0; |
d4452bc5 CM |
1331 | |
1332 | if (!i_size_read(inode)) | |
b8605454 | 1333 | return -EIO; |
d4452bc5 | 1334 | |
c9dc4c65 | 1335 | WARN_ON(io_ctl->pages); |
f15376df | 1336 | ret = io_ctl_init(io_ctl, inode, 1); |
d4452bc5 | 1337 | if (ret) |
b8605454 | 1338 | return ret; |
d4452bc5 | 1339 | |
e570fd27 MX |
1340 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { |
1341 | down_write(&block_group->data_rwsem); | |
1342 | spin_lock(&block_group->lock); | |
1343 | if (block_group->delalloc_bytes) { | |
1344 | block_group->disk_cache_state = BTRFS_DC_WRITTEN; | |
1345 | spin_unlock(&block_group->lock); | |
1346 | up_write(&block_group->data_rwsem); | |
1347 | BTRFS_I(inode)->generation = 0; | |
1348 | ret = 0; | |
c9dc4c65 | 1349 | must_iput = 1; |
e570fd27 MX |
1350 | goto out; |
1351 | } | |
1352 | spin_unlock(&block_group->lock); | |
1353 | } | |
1354 | ||
d4452bc5 | 1355 | /* Lock all pages first so we can lock the extent safely. */ |
7a195f6d | 1356 | ret = io_ctl_prepare_pages(io_ctl, false); |
b8605454 | 1357 | if (ret) |
b77000ed | 1358 | goto out_unlock; |
d4452bc5 CM |
1359 | |
1360 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, | |
ff13db41 | 1361 | &cached_state); |
d4452bc5 | 1362 | |
c9dc4c65 | 1363 | io_ctl_set_generation(io_ctl, trans->transid); |
d4452bc5 | 1364 | |
55507ce3 | 1365 | mutex_lock(&ctl->cache_writeout_mutex); |
5349d6c3 | 1366 | /* Write out the extent entries in the free space cache */ |
1bbc621e | 1367 | spin_lock(&ctl->tree_lock); |
c9dc4c65 | 1368 | ret = write_cache_extent_entries(io_ctl, ctl, |
d4452bc5 CM |
1369 | block_group, &entries, &bitmaps, |
1370 | &bitmap_list); | |
a3bdccc4 CM |
1371 | if (ret) |
1372 | goto out_nospc_locked; | |
d4452bc5 | 1373 | |
5349d6c3 MX |
1374 | /* |
1375 | * Some spaces that are freed in the current transaction are pinned, | |
1376 | * they will be added into free space cache after the transaction is | |
1377 | * committed, we shouldn't lose them. | |
1bbc621e CM |
1378 | * |
1379 | * If this changes while we are working we'll get added back to | |
1380 | * the dirty list and redo it. No locking needed | |
5349d6c3 | 1381 | */ |
6b45f641 | 1382 | ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries); |
a3bdccc4 CM |
1383 | if (ret) |
1384 | goto out_nospc_locked; | |
5349d6c3 | 1385 | |
55507ce3 FM |
1386 | /* |
1387 | * At last, we write out all the bitmaps and keep cache_writeout_mutex | |
1388 | * locked while doing it because a concurrent trim can be manipulating | |
1389 | * or freeing the bitmap. | |
1390 | */ | |
c9dc4c65 | 1391 | ret = write_bitmap_entries(io_ctl, &bitmap_list); |
1bbc621e | 1392 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 1393 | mutex_unlock(&ctl->cache_writeout_mutex); |
5349d6c3 MX |
1394 | if (ret) |
1395 | goto out_nospc; | |
1396 | ||
1397 | /* Zero out the rest of the pages just to make sure */ | |
c9dc4c65 | 1398 | io_ctl_zero_remaining_pages(io_ctl); |
d4452bc5 | 1399 | |
5349d6c3 | 1400 | /* Everything is written out, now we dirty the pages in the file. */ |
088545f6 NB |
1401 | ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages, |
1402 | io_ctl->num_pages, 0, i_size_read(inode), | |
aa8c1a41 | 1403 | &cached_state, false); |
5349d6c3 | 1404 | if (ret) |
d4452bc5 | 1405 | goto out_nospc; |
5349d6c3 | 1406 | |
e570fd27 MX |
1407 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) |
1408 | up_write(&block_group->data_rwsem); | |
5349d6c3 MX |
1409 | /* |
1410 | * Release the pages and unlock the extent, we will flush | |
1411 | * them out later | |
1412 | */ | |
c9dc4c65 | 1413 | io_ctl_drop_pages(io_ctl); |
bbc37d6e | 1414 | io_ctl_free(io_ctl); |
5349d6c3 MX |
1415 | |
1416 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | |
e43bbe5e | 1417 | i_size_read(inode) - 1, &cached_state); |
5349d6c3 | 1418 | |
c9dc4c65 CM |
1419 | /* |
1420 | * at this point the pages are under IO and we're happy, | |
260db43c | 1421 | * The caller is responsible for waiting on them and updating |
c9dc4c65 CM |
1422 | * the cache and the inode |
1423 | */ | |
1424 | io_ctl->entries = entries; | |
1425 | io_ctl->bitmaps = bitmaps; | |
1426 | ||
1427 | ret = btrfs_fdatawrite_range(inode, 0, (u64)-1); | |
5349d6c3 | 1428 | if (ret) |
d4452bc5 CM |
1429 | goto out; |
1430 | ||
c9dc4c65 CM |
1431 | return 0; |
1432 | ||
a3bdccc4 CM |
1433 | out_nospc_locked: |
1434 | cleanup_bitmap_list(&bitmap_list); | |
1435 | spin_unlock(&ctl->tree_lock); | |
1436 | mutex_unlock(&ctl->cache_writeout_mutex); | |
1437 | ||
a67509c3 | 1438 | out_nospc: |
7bf1a159 | 1439 | cleanup_write_cache_enospc(inode, io_ctl, &cached_state); |
e570fd27 | 1440 | |
b77000ed | 1441 | out_unlock: |
e570fd27 MX |
1442 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) |
1443 | up_write(&block_group->data_rwsem); | |
1444 | ||
fd8efa81 JT |
1445 | out: |
1446 | io_ctl->inode = NULL; | |
1447 | io_ctl_free(io_ctl); | |
1448 | if (ret) { | |
1449 | invalidate_inode_pages2(inode->i_mapping); | |
1450 | BTRFS_I(inode)->generation = 0; | |
1451 | } | |
9a56fcd1 | 1452 | btrfs_update_inode(trans, root, BTRFS_I(inode)); |
fd8efa81 JT |
1453 | if (must_iput) |
1454 | iput(inode); | |
1455 | return ret; | |
0414efae LZ |
1456 | } |
1457 | ||
fe041534 | 1458 | int btrfs_write_out_cache(struct btrfs_trans_handle *trans, |
32da5386 | 1459 | struct btrfs_block_group *block_group, |
0414efae LZ |
1460 | struct btrfs_path *path) |
1461 | { | |
fe041534 | 1462 | struct btrfs_fs_info *fs_info = trans->fs_info; |
0414efae LZ |
1463 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
1464 | struct inode *inode; | |
1465 | int ret = 0; | |
1466 | ||
0414efae LZ |
1467 | spin_lock(&block_group->lock); |
1468 | if (block_group->disk_cache_state < BTRFS_DC_SETUP) { | |
1469 | spin_unlock(&block_group->lock); | |
e570fd27 MX |
1470 | return 0; |
1471 | } | |
0414efae LZ |
1472 | spin_unlock(&block_group->lock); |
1473 | ||
7949f339 | 1474 | inode = lookup_free_space_inode(block_group, path); |
0414efae LZ |
1475 | if (IS_ERR(inode)) |
1476 | return 0; | |
1477 | ||
77ab86bf JM |
1478 | ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl, |
1479 | block_group, &block_group->io_ctl, trans); | |
c09544e0 | 1480 | if (ret) { |
bbcd1f4d | 1481 | btrfs_debug(fs_info, |
2e69a7a6 FM |
1482 | "failed to write free space cache for block group %llu error %d", |
1483 | block_group->start, ret); | |
c9dc4c65 CM |
1484 | spin_lock(&block_group->lock); |
1485 | block_group->disk_cache_state = BTRFS_DC_ERROR; | |
1486 | spin_unlock(&block_group->lock); | |
1487 | ||
1488 | block_group->io_ctl.inode = NULL; | |
1489 | iput(inode); | |
0414efae LZ |
1490 | } |
1491 | ||
c9dc4c65 CM |
1492 | /* |
1493 | * If ret == 0 the caller is expected to call btrfs_wait_cache_io |
1494 | * to wait for IO and put the inode. |
1495 | */ | |
1496 | ||
0cb59c99 JB |
1497 | return ret; |
1498 | } | |
1499 | ||
34d52cb6 | 1500 | static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, |
96303081 | 1501 | u64 offset) |
0f9dd46c | 1502 | { |
b12d6869 | 1503 | ASSERT(offset >= bitmap_start); |
96303081 | 1504 | offset -= bitmap_start; |
34d52cb6 | 1505 | return (unsigned long)(div_u64(offset, unit)); |
96303081 | 1506 | } |
0f9dd46c | 1507 | |
34d52cb6 | 1508 | static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) |
96303081 | 1509 | { |
34d52cb6 | 1510 | return (unsigned long)(div_u64(bytes, unit)); |
96303081 | 1511 | } |
0f9dd46c | 1512 | |
34d52cb6 | 1513 | static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1514 | u64 offset) |
1515 | { | |
1516 | u64 bitmap_start; | |
0ef6447a | 1517 | u64 bytes_per_bitmap; |
0f9dd46c | 1518 | |
34d52cb6 LZ |
1519 | bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; |
1520 | bitmap_start = offset - ctl->start; | |
0ef6447a | 1521 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); |
96303081 | 1522 | bitmap_start *= bytes_per_bitmap; |
34d52cb6 | 1523 | bitmap_start += ctl->start; |
0f9dd46c | 1524 | |
96303081 | 1525 | return bitmap_start; |
0f9dd46c JB |
1526 | } |
1527 | ||
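The three helpers above are pure arithmetic: ctl->unit (the sectorsize) converts byte counts to bit counts, and each bitmap covers BITS_PER_BITMAP * unit bytes starting at a boundary measured from ctl->start (128 MiB with 4 KiB sectors and 4 KiB pages). A minimal userspace sketch of that math follows; the unit, the block-group start and the sample offset are made-up values, not taken from this file.

/*
 * Illustrative sketch only: mimics offset_to_bit() and offset_to_bitmap()
 * assuming unit = 4096 bytes and BITS_PER_BITMAP = 4096 * 8 = 32768 bits,
 * so one bitmap covers 128 MiB.
 */
#include <stdint.h>
#include <stdio.h>

#define UNIT            4096ULL          /* assumed ctl->unit (sectorsize) */
#define BITS_PER_BITMAP (4096ULL * 8)    /* assumed PAGE_SIZE * 8 */

static uint64_t offset_to_bit(uint64_t bitmap_start, uint64_t offset)
{
	return (offset - bitmap_start) / UNIT;
}

static uint64_t offset_to_bitmap(uint64_t ctl_start, uint64_t offset)
{
	uint64_t bytes_per_bitmap = BITS_PER_BITMAP * UNIT;   /* 128 MiB */
	uint64_t rel = offset - ctl_start;

	/* Round down to the start of the bitmap covering 'offset'. */
	return (rel / bytes_per_bitmap) * bytes_per_bitmap + ctl_start;
}

int main(void)
{
	uint64_t start = 1024ULL * 1024 * 1024;                 /* hypothetical block group start */
	uint64_t offset = start + 200ULL * 1024 * 1024 + 8192;  /* made-up free space offset */
	uint64_t bm = offset_to_bitmap(start, offset);

	printf("bitmap starts %llu MiB into the group, bit index %llu\n",
	       (unsigned long long)((bm - start) >> 20),
	       (unsigned long long)offset_to_bit(bm, offset));
	return 0;
}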
96303081 JB |
1528 | static int tree_insert_offset(struct rb_root *root, u64 offset, |
1529 | struct rb_node *node, int bitmap) | |
0f9dd46c JB |
1530 | { |
1531 | struct rb_node **p = &root->rb_node; | |
1532 | struct rb_node *parent = NULL; | |
1533 | struct btrfs_free_space *info; | |
1534 | ||
1535 | while (*p) { | |
1536 | parent = *p; | |
96303081 | 1537 | info = rb_entry(parent, struct btrfs_free_space, offset_index); |
0f9dd46c | 1538 | |
96303081 | 1539 | if (offset < info->offset) { |
0f9dd46c | 1540 | p = &(*p)->rb_left; |
96303081 | 1541 | } else if (offset > info->offset) { |
0f9dd46c | 1542 | p = &(*p)->rb_right; |
96303081 JB |
1543 | } else { |
1544 | /* | |
1545 | * we could have a bitmap entry and an extent entry | |
1546 | * share the same offset. If this is the case, we want | |
1547 | * the extent entry to always be found first if we do a | |
1548 | * linear search through the tree, since we want to have | |
1549 | * the quickest allocation time, and allocating from an | |
1550 | * extent is faster than allocating from a bitmap. So | |
1551 | * if we're inserting a bitmap and we find an entry at | |
1552 | * this offset, we want to go right, or after this entry | |
1553 | * logically. If we are inserting an extent and we've | |
1554 | * found a bitmap, we want to go left, or before | |
1555 | * logically. | |
1556 | */ | |
1557 | if (bitmap) { | |
207dde82 JB |
1558 | if (info->bitmap) { |
1559 | WARN_ON_ONCE(1); | |
1560 | return -EEXIST; | |
1561 | } | |
96303081 JB |
1562 | p = &(*p)->rb_right; |
1563 | } else { | |
207dde82 JB |
1564 | if (!info->bitmap) { |
1565 | WARN_ON_ONCE(1); | |
1566 | return -EEXIST; | |
1567 | } | |
96303081 JB |
1568 | p = &(*p)->rb_left; |
1569 | } | |
1570 | } | |
0f9dd46c JB |
1571 | } |
1572 | ||
1573 | rb_link_node(node, parent, p); | |
1574 | rb_insert_color(node, root); | |
1575 | ||
1576 | return 0; | |
1577 | } | |
1578 | ||
1579 | /* | |
70cb0743 JB |
1580 | * searches the tree for the given offset. |
1581 | * | |
96303081 JB |
1582 | * fuzzy - If this is set, then we are trying to make an allocation, and we just |
1583 | * want a section that has at least bytes size and comes at or after the given | |
1584 | * offset. | |
0f9dd46c | 1585 | */ |
96303081 | 1586 | static struct btrfs_free_space * |
34d52cb6 | 1587 | tree_search_offset(struct btrfs_free_space_ctl *ctl, |
96303081 | 1588 | u64 offset, int bitmap_only, int fuzzy) |
0f9dd46c | 1589 | { |
34d52cb6 | 1590 | struct rb_node *n = ctl->free_space_offset.rb_node; |
96303081 JB |
1591 | struct btrfs_free_space *entry, *prev = NULL; |
1592 | ||
1593 | /* find entry that is closest to the 'offset' */ | |
1594 | while (1) { | |
1595 | if (!n) { | |
1596 | entry = NULL; | |
1597 | break; | |
1598 | } | |
0f9dd46c | 1599 | |
0f9dd46c | 1600 | entry = rb_entry(n, struct btrfs_free_space, offset_index); |
96303081 | 1601 | prev = entry; |
0f9dd46c | 1602 | |
96303081 | 1603 | if (offset < entry->offset) |
0f9dd46c | 1604 | n = n->rb_left; |
96303081 | 1605 | else if (offset > entry->offset) |
0f9dd46c | 1606 | n = n->rb_right; |
96303081 | 1607 | else |
0f9dd46c | 1608 | break; |
0f9dd46c JB |
1609 | } |
1610 | ||
96303081 JB |
1611 | if (bitmap_only) { |
1612 | if (!entry) | |
1613 | return NULL; | |
1614 | if (entry->bitmap) | |
1615 | return entry; | |
0f9dd46c | 1616 | |
96303081 JB |
1617 | /* |
1618 | * A bitmap entry and an extent entry may share the same offset; |
1619 | * in that case, the bitmap entry comes after the extent entry. |
1620 | */ | |
1621 | n = rb_next(n); | |
1622 | if (!n) | |
1623 | return NULL; | |
1624 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | |
1625 | if (entry->offset != offset) | |
1626 | return NULL; | |
0f9dd46c | 1627 | |
96303081 JB |
1628 | WARN_ON(!entry->bitmap); |
1629 | return entry; | |
1630 | } else if (entry) { | |
1631 | if (entry->bitmap) { | |
0f9dd46c | 1632 | /* |
96303081 JB |
1633 | * if the previous extent entry covers the offset, |
1634 | * we should return it instead of the bitmap entry. |
0f9dd46c | 1635 | */ |
de6c4115 MX |
1636 | n = rb_prev(&entry->offset_index); |
1637 | if (n) { | |
96303081 JB |
1638 | prev = rb_entry(n, struct btrfs_free_space, |
1639 | offset_index); | |
de6c4115 MX |
1640 | if (!prev->bitmap && |
1641 | prev->offset + prev->bytes > offset) | |
1642 | entry = prev; | |
0f9dd46c | 1643 | } |
96303081 JB |
1644 | } |
1645 | return entry; | |
1646 | } | |
1647 | ||
1648 | if (!prev) | |
1649 | return NULL; | |
1650 | ||
1651 | /* find last entry before the 'offset' */ | |
1652 | entry = prev; | |
1653 | if (entry->offset > offset) { | |
1654 | n = rb_prev(&entry->offset_index); | |
1655 | if (n) { | |
1656 | entry = rb_entry(n, struct btrfs_free_space, | |
1657 | offset_index); | |
b12d6869 | 1658 | ASSERT(entry->offset <= offset); |
0f9dd46c | 1659 | } else { |
96303081 JB |
1660 | if (fuzzy) |
1661 | return entry; | |
1662 | else | |
1663 | return NULL; | |
0f9dd46c JB |
1664 | } |
1665 | } | |
1666 | ||
96303081 | 1667 | if (entry->bitmap) { |
de6c4115 MX |
1668 | n = rb_prev(&entry->offset_index); |
1669 | if (n) { | |
96303081 JB |
1670 | prev = rb_entry(n, struct btrfs_free_space, |
1671 | offset_index); | |
de6c4115 MX |
1672 | if (!prev->bitmap && |
1673 | prev->offset + prev->bytes > offset) | |
1674 | return prev; | |
96303081 | 1675 | } |
34d52cb6 | 1676 | if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) |
96303081 JB |
1677 | return entry; |
1678 | } else if (entry->offset + entry->bytes > offset) | |
1679 | return entry; | |
1680 | ||
1681 | if (!fuzzy) | |
1682 | return NULL; | |
1683 | ||
1684 | while (1) { | |
1685 | if (entry->bitmap) { | |
1686 | if (entry->offset + BITS_PER_BITMAP * | |
34d52cb6 | 1687 | ctl->unit > offset) |
96303081 JB |
1688 | break; |
1689 | } else { | |
1690 | if (entry->offset + entry->bytes > offset) | |
1691 | break; | |
1692 | } | |
1693 | ||
1694 | n = rb_next(&entry->offset_index); | |
1695 | if (!n) | |
1696 | return NULL; | |
1697 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | |
1698 | } | |
1699 | return entry; | |
0f9dd46c JB |
1700 | } |
1701 | ||
f333adb5 | 1702 | static inline void |
34d52cb6 | 1703 | __unlink_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 | 1704 | struct btrfs_free_space *info) |
0f9dd46c | 1705 | { |
34d52cb6 LZ |
1706 | rb_erase(&info->offset_index, &ctl->free_space_offset); |
1707 | ctl->free_extents--; | |
dfb79ddb | 1708 | |
5dc7c10b | 1709 | if (!info->bitmap && !btrfs_free_space_trimmed(info)) { |
dfb79ddb | 1710 | ctl->discardable_extents[BTRFS_STAT_CURR]--; |
5dc7c10b DZ |
1711 | ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes; |
1712 | } | |
f333adb5 LZ |
1713 | } |
1714 | ||
34d52cb6 | 1715 | static void unlink_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 LZ |
1716 | struct btrfs_free_space *info) |
1717 | { | |
34d52cb6 LZ |
1718 | __unlink_free_space(ctl, info); |
1719 | ctl->free_space -= info->bytes; | |
0f9dd46c JB |
1720 | } |
1721 | ||
34d52cb6 | 1722 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
0f9dd46c JB |
1723 | struct btrfs_free_space *info) |
1724 | { | |
1725 | int ret = 0; | |
1726 | ||
b12d6869 | 1727 | ASSERT(info->bytes || info->bitmap); |
34d52cb6 | 1728 | ret = tree_insert_offset(&ctl->free_space_offset, info->offset, |
96303081 | 1729 | &info->offset_index, (info->bitmap != NULL)); |
0f9dd46c JB |
1730 | if (ret) |
1731 | return ret; | |
1732 | ||
5dc7c10b | 1733 | if (!info->bitmap && !btrfs_free_space_trimmed(info)) { |
dfb79ddb | 1734 | ctl->discardable_extents[BTRFS_STAT_CURR]++; |
5dc7c10b DZ |
1735 | ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; |
1736 | } | |
dfb79ddb | 1737 | |
34d52cb6 LZ |
1738 | ctl->free_space += info->bytes; |
1739 | ctl->free_extents++; | |
96303081 JB |
1740 | return ret; |
1741 | } | |
1742 | ||
bb3ac5a4 MX |
1743 | static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, |
1744 | struct btrfs_free_space *info, | |
1745 | u64 offset, u64 bytes) | |
96303081 | 1746 | { |
dfb79ddb DZ |
1747 | unsigned long start, count, end; |
1748 | int extent_delta = -1; | |
96303081 | 1749 | |
34d52cb6 LZ |
1750 | start = offset_to_bit(info->offset, ctl->unit, offset); |
1751 | count = bytes_to_bits(bytes, ctl->unit); | |
dfb79ddb DZ |
1752 | end = start + count; |
1753 | ASSERT(end <= BITS_PER_BITMAP); | |
96303081 | 1754 | |
f38b6e75 | 1755 | bitmap_clear(info->bitmap, start, count); |
96303081 JB |
1756 | |
1757 | info->bytes -= bytes; | |
553cceb4 JB |
1758 | if (info->max_extent_size > ctl->unit) |
1759 | info->max_extent_size = 0; | |
dfb79ddb DZ |
1760 | |
1761 | if (start && test_bit(start - 1, info->bitmap)) | |
1762 | extent_delta++; | |
1763 | ||
1764 | if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) | |
1765 | extent_delta++; | |
1766 | ||
1767 | info->bitmap_extents += extent_delta; | |
5dc7c10b | 1768 | if (!btrfs_free_space_trimmed(info)) { |
dfb79ddb | 1769 | ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; |
5dc7c10b DZ |
1770 | ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; |
1771 | } | |
bb3ac5a4 MX |
1772 | } |
1773 | ||
1774 | static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, | |
1775 | struct btrfs_free_space *info, u64 offset, | |
1776 | u64 bytes) | |
1777 | { | |
1778 | __bitmap_clear_bits(ctl, info, offset, bytes); | |
34d52cb6 | 1779 | ctl->free_space -= bytes; |
96303081 JB |
1780 | } |
1781 | ||
34d52cb6 | 1782 | static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, |
817d52f8 JB |
1783 | struct btrfs_free_space *info, u64 offset, |
1784 | u64 bytes) | |
96303081 | 1785 | { |
dfb79ddb DZ |
1786 | unsigned long start, count, end; |
1787 | int extent_delta = 1; | |
96303081 | 1788 | |
34d52cb6 LZ |
1789 | start = offset_to_bit(info->offset, ctl->unit, offset); |
1790 | count = bytes_to_bits(bytes, ctl->unit); | |
dfb79ddb DZ |
1791 | end = start + count; |
1792 | ASSERT(end <= BITS_PER_BITMAP); | |
96303081 | 1793 | |
f38b6e75 | 1794 | bitmap_set(info->bitmap, start, count); |
96303081 JB |
1795 | |
1796 | info->bytes += bytes; | |
34d52cb6 | 1797 | ctl->free_space += bytes; |
dfb79ddb DZ |
1798 | |
1799 | if (start && test_bit(start - 1, info->bitmap)) | |
1800 | extent_delta--; | |
1801 | ||
1802 | if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) | |
1803 | extent_delta--; | |
1804 | ||
1805 | info->bitmap_extents += extent_delta; | |
5dc7c10b | 1806 | if (!btrfs_free_space_trimmed(info)) { |
dfb79ddb | 1807 | ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; |
5dc7c10b DZ |
1808 | ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes; |
1809 | } | |
96303081 JB |
1810 | } |
1811 | ||
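bitmap_clear_bits() and bitmap_set_bits() keep info->bitmap_extents consistent by looking only at the two bits adjacent to the run being changed: a newly set run adds one free region unless it touches an existing run on the left or right, in which case it extends or joins them (clearing works the same way in reverse). A small standalone sketch of that delta calculation, using a toy 64-bit bitmap and made-up values:

/*
 * Illustrative sketch only: how setting a run of bits changes the number of
 * contiguous free regions, mirroring the extent_delta logic above.
 */
#include <stdint.h>
#include <stdio.h>

static int bit(uint64_t map, unsigned int i)
{
	return (map >> i) & 1;
}

/* Change in region count when setting bits [start, start + count). */
static int set_delta(uint64_t map, unsigned int start, unsigned int count)
{
	int delta = 1;                          /* a new region appears... */

	if (start && bit(map, start - 1))
		delta--;                        /* ...unless it touches one on the left */
	if (start + count < 64 && bit(map, start + count))
		delta--;                        /* ...or on the right (both: two regions join) */
	return delta;
}

int main(void)
{
	uint64_t map = 0;
	int extents = 0;

	extents += set_delta(map, 0, 4);  map |= 0xfULL;         /* bits 0-3:  +1 */
	extents += set_delta(map, 8, 4);  map |= 0xfULL << 8;    /* bits 8-11: +1 */
	extents += set_delta(map, 4, 4);  map |= 0xfULL << 4;    /* fills gap: -1 */
	printf("free regions tracked: %d\n", extents);           /* prints 1 */
	return 0;
}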
a4820398 MX |
1812 | /* |
1813 | * If we cannot find a suitable extent, we use *bytes to record |
1814 | * the size of the largest free extent found. |
1815 | */ | |
34d52cb6 | 1816 | static int search_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 | 1817 | struct btrfs_free_space *bitmap_info, u64 *offset, |
0584f718 | 1818 | u64 *bytes, bool for_alloc) |
96303081 JB |
1819 | { |
1820 | unsigned long found_bits = 0; | |
a4820398 | 1821 | unsigned long max_bits = 0; |
96303081 JB |
1822 | unsigned long bits, i; |
1823 | unsigned long next_zero; | |
a4820398 | 1824 | unsigned long extent_bits; |
96303081 | 1825 | |
cef40483 JB |
1826 | /* |
1827 | * Skip searching the bitmap if we don't have a contiguous section that | |
1828 | * is large enough for this allocation. | |
1829 | */ | |
0584f718 JB |
1830 | if (for_alloc && |
1831 | bitmap_info->max_extent_size && | |
cef40483 JB |
1832 | bitmap_info->max_extent_size < *bytes) { |
1833 | *bytes = bitmap_info->max_extent_size; | |
1834 | return -1; | |
1835 | } | |
1836 | ||
34d52cb6 | 1837 | i = offset_to_bit(bitmap_info->offset, ctl->unit, |
96303081 | 1838 | max_t(u64, *offset, bitmap_info->offset)); |
34d52cb6 | 1839 | bits = bytes_to_bits(*bytes, ctl->unit); |
96303081 | 1840 | |
ebb3dad4 | 1841 | for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { |
0584f718 JB |
1842 | if (for_alloc && bits == 1) { |
1843 | found_bits = 1; | |
1844 | break; | |
1845 | } | |
96303081 JB |
1846 | next_zero = find_next_zero_bit(bitmap_info->bitmap, |
1847 | BITS_PER_BITMAP, i); | |
a4820398 MX |
1848 | extent_bits = next_zero - i; |
1849 | if (extent_bits >= bits) { | |
1850 | found_bits = extent_bits; | |
96303081 | 1851 | break; |
a4820398 MX |
1852 | } else if (extent_bits > max_bits) { |
1853 | max_bits = extent_bits; | |
96303081 JB |
1854 | } |
1855 | i = next_zero; | |
1856 | } | |
1857 | ||
1858 | if (found_bits) { | |
34d52cb6 LZ |
1859 | *offset = (u64)(i * ctl->unit) + bitmap_info->offset; |
1860 | *bytes = (u64)(found_bits) * ctl->unit; | |
96303081 JB |
1861 | return 0; |
1862 | } | |
1863 | ||
a4820398 | 1864 | *bytes = (u64)(max_bits) * ctl->unit; |
cef40483 | 1865 | bitmap_info->max_extent_size = *bytes; |
96303081 JB |
1866 | return -1; |
1867 | } | |
1868 | ||
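search_bitmap() looks for a run of at least bytes_to_bits(*bytes) consecutive set bits at or after the requested offset; when it fails it reports the longest run it saw through *bytes and caches it in max_extent_size so later allocations can skip the scan entirely. A rough userspace analogue of that loop, with a toy 64-bit bitmap instead of the kernel bitops:

/*
 * Illustrative sketch only: find the first run of at least 'want' set bits
 * (set bit == free unit), otherwise report the longest run seen.
 */
#include <stdint.h>
#include <stdio.h>

#define NBITS 64

static unsigned int next_zero(uint64_t map, unsigned int i)
{
	while (i < NBITS && ((map >> i) & 1))
		i++;
	return i;
}

static int find_run(uint64_t map, unsigned int want,
		    unsigned int *start, unsigned int *len)
{
	unsigned int longest = 0;

	for (unsigned int i = 0; i < NBITS; i++) {
		if (!((map >> i) & 1))
			continue;
		unsigned int nz = next_zero(map, i);
		unsigned int run = nz - i;

		if (run >= want) {
			*start = i;
			*len = run;
			return 0;
		}
		if (run > longest)
			longest = run;
		i = nz;                 /* skip past this too-small run */
	}
	*len = longest;                 /* like caching max_extent_size */
	return -1;
}

int main(void)
{
	uint64_t map = 0x0000ff000000000fULL;   /* runs of 4 and 8 set bits */
	unsigned int start, len;

	if (!find_run(map, 6, &start, &len))
		printf("found %u free units at bit %u\n", len, start);
	return 0;
}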
ad22cf6e JB |
1869 | static inline u64 get_max_extent_size(struct btrfs_free_space *entry) |
1870 | { | |
1871 | if (entry->bitmap) | |
1872 | return entry->max_extent_size; | |
1873 | return entry->bytes; | |
1874 | } | |
1875 | ||
a4820398 | 1876 | /* Cache the size of the max extent in bytes */ |
34d52cb6 | 1877 | static struct btrfs_free_space * |
53b381b3 | 1878 | find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes, |
a4820398 | 1879 | unsigned long align, u64 *max_extent_size) |
96303081 JB |
1880 | { |
1881 | struct btrfs_free_space *entry; | |
1882 | struct rb_node *node; | |
53b381b3 DW |
1883 | u64 tmp; |
1884 | u64 align_off; | |
96303081 JB |
1885 | int ret; |
1886 | ||
34d52cb6 | 1887 | if (!ctl->free_space_offset.rb_node) |
a4820398 | 1888 | goto out; |
96303081 | 1889 | |
34d52cb6 | 1890 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); |
96303081 | 1891 | if (!entry) |
a4820398 | 1892 | goto out; |
96303081 JB |
1893 | |
1894 | for (node = &entry->offset_index; node; node = rb_next(node)) { | |
1895 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
a4820398 | 1896 | if (entry->bytes < *bytes) { |
ad22cf6e JB |
1897 | *max_extent_size = max(get_max_extent_size(entry), |
1898 | *max_extent_size); | |
96303081 | 1899 | continue; |
a4820398 | 1900 | } |
96303081 | 1901 | |
53b381b3 DW |
1902 | /* make sure the space returned is big enough |
1903 | * to match our requested alignment | |
1904 | */ | |
1905 | if (*bytes >= align) { | |
a4820398 | 1906 | tmp = entry->offset - ctl->start + align - 1; |
47c5713f | 1907 | tmp = div64_u64(tmp, align); |
53b381b3 DW |
1908 | tmp = tmp * align + ctl->start; |
1909 | align_off = tmp - entry->offset; | |
1910 | } else { | |
1911 | align_off = 0; | |
1912 | tmp = entry->offset; | |
1913 | } | |
1914 | ||
a4820398 | 1915 | if (entry->bytes < *bytes + align_off) { |
ad22cf6e JB |
1916 | *max_extent_size = max(get_max_extent_size(entry), |
1917 | *max_extent_size); | |
53b381b3 | 1918 | continue; |
a4820398 | 1919 | } |
53b381b3 | 1920 | |
96303081 | 1921 | if (entry->bitmap) { |
a4820398 MX |
1922 | u64 size = *bytes; |
1923 | ||
0584f718 | 1924 | ret = search_bitmap(ctl, entry, &tmp, &size, true); |
53b381b3 DW |
1925 | if (!ret) { |
1926 | *offset = tmp; | |
a4820398 | 1927 | *bytes = size; |
96303081 | 1928 | return entry; |
ad22cf6e JB |
1929 | } else { |
1930 | *max_extent_size = | |
1931 | max(get_max_extent_size(entry), | |
1932 | *max_extent_size); | |
53b381b3 | 1933 | } |
96303081 JB |
1934 | continue; |
1935 | } | |
1936 | ||
53b381b3 DW |
1937 | *offset = tmp; |
1938 | *bytes = entry->bytes - align_off; | |
96303081 JB |
1939 | return entry; |
1940 | } | |
a4820398 | 1941 | out: |
96303081 JB |
1942 | return NULL; |
1943 | } | |
1944 | ||
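The alignment handling in find_free_space() rounds a candidate entry's offset up to the next multiple of align measured from ctl->start, and the skipped bytes (align_off) simply shrink what the entry can satisfy. A worked example of that round-up with made-up numbers:

/*
 * Illustrative sketch only: the round-up find_free_space() performs to honor
 * an allocation alignment. 'start' stands in for ctl->start.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t align_up_from(uint64_t offset, uint64_t start, uint64_t align)
{
	uint64_t tmp = offset - start + align - 1;

	tmp /= align;                    /* div64_u64() in the kernel code */
	return tmp * align + start;
}

int main(void)
{
	uint64_t start  = 1ULL << 30;    /* hypothetical block group start */
	uint64_t offset = start + 12345; /* free extent begins here */
	uint64_t align  = 4096;
	uint64_t aligned = align_up_from(offset, start, align);

	printf("aligned offset: start + %llu, align_off: %llu bytes\n",
	       (unsigned long long)(aligned - start),
	       (unsigned long long)(aligned - offset));   /* 16384 and 4039 */
	return 0;
}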
34d52cb6 | 1945 | static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1946 | struct btrfs_free_space *info, u64 offset) |
1947 | { | |
34d52cb6 | 1948 | info->offset = offset_to_bitmap(ctl, offset); |
f019f426 | 1949 | info->bytes = 0; |
dfb79ddb | 1950 | info->bitmap_extents = 0; |
f2d0f676 | 1951 | INIT_LIST_HEAD(&info->list); |
34d52cb6 LZ |
1952 | link_free_space(ctl, info); |
1953 | ctl->total_bitmaps++; | |
fa598b06 | 1954 | recalculate_thresholds(ctl); |
96303081 JB |
1955 | } |
1956 | ||
34d52cb6 | 1957 | static void free_bitmap(struct btrfs_free_space_ctl *ctl, |
edf6e2d1 LZ |
1958 | struct btrfs_free_space *bitmap_info) |
1959 | { | |
27f0afc7 DZ |
1960 | /* |
1961 | * Normally when this is called, the bitmap is completely empty. However, | |
1962 | * if we are blowing up the free space cache for one reason or another | |
1963 | * via __btrfs_remove_free_space_cache(), then it may not be freed and | |
1964 | * we may leave stats on the table. | |
1965 | */ | |
1966 | if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) { | |
1967 | ctl->discardable_extents[BTRFS_STAT_CURR] -= | |
1968 | bitmap_info->bitmap_extents; | |
1969 | ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes; | |
1970 | ||
1971 | } | |
34d52cb6 | 1972 | unlink_free_space(ctl, bitmap_info); |
3acd4850 | 1973 | kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); |
dc89e982 | 1974 | kmem_cache_free(btrfs_free_space_cachep, bitmap_info); |
34d52cb6 | 1975 | ctl->total_bitmaps--; |
fa598b06 | 1976 | recalculate_thresholds(ctl); |
edf6e2d1 LZ |
1977 | } |
1978 | ||
34d52cb6 | 1979 | static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1980 | struct btrfs_free_space *bitmap_info, |
1981 | u64 *offset, u64 *bytes) | |
1982 | { | |
1983 | u64 end; | |
6606bb97 JB |
1984 | u64 search_start, search_bytes; |
1985 | int ret; | |
96303081 JB |
1986 | |
1987 | again: | |
34d52cb6 | 1988 | end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; |
96303081 | 1989 | |
6606bb97 | 1990 | /* |
bdb7d303 JB |
1991 | * We need to search for bits in this bitmap. We could only cover some |
1992 | * of the extent in this bitmap thanks to how we add space, so we need | |
1993 | * to search for as much of it as we can and clear that amount, and then |
1994 | * go searching for the next bit. | |
6606bb97 JB |
1995 | */ |
1996 | search_start = *offset; | |
bdb7d303 | 1997 | search_bytes = ctl->unit; |
13dbc089 | 1998 | search_bytes = min(search_bytes, end - search_start + 1); |
0584f718 JB |
1999 | ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes, |
2000 | false); | |
b50c6e25 JB |
2001 | if (ret < 0 || search_start != *offset) |
2002 | return -EINVAL; | |
6606bb97 | 2003 | |
bdb7d303 JB |
2004 | /* We may have found more bits than what we need */ |
2005 | search_bytes = min(search_bytes, *bytes); | |
2006 | ||
2007 | /* Cannot clear past the end of the bitmap */ | |
2008 | search_bytes = min(search_bytes, end - search_start + 1); | |
2009 | ||
2010 | bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes); | |
2011 | *offset += search_bytes; | |
2012 | *bytes -= search_bytes; | |
96303081 JB |
2013 | |
2014 | if (*bytes) { | |
6606bb97 | 2015 | struct rb_node *next = rb_next(&bitmap_info->offset_index); |
edf6e2d1 | 2016 | if (!bitmap_info->bytes) |
34d52cb6 | 2017 | free_bitmap(ctl, bitmap_info); |
96303081 | 2018 | |
6606bb97 JB |
2019 | /* |
2020 | * no entry after this bitmap, but we still have bytes to | |
2021 | * remove, so something has gone wrong. | |
2022 | */ | |
2023 | if (!next) | |
96303081 JB |
2024 | return -EINVAL; |
2025 | ||
6606bb97 JB |
2026 | bitmap_info = rb_entry(next, struct btrfs_free_space, |
2027 | offset_index); | |
2028 | ||
2029 | /* | |
2030 | * if the next entry isn't a bitmap we need to return to let the | |
2031 | * extent stuff do its work. | |
2032 | */ | |
96303081 JB |
2033 | if (!bitmap_info->bitmap) |
2034 | return -EAGAIN; | |
2035 | ||
6606bb97 JB |
2036 | /* |
2037 | * Ok the next item is a bitmap, but it may not actually hold | |
2038 | * the information for the rest of this free space stuff, so | |
2039 | * look for it, and if we don't find it return so we can try | |
2040 | * everything over again. | |
2041 | */ | |
2042 | search_start = *offset; | |
bdb7d303 | 2043 | search_bytes = ctl->unit; |
34d52cb6 | 2044 | ret = search_bitmap(ctl, bitmap_info, &search_start, |
0584f718 | 2045 | &search_bytes, false); |
6606bb97 JB |
2046 | if (ret < 0 || search_start != *offset) |
2047 | return -EAGAIN; | |
2048 | ||
96303081 | 2049 | goto again; |
edf6e2d1 | 2050 | } else if (!bitmap_info->bytes) |
34d52cb6 | 2051 | free_bitmap(ctl, bitmap_info); |
96303081 JB |
2052 | |
2053 | return 0; | |
2054 | } | |
2055 | ||
2cdc342c JB |
2056 | static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, |
2057 | struct btrfs_free_space *info, u64 offset, | |
da080fe1 | 2058 | u64 bytes, enum btrfs_trim_state trim_state) |
2cdc342c JB |
2059 | { |
2060 | u64 bytes_to_set = 0; | |
2061 | u64 end; | |
2062 | ||
da080fe1 DZ |
2063 | /* |
2064 | * This is a tradeoff to make bitmap trim state minimal. We mark the | |
2065 | * whole bitmap untrimmed if at any point we add untrimmed regions. | |
2066 | */ | |
dfb79ddb | 2067 | if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) { |
5dc7c10b | 2068 | if (btrfs_free_space_trimmed(info)) { |
dfb79ddb DZ |
2069 | ctl->discardable_extents[BTRFS_STAT_CURR] += |
2070 | info->bitmap_extents; | |
5dc7c10b DZ |
2071 | ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; |
2072 | } | |
da080fe1 | 2073 | info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; |
dfb79ddb | 2074 | } |
da080fe1 | 2075 | |
2cdc342c JB |
2076 | end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); |
2077 | ||
2078 | bytes_to_set = min(end - offset, bytes); | |
2079 | ||
2080 | bitmap_set_bits(ctl, info, offset, bytes_to_set); | |
2081 | ||
cef40483 JB |
2082 | /* |
2083 | * We set some bytes, we have no idea what the max extent size is | |
2084 | * anymore. | |
2085 | */ | |
2086 | info->max_extent_size = 0; | |
2087 | ||
2cdc342c JB |
2088 | return bytes_to_set; |
2089 | ||
2090 | } | |
2091 | ||
34d52cb6 LZ |
2092 | static bool use_bitmap(struct btrfs_free_space_ctl *ctl, |
2093 | struct btrfs_free_space *info) | |
96303081 | 2094 | { |
32da5386 | 2095 | struct btrfs_block_group *block_group = ctl->private; |
0b246afa | 2096 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
d0bd4560 JB |
2097 | bool forced = false; |
2098 | ||
2099 | #ifdef CONFIG_BTRFS_DEBUG | |
2ff7e61e | 2100 | if (btrfs_should_fragment_free_space(block_group)) |
d0bd4560 JB |
2101 | forced = true; |
2102 | #endif | |
96303081 | 2103 | |
5d90c5c7 DZ |
2104 | /* This is a way to reclaim large regions from the bitmaps. */ |
2105 | if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) | |
2106 | return false; | |
2107 | ||
96303081 JB |
2108 | /* |
2109 | * If we are below the extents threshold then we can add this as an | |
2110 | * extent, and don't have to deal with the bitmap | |
2111 | */ | |
d0bd4560 | 2112 | if (!forced && ctl->free_extents < ctl->extents_thresh) { |
32cb0840 JB |
2113 | /* |
2114 | * If this block group has some small extents we don't want to | |
2115 | * use up all of our free slots in the cache with them, we want | |
01327610 | 2116 | * to reserve them for larger extents; however, if we have plenty |
32cb0840 JB |
2117 | * of cache left then go ahead and add them, no sense in adding |
2118 | * the overhead of a bitmap if we don't have to. | |
2119 | */ | |
f9bb615a DZ |
2120 | if (info->bytes <= fs_info->sectorsize * 8) { |
2121 | if (ctl->free_extents * 3 <= ctl->extents_thresh) | |
34d52cb6 | 2122 | return false; |
32cb0840 | 2123 | } else { |
34d52cb6 | 2124 | return false; |
32cb0840 JB |
2125 | } |
2126 | } | |
96303081 JB |
2127 | |
2128 | /* | |
dde5740f JB |
2129 | * The original block groups from mkfs can be really small, like 8 |
2130 | * megabytes, so don't bother with a bitmap for those entries. However | |
2131 | * some block groups can be smaller than what a bitmap would cover but | |
2132 | * are still large enough that they could overflow the 32k memory limit, | |
2133 | * so still allow those block groups to have a bitmap |
2134 | * entry. | |
96303081 | 2135 | */ |
b3470b5d | 2136 | if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) |
34d52cb6 LZ |
2137 | return false; |
2138 | ||
2139 | return true; | |
2140 | } | |
2141 | ||
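Leaving aside the debug-only fragmentation override, the policy above reads as a small pure function: regions of FORCE_EXTENT_THRESHOLD (1 MiB) or more always stay extent entries, extent entries are preferred while the block group is under its entry budget (except that pieces of at most 8 sectors may switch to a bitmap once a third of the budget is used), and block groups smaller than half a bitmap's coverage never get bitmaps. A sketch of that decision, assuming a 4 KiB sectorsize and 4 KiB pages; the numbers in main() are made up:

/*
 * Illustrative sketch only: the use_bitmap() decision with the
 * CONFIG_BTRFS_DEBUG override left out.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FORCE_EXTENT_THRESHOLD  (1024ULL * 1024)   /* SZ_1M */
#define BITS_PER_BITMAP         (4096ULL * 8)      /* assumed PAGE_SIZE * 8 */
#define SECTORSIZE              4096ULL            /* assumed ctl->unit */

static bool should_use_bitmap(uint64_t bytes, unsigned int free_extents,
			      unsigned int extents_thresh, uint64_t bg_length)
{
	/* Large regions stay as extent entries so they are easy to reclaim. */
	if (bytes >= FORCE_EXTENT_THRESHOLD)
		return false;

	/* Still under the extent-entry budget: prefer an extent entry... */
	if (free_extents < extents_thresh) {
		/* ...but tiny pieces go to a bitmap once 1/3 of the budget is used. */
		if (bytes <= SECTORSIZE * 8) {
			if (free_extents * 3 <= extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/* Tiny block groups (under half a bitmap's coverage) get no bitmaps. */
	if (((BITS_PER_BITMAP * SECTORSIZE) >> 1) > bg_length)
		return false;

	return true;
}

int main(void)
{
	/* A 64 KiB free region, 100 of 512 entry slots used, 1 GiB block group. */
	printf("%s\n", should_use_bitmap(64 * 1024, 100, 512,
					 1024ULL * 1024 * 1024) ? "bitmap" : "extent");
	return 0;
}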
20e5506b | 2142 | static const struct btrfs_free_space_op free_space_op = { |
2cdc342c JB |
2143 | .use_bitmap = use_bitmap, |
2144 | }; | |
2145 | ||
34d52cb6 LZ |
2146 | static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, |
2147 | struct btrfs_free_space *info) | |
2148 | { | |
2149 | struct btrfs_free_space *bitmap_info; | |
32da5386 | 2150 | struct btrfs_block_group *block_group = NULL; |
34d52cb6 | 2151 | int added = 0; |
2cdc342c | 2152 | u64 bytes, offset, bytes_added; |
da080fe1 | 2153 | enum btrfs_trim_state trim_state; |
34d52cb6 | 2154 | int ret; |
96303081 JB |
2155 | |
2156 | bytes = info->bytes; | |
2157 | offset = info->offset; | |
da080fe1 | 2158 | trim_state = info->trim_state; |
96303081 | 2159 | |
34d52cb6 LZ |
2160 | if (!ctl->op->use_bitmap(ctl, info)) |
2161 | return 0; | |
2162 | ||
2cdc342c JB |
2163 | if (ctl->op == &free_space_op) |
2164 | block_group = ctl->private; | |
38e87880 | 2165 | again: |
2cdc342c JB |
2166 | /* |
2167 | * Since we link bitmaps right into the cluster we need to see if we | |
2168 | * have a cluster here, and if so and it has our bitmap we need to add | |
2169 | * the free space to that bitmap. | |
2170 | */ | |
2171 | if (block_group && !list_empty(&block_group->cluster_list)) { | |
2172 | struct btrfs_free_cluster *cluster; | |
2173 | struct rb_node *node; | |
2174 | struct btrfs_free_space *entry; | |
2175 | ||
2176 | cluster = list_entry(block_group->cluster_list.next, | |
2177 | struct btrfs_free_cluster, | |
2178 | block_group_list); | |
2179 | spin_lock(&cluster->lock); | |
2180 | node = rb_first(&cluster->root); | |
2181 | if (!node) { | |
2182 | spin_unlock(&cluster->lock); | |
38e87880 | 2183 | goto no_cluster_bitmap; |
2cdc342c JB |
2184 | } |
2185 | ||
2186 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
2187 | if (!entry->bitmap) { | |
2188 | spin_unlock(&cluster->lock); | |
38e87880 | 2189 | goto no_cluster_bitmap; |
2cdc342c JB |
2190 | } |
2191 | ||
2192 | if (entry->offset == offset_to_bitmap(ctl, offset)) { | |
da080fe1 DZ |
2193 | bytes_added = add_bytes_to_bitmap(ctl, entry, offset, |
2194 | bytes, trim_state); | |
2cdc342c JB |
2195 | bytes -= bytes_added; |
2196 | offset += bytes_added; | |
2197 | } | |
2198 | spin_unlock(&cluster->lock); | |
2199 | if (!bytes) { | |
2200 | ret = 1; | |
2201 | goto out; | |
2202 | } | |
2203 | } | |
38e87880 CM |
2204 | |
2205 | no_cluster_bitmap: | |
34d52cb6 | 2206 | bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
96303081 JB |
2207 | 1, 0); |
2208 | if (!bitmap_info) { | |
b12d6869 | 2209 | ASSERT(added == 0); |
96303081 JB |
2210 | goto new_bitmap; |
2211 | } | |
2212 | ||
da080fe1 DZ |
2213 | bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes, |
2214 | trim_state); | |
2cdc342c JB |
2215 | bytes -= bytes_added; |
2216 | offset += bytes_added; | |
2217 | added = 0; | |
96303081 JB |
2218 | |
2219 | if (!bytes) { | |
2220 | ret = 1; | |
2221 | goto out; | |
2222 | } else | |
2223 | goto again; | |
2224 | ||
2225 | new_bitmap: | |
2226 | if (info && info->bitmap) { | |
34d52cb6 | 2227 | add_new_bitmap(ctl, info, offset); |
96303081 JB |
2228 | added = 1; |
2229 | info = NULL; | |
2230 | goto again; | |
2231 | } else { | |
34d52cb6 | 2232 | spin_unlock(&ctl->tree_lock); |
96303081 JB |
2233 | |
2234 | /* no pre-allocated info, allocate a new one */ | |
2235 | if (!info) { | |
dc89e982 JB |
2236 | info = kmem_cache_zalloc(btrfs_free_space_cachep, |
2237 | GFP_NOFS); | |
96303081 | 2238 | if (!info) { |
34d52cb6 | 2239 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2240 | ret = -ENOMEM; |
2241 | goto out; | |
2242 | } | |
2243 | } | |
2244 | ||
2245 | /* allocate the bitmap */ | |
3acd4850 CL |
2246 | info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, |
2247 | GFP_NOFS); | |
da080fe1 | 2248 | info->trim_state = BTRFS_TRIM_STATE_TRIMMED; |
34d52cb6 | 2249 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2250 | if (!info->bitmap) { |
2251 | ret = -ENOMEM; | |
2252 | goto out; | |
2253 | } | |
2254 | goto again; | |
2255 | } | |
2256 | ||
2257 | out: | |
2258 | if (info) { | |
3acd4850 CL |
2259 | if (info->bitmap) |
2260 | kmem_cache_free(btrfs_free_space_bitmap_cachep, | |
2261 | info->bitmap); | |
dc89e982 | 2262 | kmem_cache_free(btrfs_free_space_cachep, info); |
96303081 | 2263 | } |
0f9dd46c JB |
2264 | |
2265 | return ret; | |
2266 | } | |
2267 | ||
a7ccb255 DZ |
2268 | /* |
2269 | * Free space merging rules: | |
2270 | * 1) Merge trimmed areas together | |
2271 | * 2) Let untrimmed areas coalesce with trimmed areas | |
2272 | * 3) Always pull neighboring regions from bitmaps | |
2273 | * | |
2274 | * The above rules are for when we merge free space based on btrfs_trim_state. | |
2275 | * Rules 2 and 3 are subtle because they are suboptimal, but are done for the | |
2276 | * same reason: to promote larger extent regions which makes life easier for | |
2277 | * find_free_extent(). Rule 2 enables coalescing based on the common path | |
2278 | * being returning free space from btrfs_finish_extent_commit(). So when free | |
2279 | * space is trimmed, it will prevent aggregating trimmed new region and | |
2280 | * untrimmed regions in the rb_tree. Rule 3 is purely to obtain larger extents | |
2281 | * and provide find_free_extent() with the largest extents possible hoping for | |
2282 | * the reuse path. | |
2283 | */ | |
945d8962 | 2284 | static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 | 2285 | struct btrfs_free_space *info, bool update_stat) |
0f9dd46c | 2286 | { |
bf53d468 | 2287 | struct btrfs_free_space *left_info = NULL; |
120d66ee LZ |
2288 | struct btrfs_free_space *right_info; |
2289 | bool merged = false; | |
2290 | u64 offset = info->offset; | |
2291 | u64 bytes = info->bytes; | |
a7ccb255 | 2292 | const bool is_trimmed = btrfs_free_space_trimmed(info); |
6226cb0a | 2293 | |
0f9dd46c JB |
2294 | /* |
2295 | * first we want to see if there is free space adjacent to the range we | |
2296 | * are adding, if there is remove that struct and add a new one to | |
2297 | * cover the entire range | |
2298 | */ | |
34d52cb6 | 2299 | right_info = tree_search_offset(ctl, offset + bytes, 0, 0); |
96303081 JB |
2300 | if (right_info && rb_prev(&right_info->offset_index)) |
2301 | left_info = rb_entry(rb_prev(&right_info->offset_index), | |
2302 | struct btrfs_free_space, offset_index); | |
bf53d468 | 2303 | else if (!right_info) |
34d52cb6 | 2304 | left_info = tree_search_offset(ctl, offset - 1, 0, 0); |
0f9dd46c | 2305 | |
a7ccb255 DZ |
2306 | /* See try_merge_free_space() comment. */ |
2307 | if (right_info && !right_info->bitmap && | |
2308 | (!is_trimmed || btrfs_free_space_trimmed(right_info))) { | |
f333adb5 | 2309 | if (update_stat) |
34d52cb6 | 2310 | unlink_free_space(ctl, right_info); |
f333adb5 | 2311 | else |
34d52cb6 | 2312 | __unlink_free_space(ctl, right_info); |
6226cb0a | 2313 | info->bytes += right_info->bytes; |
dc89e982 | 2314 | kmem_cache_free(btrfs_free_space_cachep, right_info); |
120d66ee | 2315 | merged = true; |
0f9dd46c JB |
2316 | } |
2317 | ||
a7ccb255 | 2318 | /* See try_merge_free_space() comment. */ |
96303081 | 2319 | if (left_info && !left_info->bitmap && |
a7ccb255 DZ |
2320 | left_info->offset + left_info->bytes == offset && |
2321 | (!is_trimmed || btrfs_free_space_trimmed(left_info))) { | |
f333adb5 | 2322 | if (update_stat) |
34d52cb6 | 2323 | unlink_free_space(ctl, left_info); |
f333adb5 | 2324 | else |
34d52cb6 | 2325 | __unlink_free_space(ctl, left_info); |
6226cb0a JB |
2326 | info->offset = left_info->offset; |
2327 | info->bytes += left_info->bytes; | |
dc89e982 | 2328 | kmem_cache_free(btrfs_free_space_cachep, left_info); |
120d66ee | 2329 | merged = true; |
0f9dd46c JB |
2330 | } |
2331 | ||
120d66ee LZ |
2332 | return merged; |
2333 | } | |
2334 | ||
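A consequence of rule 2 above is an asymmetry in merging: a trimmed entry never absorbs an untrimmed neighbor (that would silently count untrimmed space as trimmed), while an untrimmed entry absorbs either kind and the result stays untrimmed. A reduced sketch of the right-neighbor merge under that rule, with plain structs and made-up values:

/*
 * Illustrative sketch only: the right-neighbor merge of try_merge_free_space()
 * boiled down to the adjacency and trim-state checks.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct span {
	uint64_t offset;
	uint64_t bytes;
	bool trimmed;
};

static bool merge_right(struct span *info, const struct span *right)
{
	if (right->offset != info->offset + info->bytes)
		return false;                 /* not adjacent */
	if (info->trimmed && !right->trimmed)
		return false;                 /* would mis-count untrimmed space as trimmed */
	info->bytes += right->bytes;          /* info keeps its own trim state */
	return true;
}

int main(void)
{
	struct span info  = { .offset = 0,    .bytes = 4096, .trimmed = false };
	struct span right = { .offset = 4096, .bytes = 8192, .trimmed = true  };

	if (merge_right(&info, &right))
		printf("merged into [%llu, %llu)\n",
		       (unsigned long long)info.offset,
		       (unsigned long long)(info.offset + info.bytes));
	return 0;
}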
20005523 FM |
2335 | static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl, |
2336 | struct btrfs_free_space *info, | |
2337 | bool update_stat) | |
2338 | { | |
2339 | struct btrfs_free_space *bitmap; | |
2340 | unsigned long i; | |
2341 | unsigned long j; | |
2342 | const u64 end = info->offset + info->bytes; | |
2343 | const u64 bitmap_offset = offset_to_bitmap(ctl, end); | |
2344 | u64 bytes; | |
2345 | ||
2346 | bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0); | |
2347 | if (!bitmap) | |
2348 | return false; | |
2349 | ||
2350 | i = offset_to_bit(bitmap->offset, ctl->unit, end); | |
2351 | j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); | |
2352 | if (j == i) | |
2353 | return false; | |
2354 | bytes = (j - i) * ctl->unit; | |
2355 | info->bytes += bytes; | |
2356 | ||
a7ccb255 DZ |
2357 | /* See try_merge_free_space() comment. */ |
2358 | if (!btrfs_free_space_trimmed(bitmap)) | |
2359 | info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; | |
2360 | ||
20005523 FM |
2361 | if (update_stat) |
2362 | bitmap_clear_bits(ctl, bitmap, end, bytes); | |
2363 | else | |
2364 | __bitmap_clear_bits(ctl, bitmap, end, bytes); | |
2365 | ||
2366 | if (!bitmap->bytes) | |
2367 | free_bitmap(ctl, bitmap); | |
2368 | ||
2369 | return true; | |
2370 | } | |
2371 | ||
2372 | static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl, | |
2373 | struct btrfs_free_space *info, | |
2374 | bool update_stat) | |
2375 | { | |
2376 | struct btrfs_free_space *bitmap; | |
2377 | u64 bitmap_offset; | |
2378 | unsigned long i; | |
2379 | unsigned long j; | |
2380 | unsigned long prev_j; | |
2381 | u64 bytes; | |
2382 | ||
2383 | bitmap_offset = offset_to_bitmap(ctl, info->offset); | |
2384 | /* If we're on a boundary, try the previous logical bitmap. */ | |
2385 | if (bitmap_offset == info->offset) { | |
2386 | if (info->offset == 0) | |
2387 | return false; | |
2388 | bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); | |
2389 | } | |
2390 | ||
2391 | bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0); | |
2392 | if (!bitmap) | |
2393 | return false; | |
2394 | ||
2395 | i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; | |
2396 | j = 0; | |
2397 | prev_j = (unsigned long)-1; | |
2398 | for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { | |
2399 | if (j > i) | |
2400 | break; | |
2401 | prev_j = j; | |
2402 | } | |
2403 | if (prev_j == i) | |
2404 | return false; | |
2405 | ||
2406 | if (prev_j == (unsigned long)-1) | |
2407 | bytes = (i + 1) * ctl->unit; | |
2408 | else | |
2409 | bytes = (i - prev_j) * ctl->unit; | |
2410 | ||
2411 | info->offset -= bytes; | |
2412 | info->bytes += bytes; | |
2413 | ||
a7ccb255 DZ |
2414 | /* See try_merge_free_space() comment. */ |
2415 | if (!btrfs_free_space_trimmed(bitmap)) | |
2416 | info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; | |
2417 | ||
20005523 FM |
2418 | if (update_stat) |
2419 | bitmap_clear_bits(ctl, bitmap, info->offset, bytes); | |
2420 | else | |
2421 | __bitmap_clear_bits(ctl, bitmap, info->offset, bytes); | |
2422 | ||
2423 | if (!bitmap->bytes) | |
2424 | free_bitmap(ctl, bitmap); | |
2425 | ||
2426 | return true; | |
2427 | } | |
2428 | ||
2429 | /* | |
2430 | * We prefer always to allocate from extent entries, both for clustered and | |
2431 | * non-clustered allocation requests. So when attempting to add a new extent | |
2432 | * entry, try to see if there's adjacent free space in bitmap entries, and if | |
2433 | * there is, migrate that space from the bitmaps to the extent. | |
2434 | * This way we get better chances of satisfying space allocation requests |
2435 | * because we attempt to satisfy them based on a single cache entry, and never | |
2436 | * on 2 or more entries - even if the entries represent a contiguous free space | |
2437 | * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry | |
2438 | * ends). | |
2439 | */ | |
2440 | static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl, | |
2441 | struct btrfs_free_space *info, | |
2442 | bool update_stat) | |
2443 | { | |
2444 | /* | |
2445 | * Only work with disconnected entries, as we can change their offset, | |
2446 | * and must be extent entries. | |
2447 | */ | |
2448 | ASSERT(!info->bitmap); | |
2449 | ASSERT(RB_EMPTY_NODE(&info->offset_index)); | |
2450 | ||
2451 | if (ctl->total_bitmaps > 0) { | |
2452 | bool stole_end; | |
2453 | bool stole_front = false; | |
2454 | ||
2455 | stole_end = steal_from_bitmap_to_end(ctl, info, update_stat); | |
2456 | if (ctl->total_bitmaps > 0) | |
2457 | stole_front = steal_from_bitmap_to_front(ctl, info, | |
2458 | update_stat); | |
2459 | ||
2460 | if (stole_end || stole_front) | |
2461 | try_merge_free_space(ctl, info, update_stat); | |
2462 | } | |
2463 | } | |
2464 | ||
ab8d0fc4 JM |
2465 | int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, |
2466 | struct btrfs_free_space_ctl *ctl, | |
a7ccb255 DZ |
2467 | u64 offset, u64 bytes, |
2468 | enum btrfs_trim_state trim_state) | |
120d66ee | 2469 | { |
b0643e59 | 2470 | struct btrfs_block_group *block_group = ctl->private; |
120d66ee LZ |
2471 | struct btrfs_free_space *info; |
2472 | int ret = 0; | |
7fe6d45e | 2473 | u64 filter_bytes = bytes; |
120d66ee | 2474 | |
169e0da9 NA |
2475 | ASSERT(!btrfs_is_zoned(fs_info)); |
2476 | ||
dc89e982 | 2477 | info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); |
120d66ee LZ |
2478 | if (!info) |
2479 | return -ENOMEM; | |
2480 | ||
2481 | info->offset = offset; | |
2482 | info->bytes = bytes; | |
a7ccb255 | 2483 | info->trim_state = trim_state; |
20005523 | 2484 | RB_CLEAR_NODE(&info->offset_index); |
120d66ee | 2485 | |
34d52cb6 | 2486 | spin_lock(&ctl->tree_lock); |
120d66ee | 2487 | |
34d52cb6 | 2488 | if (try_merge_free_space(ctl, info, true)) |
120d66ee LZ |
2489 | goto link; |
2490 | ||
2491 | /* | |
2492 | * There was no extent directly to the left or right of this new | |
2493 | * extent then we know we're going to have to allocate a new extent, so | |
2494 | * before we do that see if we need to drop this into a bitmap | |
2495 | */ | |
34d52cb6 | 2496 | ret = insert_into_bitmap(ctl, info); |
120d66ee LZ |
2497 | if (ret < 0) { |
2498 | goto out; | |
2499 | } else if (ret) { | |
2500 | ret = 0; | |
2501 | goto out; | |
2502 | } | |
2503 | link: | |
20005523 FM |
2504 | /* |
2505 | * Only steal free space from adjacent bitmaps if we're sure we're not | |
2506 | * going to add the new free space to existing bitmap entries - because | |
2507 | * that would mean unnecessary work that would be reverted. Therefore | |
2508 | * attempt to steal space from bitmaps if we're adding an extent entry. | |
2509 | */ | |
2510 | steal_from_bitmap(ctl, info, true); | |
2511 | ||
7fe6d45e DZ |
2512 | filter_bytes = max(filter_bytes, info->bytes); |
2513 | ||
34d52cb6 | 2514 | ret = link_free_space(ctl, info); |
0f9dd46c | 2515 | if (ret) |
dc89e982 | 2516 | kmem_cache_free(btrfs_free_space_cachep, info); |
96303081 | 2517 | out: |
66b53bae | 2518 | btrfs_discard_update_discardable(block_group); |
34d52cb6 | 2519 | spin_unlock(&ctl->tree_lock); |
6226cb0a | 2520 | |
0f9dd46c | 2521 | if (ret) { |
ab8d0fc4 | 2522 | btrfs_crit(fs_info, "unable to add free space :%d", ret); |
b12d6869 | 2523 | ASSERT(ret != -EEXIST); |
0f9dd46c JB |
2524 | } |
2525 | ||
7fe6d45e DZ |
2526 | if (trim_state != BTRFS_TRIM_STATE_TRIMMED) { |
2527 | btrfs_discard_check_filter(block_group, filter_bytes); | |
b0643e59 | 2528 | btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); |
7fe6d45e | 2529 | } |
b0643e59 | 2530 | |
0f9dd46c JB |
2531 | return ret; |
2532 | } | |
2533 | ||
169e0da9 NA |
2534 | static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group, |
2535 | u64 bytenr, u64 size, bool used) | |
2536 | { | |
18bb8bbf | 2537 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
169e0da9 NA |
2538 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
2539 | u64 offset = bytenr - block_group->start; | |
2540 | u64 to_free, to_unusable; | |
77233c2d | 2541 | const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold); |
169e0da9 NA |
2542 | |
2543 | spin_lock(&ctl->tree_lock); | |
2544 | if (!used) | |
2545 | to_free = size; | |
2546 | else if (offset >= block_group->alloc_offset) | |
2547 | to_free = size; | |
2548 | else if (offset + size <= block_group->alloc_offset) | |
2549 | to_free = 0; | |
2550 | else | |
2551 | to_free = offset + size - block_group->alloc_offset; | |
2552 | to_unusable = size - to_free; | |
2553 | ||
2554 | ctl->free_space += to_free; | |
badae9c8 NA |
2555 | /* |
2556 | * If the block group is read-only, we should account freed space into | |
2557 | * bytes_readonly. | |
2558 | */ | |
2559 | if (!block_group->ro) | |
2560 | block_group->zone_unusable += to_unusable; | |
169e0da9 NA |
2561 | spin_unlock(&ctl->tree_lock); |
2562 | if (!used) { | |
2563 | spin_lock(&block_group->lock); | |
2564 | block_group->alloc_offset -= size; | |
2565 | spin_unlock(&block_group->lock); | |
2566 | } | |
2567 | ||
2568 | /* The whole region is now unusable. Mark it as unused and reclaim it. */ |
18bb8bbf | 2569 | if (block_group->zone_unusable == block_group->length) { |
169e0da9 | 2570 | btrfs_mark_bg_unused(block_group); |
77233c2d JT |
2571 | } else if (bg_reclaim_threshold && |
2572 | block_group->zone_unusable >= | |
2573 | div_factor_fine(block_group->length, bg_reclaim_threshold)) { | |
18bb8bbf JT |
2574 | btrfs_mark_bg_to_reclaim(block_group); |
2575 | } | |
169e0da9 NA |
2576 | |
2577 | return 0; | |
2578 | } | |
2579 | ||
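For zoned block groups the freed range is split around the zone allocation pointer: bytes at or beyond alloc_offset were never written and return to free_space, while bytes behind it can only be reused after a zone reset and are accounted as zone_unusable. A sketch of just that split (the used == false shortcut, where the whole range is freed, is left out; offsets are relative to the block group start and the numbers are made up):

/*
 * Illustrative sketch only: how a freed [offset, offset + size) range is
 * divided into reusable and zone-unusable parts around alloc_offset.
 */
#include <stdint.h>
#include <stdio.h>

static void split_freed(uint64_t offset, uint64_t size, uint64_t alloc_offset,
			uint64_t *to_free, uint64_t *to_unusable)
{
	if (offset >= alloc_offset)
		*to_free = size;                        /* never written: fully reusable */
	else if (offset + size <= alloc_offset)
		*to_free = 0;                           /* fully behind the write pointer */
	else
		*to_free = offset + size - alloc_offset;
	*to_unusable = size - *to_free;
}

int main(void)
{
	uint64_t to_free, to_unusable;

	/* 1 MiB freed, straddling a write pointer at 1.5 MiB. */
	split_freed(1024 * 1024, 1024 * 1024, 1536 * 1024, &to_free, &to_unusable);
	printf("to_free=%llu to_unusable=%llu\n",
	       (unsigned long long)to_free, (unsigned long long)to_unusable);
	return 0;
}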
32da5386 | 2580 | int btrfs_add_free_space(struct btrfs_block_group *block_group, |
478b4d9f JB |
2581 | u64 bytenr, u64 size) |
2582 | { | |
a7ccb255 DZ |
2583 | enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; |
2584 | ||
169e0da9 NA |
2585 | if (btrfs_is_zoned(block_group->fs_info)) |
2586 | return __btrfs_add_free_space_zoned(block_group, bytenr, size, | |
2587 | true); | |
2588 | ||
a7ccb255 DZ |
2589 | if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC)) |
2590 | trim_state = BTRFS_TRIM_STATE_TRIMMED; | |
2591 | ||
478b4d9f JB |
2592 | return __btrfs_add_free_space(block_group->fs_info, |
2593 | block_group->free_space_ctl, | |
a7ccb255 | 2594 | bytenr, size, trim_state); |
478b4d9f JB |
2595 | } |
2596 | ||
169e0da9 NA |
2597 | int btrfs_add_free_space_unused(struct btrfs_block_group *block_group, |
2598 | u64 bytenr, u64 size) | |
2599 | { | |
2600 | if (btrfs_is_zoned(block_group->fs_info)) | |
2601 | return __btrfs_add_free_space_zoned(block_group, bytenr, size, | |
2602 | false); | |
2603 | ||
2604 | return btrfs_add_free_space(block_group, bytenr, size); | |
2605 | } | |
2606 | ||
b0643e59 DZ |
2607 | /* |
2608 | * This is a subtle distinction because when adding free space back in general, | |
2609 | * we want it to be added as untrimmed for async discard. But in the case where we add |
2610 | * it on loading of a block group, we want to consider it trimmed. | |
2611 | */ | |
2612 | int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group, | |
2613 | u64 bytenr, u64 size) | |
2614 | { | |
2615 | enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; | |
2616 | ||
169e0da9 NA |
2617 | if (btrfs_is_zoned(block_group->fs_info)) |
2618 | return __btrfs_add_free_space_zoned(block_group, bytenr, size, | |
2619 | true); | |
2620 | ||
b0643e59 DZ |
2621 | if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) || |
2622 | btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) | |
2623 | trim_state = BTRFS_TRIM_STATE_TRIMMED; | |
2624 | ||
2625 | return __btrfs_add_free_space(block_group->fs_info, | |
2626 | block_group->free_space_ctl, | |
2627 | bytenr, size, trim_state); | |
2628 | } | |
2629 | ||
32da5386 | 2630 | int btrfs_remove_free_space(struct btrfs_block_group *block_group, |
6226cb0a | 2631 | u64 offset, u64 bytes) |
0f9dd46c | 2632 | { |
34d52cb6 | 2633 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0f9dd46c | 2634 | struct btrfs_free_space *info; |
b0175117 JB |
2635 | int ret; |
2636 | bool re_search = false; | |
0f9dd46c | 2637 | |
011b41bf NA |
2638 | if (btrfs_is_zoned(block_group->fs_info)) { |
2639 | /* | |
2640 | * This can happen with conventional zones when replaying the log. |
2641 | * Since the allocation info of tree-log nodes is not recorded |
2642 | * to the extent-tree, calculate_alloc_pointer() failed to |
2643 | * advance the allocation pointer after the last allocated tree log |
2644 | * node blocks. | |
2645 | * | |
2646 | * This function is called from | |
2647 | * btrfs_pin_extent_for_log_replay() when replaying the log. | |
2648 | * Advance the pointer not to overwrite the tree-log nodes. | |
2649 | */ | |
0ae79c6f NA |
2650 | if (block_group->start + block_group->alloc_offset < |
2651 | offset + bytes) { | |
2652 | block_group->alloc_offset = | |
2653 | offset + bytes - block_group->start; | |
2654 | } | |
169e0da9 | 2655 | return 0; |
011b41bf | 2656 | } |
169e0da9 | 2657 | |
34d52cb6 | 2658 | spin_lock(&ctl->tree_lock); |
6226cb0a | 2659 | |
96303081 | 2660 | again: |
b0175117 | 2661 | ret = 0; |
bdb7d303 JB |
2662 | if (!bytes) |
2663 | goto out_lock; | |
2664 | ||
34d52cb6 | 2665 | info = tree_search_offset(ctl, offset, 0, 0); |
96303081 | 2666 | if (!info) { |
6606bb97 JB |
2667 | /* |
2668 | * Oops, we didn't find an extent that matched the space we wanted |
2669 | * to remove; look for a bitmap instead. |
2670 | */ | |
34d52cb6 | 2671 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
6606bb97 JB |
2672 | 1, 0); |
2673 | if (!info) { | |
b0175117 JB |
2674 | /* |
2675 | * If we found a partial bit of our free space in a | |
2676 | * bitmap but then couldn't find the other part this may | |
2677 | * be a problem, so WARN about it. | |
24a70313 | 2678 | */ |
b0175117 | 2679 | WARN_ON(re_search); |
6606bb97 JB |
2680 | goto out_lock; |
2681 | } | |
96303081 JB |
2682 | } |
2683 | ||
b0175117 | 2684 | re_search = false; |
bdb7d303 | 2685 | if (!info->bitmap) { |
34d52cb6 | 2686 | unlink_free_space(ctl, info); |
bdb7d303 JB |
2687 | if (offset == info->offset) { |
2688 | u64 to_free = min(bytes, info->bytes); | |
2689 | ||
2690 | info->bytes -= to_free; | |
2691 | info->offset += to_free; | |
2692 | if (info->bytes) { | |
2693 | ret = link_free_space(ctl, info); | |
2694 | WARN_ON(ret); | |
2695 | } else { | |
2696 | kmem_cache_free(btrfs_free_space_cachep, info); | |
2697 | } | |
0f9dd46c | 2698 | |
bdb7d303 JB |
2699 | offset += to_free; |
2700 | bytes -= to_free; | |
2701 | goto again; | |
2702 | } else { | |
2703 | u64 old_end = info->bytes + info->offset; | |
9b49c9b9 | 2704 | |
bdb7d303 | 2705 | info->bytes = offset - info->offset; |
34d52cb6 | 2706 | ret = link_free_space(ctl, info); |
96303081 JB |
2707 | WARN_ON(ret); |
2708 | if (ret) | |
2709 | goto out_lock; | |
96303081 | 2710 | |
bdb7d303 JB |
2711 | /* Not enough bytes in this entry to satisfy us */ |
2712 | if (old_end < offset + bytes) { | |
2713 | bytes -= old_end - offset; | |
2714 | offset = old_end; | |
2715 | goto again; | |
2716 | } else if (old_end == offset + bytes) { | |
2717 | /* all done */ | |
2718 | goto out_lock; | |
2719 | } | |
2720 | spin_unlock(&ctl->tree_lock); | |
2721 | ||
a7ccb255 DZ |
2722 | ret = __btrfs_add_free_space(block_group->fs_info, ctl, |
2723 | offset + bytes, | |
2724 | old_end - (offset + bytes), | |
2725 | info->trim_state); | |
bdb7d303 JB |
2726 | WARN_ON(ret); |
2727 | goto out; | |
2728 | } | |
0f9dd46c | 2729 | } |
96303081 | 2730 | |
34d52cb6 | 2731 | ret = remove_from_bitmap(ctl, info, &offset, &bytes); |
b0175117 JB |
2732 | if (ret == -EAGAIN) { |
2733 | re_search = true; | |
96303081 | 2734 | goto again; |
b0175117 | 2735 | } |
96303081 | 2736 | out_lock: |
66b53bae | 2737 | btrfs_discard_update_discardable(block_group); |
34d52cb6 | 2738 | spin_unlock(&ctl->tree_lock); |
0f9dd46c | 2739 | out: |
25179201 JB |
2740 | return ret; |
2741 | } | |
2742 | ||
32da5386 | 2743 | void btrfs_dump_free_space(struct btrfs_block_group *block_group, |
0f9dd46c JB |
2744 | u64 bytes) |
2745 | { | |
0b246afa | 2746 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
34d52cb6 | 2747 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0f9dd46c JB |
2748 | struct btrfs_free_space *info; |
2749 | struct rb_node *n; | |
2750 | int count = 0; | |
2751 | ||
169e0da9 NA |
2752 | /* |
2753 | * Zoned btrfs does not use the free space tree or clusters. Just print |
2754 | * out the free space after the allocation offset. | |
2755 | */ | |
2756 | if (btrfs_is_zoned(fs_info)) { | |
2757 | btrfs_info(fs_info, "free space %llu", | |
2758 | block_group->length - block_group->alloc_offset); | |
2759 | return; | |
2760 | } | |
2761 | ||
9084cb6a | 2762 | spin_lock(&ctl->tree_lock); |
34d52cb6 | 2763 | for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { |
0f9dd46c | 2764 | info = rb_entry(n, struct btrfs_free_space, offset_index); |
f6175efa | 2765 | if (info->bytes >= bytes && !block_group->ro) |
0f9dd46c | 2766 | count++; |
0b246afa | 2767 | btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s", |
efe120a0 | 2768 | info->offset, info->bytes, |
96303081 | 2769 | (info->bitmap) ? "yes" : "no"); |
0f9dd46c | 2770 | } |
9084cb6a | 2771 | spin_unlock(&ctl->tree_lock); |
0b246afa | 2772 | btrfs_info(fs_info, "block group has cluster?: %s", |
96303081 | 2773 | list_empty(&block_group->cluster_list) ? "no" : "yes"); |
0b246afa | 2774 | btrfs_info(fs_info, |
efe120a0 | 2775 | "%d blocks of free space at or bigger than bytes is", count); |
0f9dd46c JB |
2776 | } |
2777 | ||
cd79909b JB |
2778 | void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group, |
2779 | struct btrfs_free_space_ctl *ctl) | |
0f9dd46c | 2780 | { |
0b246afa | 2781 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
0f9dd46c | 2782 | |
34d52cb6 | 2783 | spin_lock_init(&ctl->tree_lock); |
0b246afa | 2784 | ctl->unit = fs_info->sectorsize; |
b3470b5d | 2785 | ctl->start = block_group->start; |
34d52cb6 LZ |
2786 | ctl->private = block_group; |
2787 | ctl->op = &free_space_op; | |
55507ce3 FM |
2788 | INIT_LIST_HEAD(&ctl->trimming_ranges); |
2789 | mutex_init(&ctl->cache_writeout_mutex); | |
0f9dd46c | 2790 | |
34d52cb6 LZ |
2791 | /* |
2792 | * We only want to have 32k of RAM per block group for keeping |
2793 | * track of free space, and if we pass 1/2 of that we want to |
2794 | * start converting things over to using bitmaps. |
2795 | */ | |
ee22184b | 2796 | ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); |
0f9dd46c JB |
2797 | } |
2798 | ||
fa9c0d79 CM |
2799 | /* |
2800 | * For a given cluster, put all of its extents back into the free |
2801 | * space cache. If the block group passed doesn't match the block group |
2802 | * pointed to by the cluster, someone else raced in and freed the |
2803 | * cluster already. In that case, we just return without changing anything. |
2804 | */ | |
69b0e093 | 2805 | static void __btrfs_return_cluster_to_free_space( |
32da5386 | 2806 | struct btrfs_block_group *block_group, |
fa9c0d79 CM |
2807 | struct btrfs_free_cluster *cluster) |
2808 | { | |
34d52cb6 | 2809 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
fa9c0d79 CM |
2810 | struct btrfs_free_space *entry; |
2811 | struct rb_node *node; | |
2812 | ||
2813 | spin_lock(&cluster->lock); | |
95c85fba JB |
2814 | if (cluster->block_group != block_group) { |
2815 | spin_unlock(&cluster->lock); | |
2816 | return; | |
2817 | } | |
fa9c0d79 | 2818 | |
96303081 | 2819 | cluster->block_group = NULL; |
fa9c0d79 | 2820 | cluster->window_start = 0; |
96303081 | 2821 | list_del_init(&cluster->block_group_list); |
96303081 | 2822 | |
fa9c0d79 | 2823 | node = rb_first(&cluster->root); |
96303081 | 2824 | while (node) { |
4e69b598 JB |
2825 | bool bitmap; |
2826 | ||
fa9c0d79 CM |
2827 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
2828 | node = rb_next(&entry->offset_index); | |
2829 | rb_erase(&entry->offset_index, &cluster->root); | |
20005523 | 2830 | RB_CLEAR_NODE(&entry->offset_index); |
4e69b598 JB |
2831 | |
2832 | bitmap = (entry->bitmap != NULL); | |
20005523 | 2833 | if (!bitmap) { |
dfb79ddb | 2834 | /* Merging treats extents as if they were new */ |
5dc7c10b | 2835 | if (!btrfs_free_space_trimmed(entry)) { |
dfb79ddb | 2836 | ctl->discardable_extents[BTRFS_STAT_CURR]--; |
5dc7c10b DZ |
2837 | ctl->discardable_bytes[BTRFS_STAT_CURR] -= |
2838 | entry->bytes; | |
2839 | } | |
dfb79ddb | 2840 | |
34d52cb6 | 2841 | try_merge_free_space(ctl, entry, false); |
20005523 | 2842 | steal_from_bitmap(ctl, entry, false); |
dfb79ddb DZ |
2843 | |
2844 | /* As we insert directly, update these statistics */ | |
5dc7c10b | 2845 | if (!btrfs_free_space_trimmed(entry)) { |
dfb79ddb | 2846 | ctl->discardable_extents[BTRFS_STAT_CURR]++; |
5dc7c10b DZ |
2847 | ctl->discardable_bytes[BTRFS_STAT_CURR] += |
2848 | entry->bytes; | |
2849 | } | |
20005523 | 2850 | } |
34d52cb6 | 2851 | tree_insert_offset(&ctl->free_space_offset, |
4e69b598 | 2852 | entry->offset, &entry->offset_index, bitmap); |
fa9c0d79 | 2853 | } |
6bef4d31 | 2854 | cluster->root = RB_ROOT; |
fa9c0d79 | 2855 | spin_unlock(&cluster->lock); |
96303081 | 2856 | btrfs_put_block_group(block_group); |
fa9c0d79 CM |
2857 | } |
2858 | ||
48a3b636 ES |
2859 | static void __btrfs_remove_free_space_cache_locked( |
2860 | struct btrfs_free_space_ctl *ctl) | |
0f9dd46c JB |
2861 | { |
2862 | struct btrfs_free_space *info; | |
2863 | struct rb_node *node; | |
581bb050 | 2864 | |
581bb050 LZ |
2865 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { |
2866 | info = rb_entry(node, struct btrfs_free_space, offset_index); | |
9b90f513 JB |
2867 | if (!info->bitmap) { |
2868 | unlink_free_space(ctl, info); | |
2869 | kmem_cache_free(btrfs_free_space_cachep, info); | |
2870 | } else { | |
2871 | free_bitmap(ctl, info); | |
2872 | } | |
351810c1 DS |
2873 | |
2874 | cond_resched_lock(&ctl->tree_lock); | |
581bb050 | 2875 | } |
09655373 CM |
2876 | } |
2877 | ||
2878 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) | |
2879 | { | |
2880 | spin_lock(&ctl->tree_lock); | |
2881 | __btrfs_remove_free_space_cache_locked(ctl); | |
27f0afc7 | 2882 | if (ctl->private) |
66b53bae | 2883 | btrfs_discard_update_discardable(ctl->private); |
581bb050 LZ |
2884 | spin_unlock(&ctl->tree_lock); |
2885 | } | |
2886 | ||
32da5386 | 2887 | void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group) |
581bb050 LZ |
2888 | { |
2889 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
fa9c0d79 | 2890 | struct btrfs_free_cluster *cluster; |
96303081 | 2891 | struct list_head *head; |
0f9dd46c | 2892 | |
34d52cb6 | 2893 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2894 | while ((head = block_group->cluster_list.next) != |
2895 | &block_group->cluster_list) { | |
2896 | cluster = list_entry(head, struct btrfs_free_cluster, | |
2897 | block_group_list); | |
fa9c0d79 CM |
2898 | |
2899 | WARN_ON(cluster->block_group != block_group); | |
2900 | __btrfs_return_cluster_to_free_space(block_group, cluster); | |
351810c1 DS |
2901 | |
2902 | cond_resched_lock(&ctl->tree_lock); | |
fa9c0d79 | 2903 | } |
09655373 | 2904 | __btrfs_remove_free_space_cache_locked(ctl); |
66b53bae | 2905 | btrfs_discard_update_discardable(block_group); |
34d52cb6 | 2906 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 | 2907 | |
0f9dd46c JB |
2908 | } |
2909 | ||
6e80d4f8 DZ |
2910 | /** |
2911 | * btrfs_is_free_space_trimmed - see if everything is trimmed | |
2912 | * @block_group: block_group of interest | |
2913 | * | |
2914 | * Walk @block_group's free space rb_tree to determine if everything is trimmed. | |
2915 | */ | |
2916 | bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group) | |
2917 | { | |
2918 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
2919 | struct btrfs_free_space *info; | |
2920 | struct rb_node *node; | |
2921 | bool ret = true; | |
2922 | ||
2923 | spin_lock(&ctl->tree_lock); | |
2924 | node = rb_first(&ctl->free_space_offset); | |
2925 | ||
2926 | while (node) { | |
2927 | info = rb_entry(node, struct btrfs_free_space, offset_index); | |
2928 | ||
2929 | if (!btrfs_free_space_trimmed(info)) { | |
2930 | ret = false; | |
2931 | break; | |
2932 | } | |
2933 | ||
2934 | node = rb_next(node); | |
2935 | } | |
2936 | ||
2937 | spin_unlock(&ctl->tree_lock); | |
2938 | return ret; | |
2939 | } | |
2940 | ||
32da5386 | 2941 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, |
a4820398 MX |
2942 | u64 offset, u64 bytes, u64 empty_size, |
2943 | u64 *max_extent_size) | |
0f9dd46c | 2944 | { |
34d52cb6 | 2945 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
9ddf648f DZ |
2946 | struct btrfs_discard_ctl *discard_ctl = |
2947 | &block_group->fs_info->discard_ctl; | |
6226cb0a | 2948 | struct btrfs_free_space *entry = NULL; |
96303081 | 2949 | u64 bytes_search = bytes + empty_size; |
6226cb0a | 2950 | u64 ret = 0; |
53b381b3 DW |
2951 | u64 align_gap = 0; |
2952 | u64 align_gap_len = 0; | |
a7ccb255 | 2953 | enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED; |
0f9dd46c | 2954 | |
2eda5708 NA |
2955 | ASSERT(!btrfs_is_zoned(block_group->fs_info)); |
2956 | ||
34d52cb6 | 2957 | spin_lock(&ctl->tree_lock); |
53b381b3 | 2958 | entry = find_free_space(ctl, &offset, &bytes_search, |
a4820398 | 2959 | block_group->full_stripe_len, max_extent_size); |
6226cb0a | 2960 | if (!entry) |
96303081 JB |
2961 | goto out; |
2962 | ||
2963 | ret = offset; | |
2964 | if (entry->bitmap) { | |
34d52cb6 | 2965 | bitmap_clear_bits(ctl, entry, offset, bytes); |
9ddf648f DZ |
2966 | |
2967 | if (!btrfs_free_space_trimmed(entry)) | |
2968 | atomic64_add(bytes, &discard_ctl->discard_bytes_saved); | |
2969 | ||
edf6e2d1 | 2970 | if (!entry->bytes) |
34d52cb6 | 2971 | free_bitmap(ctl, entry); |
96303081 | 2972 | } else { |
34d52cb6 | 2973 | unlink_free_space(ctl, entry); |
53b381b3 DW |
2974 | align_gap_len = offset - entry->offset; |
2975 | align_gap = entry->offset; | |
a7ccb255 | 2976 | align_gap_trim_state = entry->trim_state; |
53b381b3 | 2977 | |
9ddf648f DZ |
2978 | if (!btrfs_free_space_trimmed(entry)) |
2979 | atomic64_add(bytes, &discard_ctl->discard_bytes_saved); | |
2980 | ||
53b381b3 DW |
2981 | entry->offset = offset + bytes; |
2982 | WARN_ON(entry->bytes < bytes + align_gap_len); | |
2983 | ||
2984 | entry->bytes -= bytes + align_gap_len; | |
6226cb0a | 2985 | if (!entry->bytes) |
dc89e982 | 2986 | kmem_cache_free(btrfs_free_space_cachep, entry); |
6226cb0a | 2987 | else |
34d52cb6 | 2988 | link_free_space(ctl, entry); |
6226cb0a | 2989 | } |
96303081 | 2990 | out: |
66b53bae | 2991 | btrfs_discard_update_discardable(block_group); |
34d52cb6 | 2992 | spin_unlock(&ctl->tree_lock); |
817d52f8 | 2993 | |
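/*
 * The offset handed out above may have been pushed past the start of the
 * extent entry it was carved from (e.g. for stripe alignment), leaving a
 * small gap in front of it.  Give that gap back to the free space tree
 * below, preserving its original trim state.
 */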
53b381b3 | 2994 | if (align_gap_len) |
ab8d0fc4 | 2995 | __btrfs_add_free_space(block_group->fs_info, ctl, |
a7ccb255 DZ |
2996 | align_gap, align_gap_len, |
2997 | align_gap_trim_state); | |
0f9dd46c JB |
2998 | return ret; |
2999 | } | |
fa9c0d79 CM |
3000 | |
3001 | /* | |
3002 | * given a cluster, put all of its extents back into the free space | |
3003 | * cache. If a block group is passed, this function will only free | |
3004 | * a cluster that belongs to the passed block group. | |
3005 | * | |
3006 | * Otherwise, it'll get a reference on the block group pointed to by the | |
3007 | * cluster and remove the cluster from it. | |
3008 | */ | |
69b0e093 | 3009 | void btrfs_return_cluster_to_free_space( |
32da5386 | 3010 | struct btrfs_block_group *block_group, |
fa9c0d79 CM |
3011 | struct btrfs_free_cluster *cluster) |
3012 | { | |
34d52cb6 | 3013 | struct btrfs_free_space_ctl *ctl; |
fa9c0d79 CM |
3014 | |
3015 | /* first, get a safe pointer to the block group */ | |
3016 | spin_lock(&cluster->lock); | |
3017 | if (!block_group) { | |
3018 | block_group = cluster->block_group; | |
3019 | if (!block_group) { | |
3020 | spin_unlock(&cluster->lock); | |
69b0e093 | 3021 | return; |
fa9c0d79 CM |
3022 | } |
3023 | } else if (cluster->block_group != block_group) { | |
3024 | /* someone else has already freed it, don't redo their work */ |
3025 | spin_unlock(&cluster->lock); | |
69b0e093 | 3026 | return; |
fa9c0d79 | 3027 | } |
b5790d51 | 3028 | btrfs_get_block_group(block_group); |
fa9c0d79 CM |
3029 | spin_unlock(&cluster->lock); |
3030 | ||
34d52cb6 LZ |
3031 | ctl = block_group->free_space_ctl; |
3032 | ||
fa9c0d79 | 3033 | /* now return any extents the cluster had on it */ |
34d52cb6 | 3034 | spin_lock(&ctl->tree_lock); |
69b0e093 | 3035 | __btrfs_return_cluster_to_free_space(block_group, cluster); |
34d52cb6 | 3036 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 | 3037 | |
6e80d4f8 DZ |
3038 | btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); |
3039 | ||
fa9c0d79 CM |
3040 | /* finally drop our ref */ |
3041 | btrfs_put_block_group(block_group); | |
fa9c0d79 CM |
3042 | } |
3043 | ||
32da5386 | 3044 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group, |
96303081 | 3045 | struct btrfs_free_cluster *cluster, |
4e69b598 | 3046 | struct btrfs_free_space *entry, |
a4820398 MX |
3047 | u64 bytes, u64 min_start, |
3048 | u64 *max_extent_size) | |
96303081 | 3049 | { |
34d52cb6 | 3050 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
96303081 JB |
3051 | int err; |
3052 | u64 search_start = cluster->window_start; | |
3053 | u64 search_bytes = bytes; | |
3054 | u64 ret = 0; | |
3055 | ||
96303081 JB |
3056 | search_start = min_start; |
3057 | search_bytes = bytes; | |
3058 | ||
0584f718 | 3059 | err = search_bitmap(ctl, entry, &search_start, &search_bytes, true); |
a4820398 | 3060 | if (err) { |
ad22cf6e JB |
3061 | *max_extent_size = max(get_max_extent_size(entry), |
3062 | *max_extent_size); | |
4e69b598 | 3063 | return 0; |
a4820398 | 3064 | } |
96303081 JB |
3065 | |
3066 | ret = search_start; | |
bb3ac5a4 | 3067 | __bitmap_clear_bits(ctl, entry, ret, bytes); |
96303081 JB |
3068 | |
3069 | return ret; | |
3070 | } | |
3071 | ||
fa9c0d79 CM |
3072 | /* |
3073 | * given a cluster, try to allocate 'bytes' from it; returns 0 |
3074 | * if it couldn't find anything suitably large, or a logical disk offset | |
3075 | * if things worked out | |
3076 | */ | |
32da5386 | 3077 | u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group, |
fa9c0d79 | 3078 | struct btrfs_free_cluster *cluster, u64 bytes, |
a4820398 | 3079 | u64 min_start, u64 *max_extent_size) |
fa9c0d79 | 3080 | { |
34d52cb6 | 3081 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
9ddf648f DZ |
3082 | struct btrfs_discard_ctl *discard_ctl = |
3083 | &block_group->fs_info->discard_ctl; | |
fa9c0d79 CM |
3084 | struct btrfs_free_space *entry = NULL; |
3085 | struct rb_node *node; | |
3086 | u64 ret = 0; | |
3087 | ||
2eda5708 NA |
3088 | ASSERT(!btrfs_is_zoned(block_group->fs_info)); |
3089 | ||
fa9c0d79 CM |
3090 | spin_lock(&cluster->lock); |
3091 | if (bytes > cluster->max_size) | |
3092 | goto out; | |
3093 | ||
3094 | if (cluster->block_group != block_group) | |
3095 | goto out; | |
3096 | ||
3097 | node = rb_first(&cluster->root); | |
3098 | if (!node) | |
3099 | goto out; | |
3100 | ||
3101 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
67871254 | 3102 | while (1) { |
ad22cf6e JB |
3103 | if (entry->bytes < bytes) |
3104 | *max_extent_size = max(get_max_extent_size(entry), | |
3105 | *max_extent_size); | |
a4820398 | 3106 | |
4e69b598 JB |
3107 | if (entry->bytes < bytes || |
3108 | (!entry->bitmap && entry->offset < min_start)) { | |
fa9c0d79 CM |
3109 | node = rb_next(&entry->offset_index); |
3110 | if (!node) | |
3111 | break; | |
3112 | entry = rb_entry(node, struct btrfs_free_space, | |
3113 | offset_index); | |
3114 | continue; | |
3115 | } | |
fa9c0d79 | 3116 | |
4e69b598 JB |
3117 | if (entry->bitmap) { |
3118 | ret = btrfs_alloc_from_bitmap(block_group, | |
3119 | cluster, entry, bytes, | |
a4820398 MX |
3120 | cluster->window_start, |
3121 | max_extent_size); | |
4e69b598 | 3122 | if (ret == 0) { |
4e69b598 JB |
3123 | node = rb_next(&entry->offset_index); |
3124 | if (!node) | |
3125 | break; | |
3126 | entry = rb_entry(node, struct btrfs_free_space, | |
3127 | offset_index); | |
3128 | continue; | |
3129 | } | |
9b230628 | 3130 | cluster->window_start += bytes; |
4e69b598 | 3131 | } else { |
4e69b598 JB |
3132 | ret = entry->offset; |
3133 | ||
3134 | entry->offset += bytes; | |
3135 | entry->bytes -= bytes; | |
3136 | } | |
fa9c0d79 | 3137 | |
fa9c0d79 CM |
3138 | break; |
3139 | } | |
3140 | out: | |
3141 | spin_unlock(&cluster->lock); | |
96303081 | 3142 | |
5e71b5d5 LZ |
3143 | if (!ret) |
3144 | return 0; | |
3145 | ||
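/*
 * The cluster handed out an allocation; update the free space and
 * discard accounting under the ctl lock, and drop the entry entirely
 * if it is now empty.
 */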
34d52cb6 | 3146 | spin_lock(&ctl->tree_lock); |
5e71b5d5 | 3147 | |
9ddf648f DZ |
3148 | if (!btrfs_free_space_trimmed(entry)) |
3149 | atomic64_add(bytes, &discard_ctl->discard_bytes_saved); | |
3150 | ||
34d52cb6 | 3151 | ctl->free_space -= bytes; |
5dc7c10b DZ |
3152 | if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) |
3153 | ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; | |
3c179165 NB |
3154 | |
3155 | spin_lock(&cluster->lock); | |
5e71b5d5 | 3156 | if (entry->bytes == 0) { |
3c179165 | 3157 | rb_erase(&entry->offset_index, &cluster->root); |
34d52cb6 | 3158 | ctl->free_extents--; |
4e69b598 | 3159 | if (entry->bitmap) { |
3acd4850 CL |
3160 | kmem_cache_free(btrfs_free_space_bitmap_cachep, |
3161 | entry->bitmap); | |
34d52cb6 | 3162 | ctl->total_bitmaps--; |
fa598b06 | 3163 | recalculate_thresholds(ctl); |
dfb79ddb DZ |
3164 | } else if (!btrfs_free_space_trimmed(entry)) { |
3165 | ctl->discardable_extents[BTRFS_STAT_CURR]--; | |
4e69b598 | 3166 | } |
dc89e982 | 3167 | kmem_cache_free(btrfs_free_space_cachep, entry); |
5e71b5d5 LZ |
3168 | } |
3169 | ||
3c179165 | 3170 | spin_unlock(&cluster->lock); |
34d52cb6 | 3171 | spin_unlock(&ctl->tree_lock); |
5e71b5d5 | 3172 | |
fa9c0d79 CM |
3173 | return ret; |
3174 | } | |
3175 | ||
32da5386 | 3176 | static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group, |
96303081 JB |
3177 | struct btrfs_free_space *entry, |
3178 | struct btrfs_free_cluster *cluster, | |
1bb91902 AO |
3179 | u64 offset, u64 bytes, |
3180 | u64 cont1_bytes, u64 min_bytes) | |
96303081 | 3181 | { |
34d52cb6 | 3182 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
96303081 JB |
3183 | unsigned long next_zero; |
3184 | unsigned long i; | |
1bb91902 AO |
3185 | unsigned long want_bits; |
3186 | unsigned long min_bits; | |
96303081 | 3187 | unsigned long found_bits; |
cef40483 | 3188 | unsigned long max_bits = 0; |
96303081 JB |
3189 | unsigned long start = 0; |
3190 | unsigned long total_found = 0; | |
4e69b598 | 3191 | int ret; |
96303081 | 3192 | |
96009762 | 3193 | i = offset_to_bit(entry->offset, ctl->unit, |
96303081 | 3194 | max_t(u64, offset, entry->offset)); |
96009762 WSH |
3195 | want_bits = bytes_to_bits(bytes, ctl->unit); |
3196 | min_bits = bytes_to_bits(min_bytes, ctl->unit); | |
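/*
 * want_bits is the total amount of space we need for the cluster;
 * min_bits is the smallest run of set bits we will accept while
 * accumulating that total.
 */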
96303081 | 3197 | |
cef40483 JB |
3198 | /* |
3199 | * Don't bother looking for a cluster in this bitmap if it's heavily | |
3200 | * fragmented. | |
3201 | */ | |
3202 | if (entry->max_extent_size && | |
3203 | entry->max_extent_size < cont1_bytes) | |
3204 | return -ENOSPC; | |
96303081 JB |
3205 | again: |
3206 | found_bits = 0; | |
ebb3dad4 | 3207 | for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { |
96303081 JB |
3208 | next_zero = find_next_zero_bit(entry->bitmap, |
3209 | BITS_PER_BITMAP, i); | |
1bb91902 | 3210 | if (next_zero - i >= min_bits) { |
96303081 | 3211 | found_bits = next_zero - i; |
cef40483 JB |
3212 | if (found_bits > max_bits) |
3213 | max_bits = found_bits; | |
96303081 JB |
3214 | break; |
3215 | } | |
cef40483 JB |
3216 | if (next_zero - i > max_bits) |
3217 | max_bits = next_zero - i; | |
96303081 JB |
3218 | i = next_zero; |
3219 | } | |
3220 | ||
cef40483 JB |
3221 | if (!found_bits) { |
3222 | entry->max_extent_size = (u64)max_bits * ctl->unit; | |
4e69b598 | 3223 | return -ENOSPC; |
cef40483 | 3224 | } |
96303081 | 3225 | |
1bb91902 | 3226 | if (!total_found) { |
96303081 | 3227 | start = i; |
b78d09bc | 3228 | cluster->max_size = 0; |
96303081 JB |
3229 | } |
3230 | ||
3231 | total_found += found_bits; | |
3232 | ||
96009762 WSH |
3233 | if (cluster->max_size < found_bits * ctl->unit) |
3234 | cluster->max_size = found_bits * ctl->unit; | |
96303081 | 3235 | |
1bb91902 AO |
3236 | if (total_found < want_bits || cluster->max_size < cont1_bytes) { |
3237 | i = next_zero + 1; | |
96303081 JB |
3238 | goto again; |
3239 | } | |
3240 | ||
96009762 | 3241 | cluster->window_start = start * ctl->unit + entry->offset; |
34d52cb6 | 3242 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
4e69b598 JB |
3243 | ret = tree_insert_offset(&cluster->root, entry->offset, |
3244 | &entry->offset_index, 1); | |
b12d6869 | 3245 | ASSERT(!ret); /* -EEXIST; Logic error */ |
96303081 | 3246 | |
3f7de037 | 3247 | trace_btrfs_setup_cluster(block_group, cluster, |
96009762 | 3248 | total_found * ctl->unit, 1); |
96303081 JB |
3249 | return 0; |
3250 | } | |
3251 | ||
4e69b598 JB |
3252 | /* |
3253 | * This searches the block group for just extents to fill the cluster with. | |
1bb91902 AO |
3254 | * Try to find a cluster with at least bytes total bytes, at least one |
3255 | * extent of cont1_bytes, and other extents of at least min_bytes. |
4e69b598 | 3256 | */ |
3de85bb9 | 3257 | static noinline int |
32da5386 | 3258 | setup_cluster_no_bitmap(struct btrfs_block_group *block_group, |
3de85bb9 JB |
3259 | struct btrfs_free_cluster *cluster, |
3260 | struct list_head *bitmaps, u64 offset, u64 bytes, | |
1bb91902 | 3261 | u64 cont1_bytes, u64 min_bytes) |
4e69b598 | 3262 | { |
34d52cb6 | 3263 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
4e69b598 JB |
3264 | struct btrfs_free_space *first = NULL; |
3265 | struct btrfs_free_space *entry = NULL; | |
4e69b598 JB |
3266 | struct btrfs_free_space *last; |
3267 | struct rb_node *node; | |
4e69b598 JB |
3268 | u64 window_free; |
3269 | u64 max_extent; | |
3f7de037 | 3270 | u64 total_size = 0; |
4e69b598 | 3271 | |
34d52cb6 | 3272 | entry = tree_search_offset(ctl, offset, 0, 1); |
4e69b598 JB |
3273 | if (!entry) |
3274 | return -ENOSPC; | |
3275 | ||
3276 | /* | |
3277 | * We don't want bitmaps, so just move along until we find a normal | |
3278 | * extent entry. | |
3279 | */ | |
1bb91902 AO |
3280 | while (entry->bitmap || entry->bytes < min_bytes) { |
3281 | if (entry->bitmap && list_empty(&entry->list)) | |
86d4a77b | 3282 | list_add_tail(&entry->list, bitmaps); |
4e69b598 JB |
3283 | node = rb_next(&entry->offset_index); |
3284 | if (!node) | |
3285 | return -ENOSPC; | |
3286 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
3287 | } | |
3288 | ||
4e69b598 JB |
3289 | window_free = entry->bytes; |
3290 | max_extent = entry->bytes; | |
3291 | first = entry; | |
3292 | last = entry; | |
4e69b598 | 3293 | |
1bb91902 AO |
3294 | for (node = rb_next(&entry->offset_index); node; |
3295 | node = rb_next(&entry->offset_index)) { | |
4e69b598 JB |
3296 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
3297 | ||
86d4a77b JB |
3298 | if (entry->bitmap) { |
3299 | if (list_empty(&entry->list)) | |
3300 | list_add_tail(&entry->list, bitmaps); | |
4e69b598 | 3301 | continue; |
86d4a77b JB |
3302 | } |
3303 | ||
1bb91902 AO |
3304 | if (entry->bytes < min_bytes) |
3305 | continue; | |
3306 | ||
3307 | last = entry; | |
3308 | window_free += entry->bytes; | |
3309 | if (entry->bytes > max_extent) | |
4e69b598 | 3310 | max_extent = entry->bytes; |
4e69b598 JB |
3311 | } |
3312 | ||
1bb91902 AO |
3313 | if (window_free < bytes || max_extent < cont1_bytes) |
3314 | return -ENOSPC; | |
3315 | ||
4e69b598 JB |
3316 | cluster->window_start = first->offset; |
3317 | ||
3318 | node = &first->offset_index; | |
3319 | ||
3320 | /* | |
3321 | * Now that we've found our entries, pull them out of the free space |
3322 | * cache and put them into the cluster rbtree. |
3323 | */ | |
3324 | do { | |
3325 | int ret; | |
3326 | ||
3327 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
3328 | node = rb_next(&entry->offset_index); | |
1bb91902 | 3329 | if (entry->bitmap || entry->bytes < min_bytes) |
4e69b598 JB |
3330 | continue; |
3331 | ||
34d52cb6 | 3332 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
4e69b598 JB |
3333 | ret = tree_insert_offset(&cluster->root, entry->offset, |
3334 | &entry->offset_index, 0); | |
3f7de037 | 3335 | total_size += entry->bytes; |
b12d6869 | 3336 | ASSERT(!ret); /* -EEXIST; Logic error */ |
4e69b598 JB |
3337 | } while (node && entry != last); |
3338 | ||
3339 | cluster->max_size = max_extent; | |
3f7de037 | 3340 | trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); |
4e69b598 JB |
3341 | return 0; |
3342 | } | |
3343 | ||
3344 | /* | |
3345 | * This specifically looks for bitmaps that may work in the cluster; we assume |
3346 | * that we have already failed to find extents that will work. | |
3347 | */ | |
3de85bb9 | 3348 | static noinline int |
32da5386 | 3349 | setup_cluster_bitmap(struct btrfs_block_group *block_group, |
3de85bb9 JB |
3350 | struct btrfs_free_cluster *cluster, |
3351 | struct list_head *bitmaps, u64 offset, u64 bytes, | |
1bb91902 | 3352 | u64 cont1_bytes, u64 min_bytes) |
4e69b598 | 3353 | { |
34d52cb6 | 3354 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
1b9b922a | 3355 | struct btrfs_free_space *entry = NULL; |
4e69b598 | 3356 | int ret = -ENOSPC; |
0f0fbf1d | 3357 | u64 bitmap_offset = offset_to_bitmap(ctl, offset); |
4e69b598 | 3358 | |
34d52cb6 | 3359 | if (ctl->total_bitmaps == 0) |
4e69b598 JB |
3360 | return -ENOSPC; |
3361 | ||
0f0fbf1d LZ |
3362 | /* |
3363 | * The bitmap that covers offset won't be in the list unless offset | |
3364 | * is just its start offset. | |
3365 | */ | |
1b9b922a CM |
3366 | if (!list_empty(bitmaps)) |
3367 | entry = list_first_entry(bitmaps, struct btrfs_free_space, list); | |
3368 | ||
3369 | if (!entry || entry->offset != bitmap_offset) { | |
0f0fbf1d LZ |
3370 | entry = tree_search_offset(ctl, bitmap_offset, 1, 0); |
3371 | if (entry && list_empty(&entry->list)) | |
3372 | list_add(&entry->list, bitmaps); | |
3373 | } | |
3374 | ||
86d4a77b | 3375 | list_for_each_entry(entry, bitmaps, list) { |
357b9784 | 3376 | if (entry->bytes < bytes) |
86d4a77b JB |
3377 | continue; |
3378 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | |
1bb91902 | 3379 | bytes, cont1_bytes, min_bytes); |
86d4a77b JB |
3380 | if (!ret) |
3381 | return 0; | |
3382 | } | |
3383 | ||
3384 | /* | |
52621cb6 LZ |
3385 | * The bitmaps list has all the bitmaps that record free space |
3386 | * starting after offset, so no more search is required. | |
86d4a77b | 3387 | */ |
52621cb6 | 3388 | return -ENOSPC; |
4e69b598 JB |
3389 | } |
3390 | ||
fa9c0d79 CM |
3391 | /* |
3392 | * here we try to find a cluster of blocks in a block group. The goal | |
1bb91902 | 3393 | * is to find at least bytes+empty_size. |
fa9c0d79 CM |
3394 | * We might not find them all in one contiguous area. |
3395 | * | |
3396 | * Returns zero and sets up the cluster if things worked out, otherwise |
3397 | * it returns -ENOSPC. |
3398 | */ | |
32da5386 | 3399 | int btrfs_find_space_cluster(struct btrfs_block_group *block_group, |
fa9c0d79 CM |
3400 | struct btrfs_free_cluster *cluster, |
3401 | u64 offset, u64 bytes, u64 empty_size) | |
3402 | { | |
2ceeae2e | 3403 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
34d52cb6 | 3404 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
86d4a77b | 3405 | struct btrfs_free_space *entry, *tmp; |
52621cb6 | 3406 | LIST_HEAD(bitmaps); |
fa9c0d79 | 3407 | u64 min_bytes; |
1bb91902 | 3408 | u64 cont1_bytes; |
fa9c0d79 CM |
3409 | int ret; |
3410 | ||
1bb91902 AO |
3411 | /* |
3412 | * Choose the minimum extent size we'll require for this | |
3413 | * cluster. For SSD_SPREAD, don't allow any fragmentation. | |
3414 | * For metadata, allow allocates with smaller extents. For | |
3415 | * data, keep it dense. | |
3416 | */ | |
0b246afa | 3417 | if (btrfs_test_opt(fs_info, SSD_SPREAD)) { |
1bb91902 | 3418 | cont1_bytes = min_bytes = bytes + empty_size; |
451d7585 | 3419 | } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { |
1bb91902 | 3420 | cont1_bytes = bytes; |
0b246afa | 3421 | min_bytes = fs_info->sectorsize; |
1bb91902 AO |
3422 | } else { |
3423 | cont1_bytes = max(bytes, (bytes + empty_size) >> 2); | |
0b246afa | 3424 | min_bytes = fs_info->sectorsize; |
1bb91902 | 3425 | } |
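/*
 * For example, a 1MiB data allocation with 7MiB of empty_size gives
 * cont1_bytes = max(1MiB, 8MiB >> 2) = 2MiB, with min_bytes of one
 * sectorsize.
 */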
fa9c0d79 | 3426 | |
34d52cb6 | 3427 | spin_lock(&ctl->tree_lock); |
7d0d2e8e JB |
3428 | |
3429 | /* | |
3430 | * If we know we don't have enough space to make a cluster, don't even |
3431 | * bother doing all the work to try and find one. | |
3432 | */ | |
1bb91902 | 3433 | if (ctl->free_space < bytes) { |
34d52cb6 | 3434 | spin_unlock(&ctl->tree_lock); |
7d0d2e8e JB |
3435 | return -ENOSPC; |
3436 | } | |
3437 | ||
fa9c0d79 CM |
3438 | spin_lock(&cluster->lock); |
3439 | ||
3440 | /* someone already found a cluster, hooray */ | |
3441 | if (cluster->block_group) { | |
3442 | ret = 0; | |
3443 | goto out; | |
3444 | } | |
fa9c0d79 | 3445 | |
3f7de037 JB |
3446 | trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, |
3447 | min_bytes); | |
3448 | ||
86d4a77b | 3449 | ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, |
1bb91902 AO |
3450 | bytes + empty_size, |
3451 | cont1_bytes, min_bytes); | |
4e69b598 | 3452 | if (ret) |
86d4a77b | 3453 | ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, |
1bb91902 AO |
3454 | offset, bytes + empty_size, |
3455 | cont1_bytes, min_bytes); | |
86d4a77b JB |
3456 | |
3457 | /* Clear our temporary list */ | |
3458 | list_for_each_entry_safe(entry, tmp, &bitmaps, list) | |
3459 | list_del_init(&entry->list); | |
fa9c0d79 | 3460 | |
4e69b598 | 3461 | if (!ret) { |
b5790d51 | 3462 | btrfs_get_block_group(block_group); |
4e69b598 JB |
3463 | list_add_tail(&cluster->block_group_list, |
3464 | &block_group->cluster_list); | |
3465 | cluster->block_group = block_group; | |
3f7de037 JB |
3466 | } else { |
3467 | trace_btrfs_failed_cluster_setup(block_group); | |
fa9c0d79 | 3468 | } |
fa9c0d79 CM |
3469 | out: |
3470 | spin_unlock(&cluster->lock); | |
34d52cb6 | 3471 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 CM |
3472 | |
3473 | return ret; | |
3474 | } | |
3475 | ||
3476 | /* | |
3477 | * simple code to zero out a cluster | |
3478 | */ | |
3479 | void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |
3480 | { | |
3481 | spin_lock_init(&cluster->lock); | |
3482 | spin_lock_init(&cluster->refill_lock); | |
6bef4d31 | 3483 | cluster->root = RB_ROOT; |
fa9c0d79 | 3484 | cluster->max_size = 0; |
c759c4e1 | 3485 | cluster->fragmented = false; |
fa9c0d79 CM |
3486 | INIT_LIST_HEAD(&cluster->block_group_list); |
3487 | cluster->block_group = NULL; | |
3488 | } | |
3489 | ||
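/*
 * The caller has already pulled the extent being trimmed out of the free
 * space tree.  Account it as reserved while the discard is issued, then
 * add the trimmed space, plus any remaining reserved space on either side
 * of it, back with the appropriate trim state.
 */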
32da5386 | 3490 | static int do_trimming(struct btrfs_block_group *block_group, |
7fe1e641 | 3491 | u64 *total_trimmed, u64 start, u64 bytes, |
55507ce3 | 3492 | u64 reserved_start, u64 reserved_bytes, |
b0643e59 | 3493 | enum btrfs_trim_state reserved_trim_state, |
55507ce3 | 3494 | struct btrfs_trim_range *trim_entry) |
f7039b1d | 3495 | { |
7fe1e641 | 3496 | struct btrfs_space_info *space_info = block_group->space_info; |
f7039b1d | 3497 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
55507ce3 | 3498 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
7fe1e641 LZ |
3499 | int ret; |
3500 | int update = 0; | |
b0643e59 DZ |
3501 | const u64 end = start + bytes; |
3502 | const u64 reserved_end = reserved_start + reserved_bytes; | |
3503 | enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; | |
7fe1e641 | 3504 | u64 trimmed = 0; |
f7039b1d | 3505 | |
7fe1e641 LZ |
3506 | spin_lock(&space_info->lock); |
3507 | spin_lock(&block_group->lock); | |
3508 | if (!block_group->ro) { | |
3509 | block_group->reserved += reserved_bytes; | |
3510 | space_info->bytes_reserved += reserved_bytes; | |
3511 | update = 1; | |
3512 | } | |
3513 | spin_unlock(&block_group->lock); | |
3514 | spin_unlock(&space_info->lock); | |
3515 | ||
2ff7e61e | 3516 | ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed); |
b0643e59 | 3517 | if (!ret) { |
7fe1e641 | 3518 | *total_trimmed += trimmed; |
b0643e59 DZ |
3519 | trim_state = BTRFS_TRIM_STATE_TRIMMED; |
3520 | } | |
7fe1e641 | 3521 | |
55507ce3 | 3522 | mutex_lock(&ctl->cache_writeout_mutex); |
b0643e59 DZ |
3523 | if (reserved_start < start) |
3524 | __btrfs_add_free_space(fs_info, ctl, reserved_start, | |
3525 | start - reserved_start, | |
3526 | reserved_trim_state); | |
3527 | if (start + bytes < reserved_start + reserved_bytes) | |
3528 | __btrfs_add_free_space(fs_info, ctl, end, reserved_end - end, | |
3529 | reserved_trim_state); | |
3530 | __btrfs_add_free_space(fs_info, ctl, start, bytes, trim_state); | |
55507ce3 FM |
3531 | list_del(&trim_entry->list); |
3532 | mutex_unlock(&ctl->cache_writeout_mutex); | |
7fe1e641 LZ |
3533 | |
3534 | if (update) { | |
3535 | spin_lock(&space_info->lock); | |
3536 | spin_lock(&block_group->lock); | |
3537 | if (block_group->ro) | |
3538 | space_info->bytes_readonly += reserved_bytes; | |
3539 | block_group->reserved -= reserved_bytes; | |
3540 | space_info->bytes_reserved -= reserved_bytes; | |
7fe1e641 | 3541 | spin_unlock(&block_group->lock); |
8f63a840 | 3542 | spin_unlock(&space_info->lock); |
7fe1e641 LZ |
3543 | } |
3544 | ||
3545 | return ret; | |
3546 | } | |
3547 | ||
2bee7eb8 DZ |
3548 | /* |
3549 | * If @async is set, then we will trim 1 region and return. | |
3550 | */ | |
32da5386 | 3551 | static int trim_no_bitmap(struct btrfs_block_group *block_group, |
2bee7eb8 DZ |
3552 | u64 *total_trimmed, u64 start, u64 end, u64 minlen, |
3553 | bool async) | |
7fe1e641 | 3554 | { |
19b2a2c7 DZ |
3555 | struct btrfs_discard_ctl *discard_ctl = |
3556 | &block_group->fs_info->discard_ctl; | |
7fe1e641 LZ |
3557 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
3558 | struct btrfs_free_space *entry; | |
3559 | struct rb_node *node; | |
3560 | int ret = 0; | |
3561 | u64 extent_start; | |
3562 | u64 extent_bytes; | |
b0643e59 | 3563 | enum btrfs_trim_state extent_trim_state; |
7fe1e641 | 3564 | u64 bytes; |
19b2a2c7 | 3565 | const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); |
f7039b1d LD |
3566 | |
3567 | while (start < end) { | |
55507ce3 FM |
3568 | struct btrfs_trim_range trim_entry; |
3569 | ||
3570 | mutex_lock(&ctl->cache_writeout_mutex); | |
34d52cb6 | 3571 | spin_lock(&ctl->tree_lock); |
f7039b1d | 3572 | |
2bee7eb8 DZ |
3573 | if (ctl->free_space < minlen) |
3574 | goto out_unlock; | |
f7039b1d | 3575 | |
34d52cb6 | 3576 | entry = tree_search_offset(ctl, start, 0, 1); |
2bee7eb8 DZ |
3577 | if (!entry) |
3578 | goto out_unlock; | |
f7039b1d | 3579 | |
2bee7eb8 DZ |
3580 | /* Skip bitmaps and, if async, already trimmed entries */ |
3581 | while (entry->bitmap || | |
3582 | (async && btrfs_free_space_trimmed(entry))) { | |
7fe1e641 | 3583 | node = rb_next(&entry->offset_index); |
2bee7eb8 DZ |
3584 | if (!node) |
3585 | goto out_unlock; | |
7fe1e641 LZ |
3586 | entry = rb_entry(node, struct btrfs_free_space, |
3587 | offset_index); | |
f7039b1d LD |
3588 | } |
3589 | ||
2bee7eb8 DZ |
3590 | if (entry->offset >= end) |
3591 | goto out_unlock; | |
f7039b1d | 3592 | |
7fe1e641 LZ |
3593 | extent_start = entry->offset; |
3594 | extent_bytes = entry->bytes; | |
b0643e59 | 3595 | extent_trim_state = entry->trim_state; |
4aa9ad52 DZ |
3596 | if (async) { |
3597 | start = entry->offset; | |
3598 | bytes = entry->bytes; | |
3599 | if (bytes < minlen) { | |
3600 | spin_unlock(&ctl->tree_lock); | |
3601 | mutex_unlock(&ctl->cache_writeout_mutex); | |
3602 | goto next; | |
3603 | } | |
3604 | unlink_free_space(ctl, entry); | |
7fe6d45e DZ |
3605 | /* |
3606 | * Let bytes = BTRFS_MAX_DISCARD_SIZE + X. | |
3607 | * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim | |
3608 | * X when we come back around. So trim it now. | |
3609 | */ | |
3610 | if (max_discard_size && | |
3611 | bytes >= (max_discard_size + | |
3612 | BTRFS_ASYNC_DISCARD_MIN_FILTER)) { | |
19b2a2c7 DZ |
3613 | bytes = max_discard_size; |
3614 | extent_bytes = max_discard_size; | |
3615 | entry->offset += max_discard_size; | |
3616 | entry->bytes -= max_discard_size; | |
4aa9ad52 DZ |
3617 | link_free_space(ctl, entry); |
3618 | } else { | |
3619 | kmem_cache_free(btrfs_free_space_cachep, entry); | |
3620 | } | |
3621 | } else { | |
3622 | start = max(start, extent_start); | |
3623 | bytes = min(extent_start + extent_bytes, end) - start; | |
3624 | if (bytes < minlen) { | |
3625 | spin_unlock(&ctl->tree_lock); | |
3626 | mutex_unlock(&ctl->cache_writeout_mutex); | |
3627 | goto next; | |
3628 | } | |
f7039b1d | 3629 | |
4aa9ad52 DZ |
3630 | unlink_free_space(ctl, entry); |
3631 | kmem_cache_free(btrfs_free_space_cachep, entry); | |
3632 | } | |
7fe1e641 | 3633 | |
34d52cb6 | 3634 | spin_unlock(&ctl->tree_lock); |
55507ce3 FM |
3635 | trim_entry.start = extent_start; |
3636 | trim_entry.bytes = extent_bytes; | |
3637 | list_add_tail(&trim_entry.list, &ctl->trimming_ranges); | |
3638 | mutex_unlock(&ctl->cache_writeout_mutex); | |
f7039b1d | 3639 | |
7fe1e641 | 3640 | ret = do_trimming(block_group, total_trimmed, start, bytes, |
b0643e59 DZ |
3641 | extent_start, extent_bytes, extent_trim_state, |
3642 | &trim_entry); | |
2bee7eb8 DZ |
3643 | if (ret) { |
3644 | block_group->discard_cursor = start + bytes; | |
7fe1e641 | 3645 | break; |
2bee7eb8 | 3646 | } |
7fe1e641 LZ |
3647 | next: |
3648 | start += bytes; | |
2bee7eb8 DZ |
3649 | block_group->discard_cursor = start; |
3650 | if (async && *total_trimmed) | |
3651 | break; | |
f7039b1d | 3652 | |
7fe1e641 LZ |
3653 | if (fatal_signal_pending(current)) { |
3654 | ret = -ERESTARTSYS; | |
3655 | break; | |
3656 | } | |
3657 | ||
3658 | cond_resched(); | |
3659 | } | |
2bee7eb8 DZ |
3660 | |
3661 | return ret; | |
3662 | ||
3663 | out_unlock: | |
3664 | block_group->discard_cursor = btrfs_block_group_end(block_group); | |
3665 | spin_unlock(&ctl->tree_lock); | |
3666 | mutex_unlock(&ctl->cache_writeout_mutex); | |
3667 | ||
7fe1e641 LZ |
3668 | return ret; |
3669 | } | |
3670 | ||
da080fe1 DZ |
3671 | /* |
3672 | * If we break out of trimming a bitmap prematurely, we should reset the | |
3673 | * trimming bit. In a rather contrived case, it's possible to race here, so |
3674 | * reset the state to BTRFS_TRIM_STATE_UNTRIMMED. | |
3675 | * | |
3676 | * start = start of bitmap | |
3677 | * end = near end of bitmap | |
3678 | * | |
3679 | * Thread 1: Thread 2: | |
3680 | * trim_bitmaps(start) | |
3681 | * trim_bitmaps(end) | |
3682 | * end_trimming_bitmap() | |
3683 | * reset_trimming_bitmap() | |
3684 | */ | |
3685 | static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) | |
3686 | { | |
3687 | struct btrfs_free_space *entry; | |
3688 | ||
3689 | spin_lock(&ctl->tree_lock); | |
3690 | entry = tree_search_offset(ctl, offset, 1, 0); | |
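/*
 * A trimmed bitmap is excluded from the discardable counters, so add
 * its extents and bytes back before flipping it to untrimmed.
 */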
dfb79ddb | 3691 | if (entry) { |
5dc7c10b | 3692 | if (btrfs_free_space_trimmed(entry)) { |
dfb79ddb DZ |
3693 | ctl->discardable_extents[BTRFS_STAT_CURR] += |
3694 | entry->bitmap_extents; | |
5dc7c10b DZ |
3695 | ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes; |
3696 | } | |
da080fe1 | 3697 | entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; |
dfb79ddb DZ |
3698 | } |
3699 | ||
da080fe1 DZ |
3700 | spin_unlock(&ctl->tree_lock); |
3701 | } | |
3702 | ||
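/*
 * Mark a bitmap we finished trimming as trimmed and remove its extents
 * and bytes from the discardable counters.
 */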
dfb79ddb DZ |
3703 | static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl, |
3704 | struct btrfs_free_space *entry) | |
da080fe1 | 3705 | { |
dfb79ddb | 3706 | if (btrfs_free_space_trimming_bitmap(entry)) { |
da080fe1 | 3707 | entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; |
dfb79ddb DZ |
3708 | ctl->discardable_extents[BTRFS_STAT_CURR] -= |
3709 | entry->bitmap_extents; | |
5dc7c10b | 3710 | ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes; |
dfb79ddb | 3711 | } |
da080fe1 DZ |
3712 | } |
3713 | ||
2bee7eb8 DZ |
3714 | /* |
3715 | * If @async is set, then we will trim 1 region and return. | |
3716 | */ | |
32da5386 | 3717 | static int trim_bitmaps(struct btrfs_block_group *block_group, |
2bee7eb8 | 3718 | u64 *total_trimmed, u64 start, u64 end, u64 minlen, |
7fe6d45e | 3719 | u64 maxlen, bool async) |
7fe1e641 | 3720 | { |
19b2a2c7 DZ |
3721 | struct btrfs_discard_ctl *discard_ctl = |
3722 | &block_group->fs_info->discard_ctl; | |
7fe1e641 LZ |
3723 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
3724 | struct btrfs_free_space *entry; | |
3725 | int ret = 0; | |
3726 | int ret2; | |
3727 | u64 bytes; | |
3728 | u64 offset = offset_to_bitmap(ctl, start); | |
19b2a2c7 | 3729 | const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); |
7fe1e641 LZ |
3730 | |
3731 | while (offset < end) { | |
3732 | bool next_bitmap = false; | |
55507ce3 | 3733 | struct btrfs_trim_range trim_entry; |
7fe1e641 | 3734 | |
55507ce3 | 3735 | mutex_lock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3736 | spin_lock(&ctl->tree_lock); |
3737 | ||
3738 | if (ctl->free_space < minlen) { | |
2bee7eb8 DZ |
3739 | block_group->discard_cursor = |
3740 | btrfs_block_group_end(block_group); | |
7fe1e641 | 3741 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 3742 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3743 | break; |
3744 | } | |
3745 | ||
3746 | entry = tree_search_offset(ctl, offset, 1, 0); | |
7fe6d45e DZ |
3747 | /* |
3748 | * Bitmaps are marked trimmed lossily now to prevent constant | |
3749 | * discarding of the same bitmap (the reason why we are bound | |
3750 | * by the filters). So, retrim the block group bitmaps when we | |
3751 | * are preparing to punt to the unused_bgs list. This uses | |
3752 | * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED | |
3753 | * which is the only discard index which sets minlen to 0. | |
3754 | */ | |
3755 | if (!entry || (async && minlen && start == offset && | |
2bee7eb8 | 3756 | btrfs_free_space_trimmed(entry))) { |
7fe1e641 | 3757 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 3758 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3759 | next_bitmap = true; |
3760 | goto next; | |
3761 | } | |
3762 | ||
da080fe1 DZ |
3763 | /* |
3764 | * Async discard bitmap trimming begins by setting the start |
3765 | * to be key.objectid, and offset_to_bitmap() aligns to the |
3766 | * start of the bitmap. This lets us know we are fully | |
3767 | * scanning the bitmap rather than only some portion of it. | |
3768 | */ | |
3769 | if (start == offset) | |
3770 | entry->trim_state = BTRFS_TRIM_STATE_TRIMMING; | |
3771 | ||
7fe1e641 | 3772 | bytes = minlen; |
0584f718 | 3773 | ret2 = search_bitmap(ctl, entry, &start, &bytes, false); |
7fe1e641 | 3774 | if (ret2 || start >= end) { |
da080fe1 | 3775 | /* |
7fe6d45e DZ |
3776 | * We lossily consider a bitmap trimmed if we only skip |
3777 | * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER. | |
da080fe1 | 3778 | */ |
7fe6d45e | 3779 | if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER) |
dfb79ddb | 3780 | end_trimming_bitmap(ctl, entry); |
da080fe1 DZ |
3781 | else |
3782 | entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; | |
7fe1e641 | 3783 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 3784 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3785 | next_bitmap = true; |
3786 | goto next; | |
3787 | } | |
3788 | ||
2bee7eb8 DZ |
3789 | /* |
3790 | * We already trimmed a region, but are using the locking above | |
3791 | * to reset the trim_state. | |
3792 | */ | |
3793 | if (async && *total_trimmed) { | |
3794 | spin_unlock(&ctl->tree_lock); | |
3795 | mutex_unlock(&ctl->cache_writeout_mutex); | |
3796 | goto out; | |
3797 | } | |
3798 | ||
7fe1e641 | 3799 | bytes = min(bytes, end - start); |
7fe6d45e | 3800 | if (bytes < minlen || (async && maxlen && bytes > maxlen)) { |
7fe1e641 | 3801 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 3802 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3803 | goto next; |
3804 | } | |
3805 | ||
7fe6d45e DZ |
3806 | /* |
3807 | * Let bytes = BTRFS_MAX_DISCARD_SIZE + X. | |
3808 | * If X < @minlen, we won't trim X when we come back around. | |
3809 | * So trim it now. We differ here from trimming extents as we | |
3810 | * don't keep individual state per bit. | |
3811 | */ | |
3812 | if (async && | |
3813 | max_discard_size && | |
3814 | bytes > (max_discard_size + minlen)) | |
19b2a2c7 | 3815 | bytes = max_discard_size; |
4aa9ad52 | 3816 | |
7fe1e641 LZ |
3817 | bitmap_clear_bits(ctl, entry, start, bytes); |
3818 | if (entry->bytes == 0) | |
3819 | free_bitmap(ctl, entry); | |
3820 | ||
3821 | spin_unlock(&ctl->tree_lock); | |
55507ce3 FM |
3822 | trim_entry.start = start; |
3823 | trim_entry.bytes = bytes; | |
3824 | list_add_tail(&trim_entry.list, &ctl->trimming_ranges); | |
3825 | mutex_unlock(&ctl->cache_writeout_mutex); | |
7fe1e641 LZ |
3826 | |
3827 | ret = do_trimming(block_group, total_trimmed, start, bytes, | |
b0643e59 | 3828 | start, bytes, 0, &trim_entry); |
da080fe1 DZ |
3829 | if (ret) { |
3830 | reset_trimming_bitmap(ctl, offset); | |
2bee7eb8 DZ |
3831 | block_group->discard_cursor = |
3832 | btrfs_block_group_end(block_group); | |
7fe1e641 | 3833 | break; |
da080fe1 | 3834 | } |
7fe1e641 LZ |
3835 | next: |
3836 | if (next_bitmap) { | |
3837 | offset += BITS_PER_BITMAP * ctl->unit; | |
da080fe1 | 3838 | start = offset; |
7fe1e641 LZ |
3839 | } else { |
3840 | start += bytes; | |
f7039b1d | 3841 | } |
2bee7eb8 | 3842 | block_group->discard_cursor = start; |
f7039b1d LD |
3843 | |
3844 | if (fatal_signal_pending(current)) { | |
da080fe1 DZ |
3845 | if (start != offset) |
3846 | reset_trimming_bitmap(ctl, offset); | |
f7039b1d LD |
3847 | ret = -ERESTARTSYS; |
3848 | break; | |
3849 | } | |
3850 | ||
3851 | cond_resched(); | |
3852 | } | |
3853 | ||
2bee7eb8 DZ |
3854 | if (offset >= end) |
3855 | block_group->discard_cursor = end; | |
3856 | ||
3857 | out: | |
f7039b1d LD |
3858 | return ret; |
3859 | } | |
581bb050 | 3860 | |
32da5386 | 3861 | int btrfs_trim_block_group(struct btrfs_block_group *block_group, |
e33e17ee JM |
3862 | u64 *trimmed, u64 start, u64 end, u64 minlen) |
3863 | { | |
da080fe1 | 3864 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
e33e17ee | 3865 | int ret; |
da080fe1 | 3866 | u64 rem = 0; |
e33e17ee | 3867 | |
2eda5708 NA |
3868 | ASSERT(!btrfs_is_zoned(block_group->fs_info)); |
3869 | ||
e33e17ee JM |
3870 | *trimmed = 0; |
3871 | ||
3872 | spin_lock(&block_group->lock); | |
3873 | if (block_group->removed) { | |
04216820 | 3874 | spin_unlock(&block_group->lock); |
e33e17ee | 3875 | return 0; |
04216820 | 3876 | } |
6b7304af | 3877 | btrfs_freeze_block_group(block_group); |
e33e17ee JM |
3878 | spin_unlock(&block_group->lock); |
3879 | ||
2bee7eb8 | 3880 | ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false); |
e33e17ee JM |
3881 | if (ret) |
3882 | goto out; | |
7fe1e641 | 3883 | |
7fe6d45e | 3884 | ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false); |
da080fe1 DZ |
3885 | div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); |
3886 | /* If we ended in the middle of a bitmap, reset the trimming flag */ | |
3887 | if (rem) | |
3888 | reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end)); | |
e33e17ee | 3889 | out: |
6b7304af | 3890 | btrfs_unfreeze_block_group(block_group); |
7fe1e641 LZ |
3891 | return ret; |
3892 | } | |
3893 | ||
2bee7eb8 DZ |
3894 | int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group, |
3895 | u64 *trimmed, u64 start, u64 end, u64 minlen, | |
3896 | bool async) | |
3897 | { | |
3898 | int ret; | |
3899 | ||
3900 | *trimmed = 0; | |
3901 | ||
3902 | spin_lock(&block_group->lock); | |
3903 | if (block_group->removed) { | |
3904 | spin_unlock(&block_group->lock); | |
3905 | return 0; | |
3906 | } | |
6b7304af | 3907 | btrfs_freeze_block_group(block_group); |
2bee7eb8 DZ |
3908 | spin_unlock(&block_group->lock); |
3909 | ||
3910 | ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async); | |
6b7304af | 3911 | btrfs_unfreeze_block_group(block_group); |
2bee7eb8 DZ |
3912 | |
3913 | return ret; | |
3914 | } | |
3915 | ||
3916 | int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group, | |
3917 | u64 *trimmed, u64 start, u64 end, u64 minlen, | |
7fe6d45e | 3918 | u64 maxlen, bool async) |
2bee7eb8 DZ |
3919 | { |
3920 | int ret; | |
3921 | ||
3922 | *trimmed = 0; | |
3923 | ||
3924 | spin_lock(&block_group->lock); | |
3925 | if (block_group->removed) { | |
3926 | spin_unlock(&block_group->lock); | |
3927 | return 0; | |
3928 | } | |
6b7304af | 3929 | btrfs_freeze_block_group(block_group); |
2bee7eb8 DZ |
3930 | spin_unlock(&block_group->lock); |
3931 | ||
7fe6d45e DZ |
3932 | ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen, |
3933 | async); | |
3934 | ||
6b7304af | 3935 | btrfs_unfreeze_block_group(block_group); |
2bee7eb8 DZ |
3936 | |
3937 | return ret; | |
3938 | } | |
3939 | ||
94846229 BB |
3940 | bool btrfs_free_space_cache_v1_active(struct btrfs_fs_info *fs_info) |
3941 | { | |
3942 | return btrfs_super_cache_generation(fs_info->super_copy); | |
3943 | } | |
3944 | ||
36b216c8 BB |
3945 | static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info, |
3946 | struct btrfs_trans_handle *trans) | |
3947 | { | |
3948 | struct btrfs_block_group *block_group; | |
3949 | struct rb_node *node; | |
77364faf | 3950 | int ret = 0; |
36b216c8 BB |
3951 | |
3952 | btrfs_info(fs_info, "cleaning free space cache v1"); | |
3953 | ||
3954 | node = rb_first(&fs_info->block_group_cache_tree); | |
3955 | while (node) { | |
3956 | block_group = rb_entry(node, struct btrfs_block_group, cache_node); | |
3957 | ret = btrfs_remove_free_space_inode(trans, NULL, block_group); | |
3958 | if (ret) | |
3959 | goto out; | |
3960 | node = rb_next(node); | |
3961 | } | |
3962 | out: | |
3963 | return ret; | |
3964 | } | |
3965 | ||
94846229 BB |
3966 | int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool active) |
3967 | { | |
3968 | struct btrfs_trans_handle *trans; | |
3969 | int ret; | |
3970 | ||
3971 | /* | |
36b216c8 BB |
3972 | * update_super_roots will appropriately set or unset |
3973 | * super_copy->cache_generation based on SPACE_CACHE and | |
3974 | * BTRFS_FS_CLEANUP_SPACE_CACHE_V1. For this reason, we need a | |
3975 | * transaction commit whether we are enabling space cache v1 and don't | |
3976 | * have any other work to do, or are disabling it and removing free | |
3977 | * space inodes. | |
94846229 BB |
3978 | */ |
3979 | trans = btrfs_start_transaction(fs_info->tree_root, 0); | |
3980 | if (IS_ERR(trans)) | |
3981 | return PTR_ERR(trans); | |
3982 | ||
36b216c8 | 3983 | if (!active) { |
94846229 | 3984 | set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); |
36b216c8 BB |
3985 | ret = cleanup_free_space_cache_v1(fs_info, trans); |
3986 | if (ret) { | |
3987 | btrfs_abort_transaction(trans, ret); | |
3988 | btrfs_end_transaction(trans); | |
3989 | goto out; | |
3990 | } | |
3991 | } | |
94846229 BB |
3992 | |
3993 | ret = btrfs_commit_transaction(trans); | |
36b216c8 | 3994 | out: |
94846229 BB |
3995 | clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); |
3996 | ||
3997 | return ret; | |
3998 | } | |
3999 | ||
74255aa0 | 4000 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
dc11dd5d JB |
4001 | /* |
4002 | * Use this if you need to make a bitmap or extent entry specifically; it |
4003 | * doesn't do any of the merging that add_free_space does. This acts a lot like |
4004 | * how the free space cache loading stuff works, so you can get really weird | |
4005 | * configurations. | |
4006 | */ | |
32da5386 | 4007 | int test_add_free_space_entry(struct btrfs_block_group *cache, |
dc11dd5d | 4008 | u64 offset, u64 bytes, bool bitmap) |
74255aa0 | 4009 | { |
dc11dd5d JB |
4010 | struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; |
4011 | struct btrfs_free_space *info = NULL, *bitmap_info; | |
4012 | void *map = NULL; | |
da080fe1 | 4013 | enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED; |
dc11dd5d JB |
4014 | u64 bytes_added; |
4015 | int ret; | |
74255aa0 | 4016 | |
dc11dd5d JB |
4017 | again: |
4018 | if (!info) { | |
4019 | info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); | |
4020 | if (!info) | |
4021 | return -ENOMEM; | |
74255aa0 JB |
4022 | } |
4023 | ||
dc11dd5d JB |
4024 | if (!bitmap) { |
4025 | spin_lock(&ctl->tree_lock); | |
4026 | info->offset = offset; | |
4027 | info->bytes = bytes; | |
cef40483 | 4028 | info->max_extent_size = 0; |
dc11dd5d JB |
4029 | ret = link_free_space(ctl, info); |
4030 | spin_unlock(&ctl->tree_lock); | |
4031 | if (ret) | |
4032 | kmem_cache_free(btrfs_free_space_cachep, info); | |
4033 | return ret; | |
4034 | } | |
4035 | ||
4036 | if (!map) { | |
3acd4850 | 4037 | map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS); |
dc11dd5d JB |
4038 | if (!map) { |
4039 | kmem_cache_free(btrfs_free_space_cachep, info); | |
4040 | return -ENOMEM; | |
4041 | } | |
4042 | } | |
4043 | ||
4044 | spin_lock(&ctl->tree_lock); | |
4045 | bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | |
4046 | 1, 0); | |
4047 | if (!bitmap_info) { | |
4048 | info->bitmap = map; | |
4049 | map = NULL; | |
4050 | add_new_bitmap(ctl, info, offset); | |
4051 | bitmap_info = info; | |
20005523 | 4052 | info = NULL; |
dc11dd5d | 4053 | } |
74255aa0 | 4054 | |
da080fe1 DZ |
4055 | bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes, |
4056 | trim_state); | |
cef40483 | 4057 | |
dc11dd5d JB |
4058 | bytes -= bytes_added; |
4059 | offset += bytes_added; | |
4060 | spin_unlock(&ctl->tree_lock); | |
74255aa0 | 4061 | |
dc11dd5d JB |
4062 | if (bytes) |
4063 | goto again; | |
74255aa0 | 4064 | |
20005523 FM |
4065 | if (info) |
4066 | kmem_cache_free(btrfs_free_space_cachep, info); | |
3acd4850 CL |
4067 | if (map) |
4068 | kmem_cache_free(btrfs_free_space_bitmap_cachep, map); | |
dc11dd5d | 4069 | return 0; |
74255aa0 JB |
4070 | } |
4071 | ||
4072 | /* | |
4073 | * Checks to see if the given range is in the free space cache. This is really | |
4074 | * just used to check the absence of space, so if there is free space in the | |
4075 | * range at all we will return 1. | |
4076 | */ | |
32da5386 | 4077 | int test_check_exists(struct btrfs_block_group *cache, |
dc11dd5d | 4078 | u64 offset, u64 bytes) |
74255aa0 JB |
4079 | { |
4080 | struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; | |
4081 | struct btrfs_free_space *info; | |
4082 | int ret = 0; | |
4083 | ||
4084 | spin_lock(&ctl->tree_lock); | |
4085 | info = tree_search_offset(ctl, offset, 0, 0); | |
4086 | if (!info) { | |
4087 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | |
4088 | 1, 0); | |
4089 | if (!info) | |
4090 | goto out; | |
4091 | } | |
4092 | ||
4093 | have_info: | |
4094 | if (info->bitmap) { | |
4095 | u64 bit_off, bit_bytes; | |
4096 | struct rb_node *n; | |
4097 | struct btrfs_free_space *tmp; | |
4098 | ||
4099 | bit_off = offset; | |
4100 | bit_bytes = ctl->unit; | |
0584f718 | 4101 | ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false); |
74255aa0 JB |
4102 | if (!ret) { |
4103 | if (bit_off == offset) { | |
4104 | ret = 1; | |
4105 | goto out; | |
4106 | } else if (bit_off > offset && | |
4107 | offset + bytes > bit_off) { | |
4108 | ret = 1; | |
4109 | goto out; | |
4110 | } | |
4111 | } | |
4112 | ||
4113 | n = rb_prev(&info->offset_index); | |
4114 | while (n) { | |
4115 | tmp = rb_entry(n, struct btrfs_free_space, | |
4116 | offset_index); | |
4117 | if (tmp->offset + tmp->bytes < offset) | |
4118 | break; | |
4119 | if (offset + bytes < tmp->offset) { | |
5473e0c4 | 4120 | n = rb_prev(&tmp->offset_index); |
74255aa0 JB |
4121 | continue; |
4122 | } | |
4123 | info = tmp; | |
4124 | goto have_info; | |
4125 | } | |
4126 | ||
4127 | n = rb_next(&info->offset_index); | |
4128 | while (n) { | |
4129 | tmp = rb_entry(n, struct btrfs_free_space, | |
4130 | offset_index); | |
4131 | if (offset + bytes < tmp->offset) | |
4132 | break; | |
4133 | if (tmp->offset + tmp->bytes < offset) { | |
5473e0c4 | 4134 | n = rb_next(&tmp->offset_index); |
74255aa0 JB |
4135 | continue; |
4136 | } | |
4137 | info = tmp; | |
4138 | goto have_info; | |
4139 | } | |
4140 | ||
20005523 | 4141 | ret = 0; |
74255aa0 JB |
4142 | goto out; |
4143 | } | |
4144 | ||
4145 | if (info->offset == offset) { | |
4146 | ret = 1; | |
4147 | goto out; | |
4148 | } | |
4149 | ||
4150 | if (offset > info->offset && offset < info->offset + info->bytes) | |
4151 | ret = 1; | |
4152 | out: | |
4153 | spin_unlock(&ctl->tree_lock); | |
4154 | return ret; | |
4155 | } | |
dc11dd5d | 4156 | #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */ |