/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_32K

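/*
 * A range that is currently being trimmed, and therefore temporarily
 * unlinked from the free space rbtree.  The cache writer walks
 * ctl->trimming_ranges so these byte ranges are not lost from the
 * on-disk cache (see write_cache_extent_entries()).
 */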
struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}

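/*
 * Return the inode backing this block group's space cache, reusing the
 * reference cached on the block group when possible and falling back to
 * a tree lookup.  Old style inodes (without NODATASUM/NODATACOW) are
 * converted on the spot and the cache is marked to be cleared.
 */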
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(root->fs_info,
			   "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

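/*
 * Create the cache inode item and the free space header item that points
 * back at it.  The inode is created with NOCOMPRESS|PREALLOC set; data
 * csums are disabled for everything except the free inode cache, which
 * inlines its own crcs in the cache file.
 */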
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crcs for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			      sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root->fs_info, 1) +
		btrfs_calc_trans_metadata_size(root->fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	int ret = 0;
	struct btrfs_path *path = btrfs_alloc_path();
	bool locked = false;

	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}

	if (block_group) {
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(root, trans, block_group,
					    &block_group->io_ctl, path,
					    block_group->key.objectid);
			btrfs_put_block_group(block_group);
		}

		/*
		 * now that we've truncated the cache away, it's no longer
		 * set up or written
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
	}
	btrfs_free_path(path);

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 * We don't need to check for -EAGAIN because we're a free space
	 * cache inode.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

/* Warm the page cache with the whole cache file before we read it back. */
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

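/*
 * The cache file is read and written through the btrfs_io_ctl helpers
 * below.  Its layout, as implied by those helpers: the first page begins
 * with one u32 crc per page (or, when check_crcs is off, just a single
 * u64 hole), followed by a u64 generation; after that come packed
 * btrfs_free_space_entry records, and each bitmap payload occupies a
 * whole page of its own once the entries end.
 */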
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_SIZE);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
			le64_to_cpu(*gen), generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			     "csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

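/*
 * Serialization of one free space record: extents and bitmaps share the
 * same fixed-size entry (offset, bytes, type).  For a bitmap the entry is
 * only a marker; the PAGE_SIZE payload is emitted separately by
 * io_ctl_add_bitmap() after all the entries have been written.
 */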
static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
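/*
 * For example: if the cache was written with [0, 4K) and [4K, 8K) as two
 * extent entries, the pass below rewrites them as one [0, 8K) entry.
 */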
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

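/*
 * Read one cache file into the in-memory free space ctl: validate the
 * header generation against the inode, then walk the entries, linking
 * extent entries directly and queueing bitmap entries so their payload
 * pages can be read once the entry list ends.  Returns 1 on success,
 * 0 when the cache is unusable (the caller rebuilds it), or a negative
 * errno on hard failures.
 */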
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(root->fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			"free space inode generation (%llu) did not match free space cache generation (%llu)",
			BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * The bitmap payloads are stored after all the entries, so read
	 * them back in the same order the bitmap entries were added to
	 * the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

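/*
 * Load the cache for one block group.  After a successful read, the total
 * loaded free space must equal key.offset - used - bytes_super for the
 * block group; a mismatch discards what was loaded and flags the cache to
 * be cleared and rebuilt.
 */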
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

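/*
 * Emit an entry for every extent and bitmap in the free space tree,
 * continue into the cluster owned by this block group once the tree runs
 * out, and finally cover any ranges parked on ctl->trimming_ranges.
 * Bitmap entries are also collected on @bitmap_list so their payloads can
 * be appended later by write_bitmap_entries().
 */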
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			       struct btrfs_free_space_ctl *ctl,
			       struct btrfs_block_group_cache *block_group,
			       int *entries, int *bitmaps,
			       struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

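/*
 * Stamp the free space header item with the entry/bitmap counts and the
 * current transid.  On any failure the dirty range state is cleared so
 * the cache is treated as invalid rather than half written.
 */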
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

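/*
 * Extents pinned during this transaction become free space once it
 * commits, so they are written into the cache now to avoid leaking that
 * space across an unmount or crash.
 */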
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state,
			   struct list_head *bitmap_list)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}

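/*
 * Second half of the cache writeout started by __btrfs_write_out_cache():
 * wait for the ordered IO, then update the cache item so the cache reads
 * back as valid.  Any failure invalidates the pages and zeroes the inode
 * generation so the stale file is ignored on the next load.
 */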
int btrfs_wait_cache_io(struct btrfs_root *root,
			struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_io_ctl *io_ctl,
			struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	if (block_group)
		root = root->fs_info->tree_root;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
#ifdef DEBUG
			btrfs_err(root->fs_info,
				"failed to write free space cache for block group %llu",
				block_group->key.objectid);
#endif
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.  Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root: the root the inode belongs to
 * @inode: the inode the cache is written into
 * @ctl: the free space cache we are going to write out
 * @block_group: the block_group for this cache, if it belongs to a block_group
 * @io_ctl: the io_ctl that tracks the pages under IO
 * @trans: the trans handle
 * @path: the path to use
 * @offset: the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
				0, i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	/*
	 * At this point the pages are under IO and we're happy.  The caller
	 * is responsible for waiting on them and updating the cache and the
	 * inode.
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}

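/*
 * Start writeback of the cache file for a dirty block group.  This only
 * kicks off the IO; when it returns 0 the caller is expected to follow up
 * with btrfs_wait_cache_io() to wait for the IO and finalize the cache
 * item.
 */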
int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
				      &block_group->io_ctl, trans,
				      path, block_group->key.objectid);
	if (ret) {
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);
#endif
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

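/*
 * Conversions between byte offsets and bitmap positions.  Each bitmap
 * covers BITS_PER_BITMAP * ctl->unit bytes; e.g. with 4K pages and a 4K
 * unit, one bitmap spans 32768 * 4K = 128M.  So with ctl->start == 0,
 * offset_to_bitmap(ctl, 130M) returns 128M, and
 * offset_to_bit(128M, 4K, 130M) returns bit 512.
 */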
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

1478 | /* | |
70cb0743 JB |
1479 | * searches the tree for the given offset. |
1480 | * | |
96303081 JB |
1481 | * fuzzy - If this is set, then we are trying to make an allocation, and we just |
1482 | * want a section that has at least bytes size and comes at or after the given | |
1483 | * offset. | |
0f9dd46c | 1484 | */ |
96303081 | 1485 | static struct btrfs_free_space * |
34d52cb6 | 1486 | tree_search_offset(struct btrfs_free_space_ctl *ctl, |
96303081 | 1487 | u64 offset, int bitmap_only, int fuzzy) |
0f9dd46c | 1488 | { |
34d52cb6 | 1489 | struct rb_node *n = ctl->free_space_offset.rb_node; |
96303081 JB |
1490 | struct btrfs_free_space *entry, *prev = NULL; |
1491 | ||
1492 | /* find entry that is closest to the 'offset' */ | |
1493 | while (1) { | |
1494 | if (!n) { | |
1495 | entry = NULL; | |
1496 | break; | |
1497 | } | |
0f9dd46c | 1498 | |
0f9dd46c | 1499 | entry = rb_entry(n, struct btrfs_free_space, offset_index); |
96303081 | 1500 | prev = entry; |
0f9dd46c | 1501 | |
96303081 | 1502 | if (offset < entry->offset) |
0f9dd46c | 1503 | n = n->rb_left; |
96303081 | 1504 | else if (offset > entry->offset) |
0f9dd46c | 1505 | n = n->rb_right; |
96303081 | 1506 | else |
0f9dd46c | 1507 | break; |
0f9dd46c JB |
1508 | } |
1509 | ||
96303081 JB |
1510 | if (bitmap_only) { |
1511 | if (!entry) | |
1512 | return NULL; | |
1513 | if (entry->bitmap) | |
1514 | return entry; | |
0f9dd46c | 1515 | |
96303081 JB |
1516 | /* |
1517 | * bitmap entry and extent entry may share the same offset;
1518 | * in that case, bitmap entry comes after extent entry. | |
1519 | */ | |
1520 | n = rb_next(n); | |
1521 | if (!n) | |
1522 | return NULL; | |
1523 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | |
1524 | if (entry->offset != offset) | |
1525 | return NULL; | |
0f9dd46c | 1526 | |
96303081 JB |
1527 | WARN_ON(!entry->bitmap); |
1528 | return entry; | |
1529 | } else if (entry) { | |
1530 | if (entry->bitmap) { | |
0f9dd46c | 1531 | /* |
96303081 JB |
1532 | * if previous extent entry covers the offset, |
1533 | * we should return it instead of the bitmap entry | |
0f9dd46c | 1534 | */ |
de6c4115 MX |
1535 | n = rb_prev(&entry->offset_index); |
1536 | if (n) { | |
96303081 JB |
1537 | prev = rb_entry(n, struct btrfs_free_space, |
1538 | offset_index); | |
de6c4115 MX |
1539 | if (!prev->bitmap && |
1540 | prev->offset + prev->bytes > offset) | |
1541 | entry = prev; | |
0f9dd46c | 1542 | } |
96303081 JB |
1543 | } |
1544 | return entry; | |
1545 | } | |
1546 | ||
1547 | if (!prev) | |
1548 | return NULL; | |
1549 | ||
1550 | /* find last entry before the 'offset' */ | |
1551 | entry = prev; | |
1552 | if (entry->offset > offset) { | |
1553 | n = rb_prev(&entry->offset_index); | |
1554 | if (n) { | |
1555 | entry = rb_entry(n, struct btrfs_free_space, | |
1556 | offset_index); | |
b12d6869 | 1557 | ASSERT(entry->offset <= offset); |
0f9dd46c | 1558 | } else { |
96303081 JB |
1559 | if (fuzzy) |
1560 | return entry; | |
1561 | else | |
1562 | return NULL; | |
0f9dd46c JB |
1563 | } |
1564 | } | |
1565 | ||
96303081 | 1566 | if (entry->bitmap) { |
de6c4115 MX |
1567 | n = rb_prev(&entry->offset_index); |
1568 | if (n) { | |
96303081 JB |
1569 | prev = rb_entry(n, struct btrfs_free_space, |
1570 | offset_index); | |
de6c4115 MX |
1571 | if (!prev->bitmap && |
1572 | prev->offset + prev->bytes > offset) | |
1573 | return prev; | |
96303081 | 1574 | } |
34d52cb6 | 1575 | if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) |
96303081 JB |
1576 | return entry; |
1577 | } else if (entry->offset + entry->bytes > offset) | |
1578 | return entry; | |
1579 | ||
1580 | if (!fuzzy) | |
1581 | return NULL; | |
1582 | ||
1583 | while (1) { | |
1584 | if (entry->bitmap) { | |
1585 | if (entry->offset + BITS_PER_BITMAP * | |
34d52cb6 | 1586 | ctl->unit > offset) |
96303081 JB |
1587 | break; |
1588 | } else { | |
1589 | if (entry->offset + entry->bytes > offset) | |
1590 | break; | |
1591 | } | |
1592 | ||
1593 | n = rb_next(&entry->offset_index); | |
1594 | if (!n) | |
1595 | return NULL; | |
1596 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | |
1597 | } | |
1598 | return entry; | |
0f9dd46c JB |
1599 | } |
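/*
 * For illustration (a hypothetical tree with ctl->start == 0): with an
 * extent entry covering [0, 8K) and a bitmap entry also at offset 0,
 *
 *	tree_search_offset(ctl, 4096, 0, 0)
 *
 * returns the extent entry, since it covers the offset, while
 *
 *	tree_search_offset(ctl, 0, 1, 0)
 *
 * steps past the extent entry sharing the offset and returns the
 * bitmap entry.
 */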
1600 | ||
f333adb5 | 1601 | static inline void |
34d52cb6 | 1602 | __unlink_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 | 1603 | struct btrfs_free_space *info) |
0f9dd46c | 1604 | { |
34d52cb6 LZ |
1605 | rb_erase(&info->offset_index, &ctl->free_space_offset); |
1606 | ctl->free_extents--; | |
f333adb5 LZ |
1607 | } |
1608 | ||
34d52cb6 | 1609 | static void unlink_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 LZ |
1610 | struct btrfs_free_space *info) |
1611 | { | |
34d52cb6 LZ |
1612 | __unlink_free_space(ctl, info); |
1613 | ctl->free_space -= info->bytes; | |
0f9dd46c JB |
1614 | } |
1615 | ||
34d52cb6 | 1616 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
0f9dd46c JB |
1617 | struct btrfs_free_space *info) |
1618 | { | |
1619 | int ret = 0; | |
1620 | ||
b12d6869 | 1621 | ASSERT(info->bytes || info->bitmap); |
34d52cb6 | 1622 | ret = tree_insert_offset(&ctl->free_space_offset, info->offset, |
96303081 | 1623 | &info->offset_index, (info->bitmap != NULL)); |
0f9dd46c JB |
1624 | if (ret) |
1625 | return ret; | |
1626 | ||
34d52cb6 LZ |
1627 | ctl->free_space += info->bytes; |
1628 | ctl->free_extents++; | |
96303081 JB |
1629 | return ret; |
1630 | } | |
1631 | ||
34d52cb6 | 1632 | static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) |
96303081 | 1633 | { |
34d52cb6 | 1634 | struct btrfs_block_group_cache *block_group = ctl->private; |
25891f79 JB |
1635 | u64 max_bytes; |
1636 | u64 bitmap_bytes; | |
1637 | u64 extent_bytes; | |
8eb2d829 | 1638 | u64 size = block_group->key.offset; |
0ef6447a FX |
1639 | u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; |
1640 | u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); | |
34d52cb6 | 1641 | |
0ef6447a | 1642 | max_bitmaps = max_t(u64, max_bitmaps, 1); |
dde5740f | 1643 | |
b12d6869 | 1644 | ASSERT(ctl->total_bitmaps <= max_bitmaps); |
96303081 JB |
1645 | |
1646 | /* | |
1647 | * The goal is to keep the total amount of memory used per 1GiB of space
1648 | * at or below 32KiB, so we need to adjust how much memory we allow to be
1649 | * used by extent-based free space tracking.
1650 | */ | |
ee22184b | 1651 | if (size < SZ_1G) |
8eb2d829 LZ |
1652 | max_bytes = MAX_CACHE_BYTES_PER_GIG; |
1653 | else | |
ee22184b | 1654 | max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G); |
96303081 | 1655 | |
25891f79 JB |
1656 | /* |
1657 | * we want to account for 1 more bitmap than what we have so we can make | |
1658 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as | |
1659 | * we add more bitmaps. | |
1660 | */ | |
b9ef22de | 1661 | bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit; |
96303081 | 1662 | |
25891f79 | 1663 | if (bitmap_bytes >= max_bytes) { |
34d52cb6 | 1664 | ctl->extents_thresh = 0; |
25891f79 JB |
1665 | return; |
1666 | } | |
96303081 | 1667 | |
25891f79 | 1668 | /* |
f8c269d7 | 1669 | * we want the extent entry threshold to be at most 1/2 the max bytes
25891f79 JB |
1670 | * we can have, or whatever is left after the bitmaps, whichever is less.
1671 | */ | |
1672 | extent_bytes = max_bytes - bitmap_bytes; | |
f8c269d7 | 1673 | extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1); |
96303081 | 1674 | |
34d52cb6 | 1675 | ctl->extents_thresh = |
f8c269d7 | 1676 | div_u64(extent_bytes, sizeof(struct btrfs_free_space)); |
96303081 JB |
1677 | } |
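/*
 * Worked numbers (an assumed example with ctl->unit == 4096 and a
 * 1GiB block group): bytes_per_bg == 128MiB, so max_bitmaps == 8 and
 * max_bytes == MAX_CACHE_BYTES_PER_GIG == 32KiB.  With, say, 3 bitmaps
 * in use, bitmap_bytes == (3 + 1) * 4096 == 16KiB, which leaves
 * extent_bytes == 16KiB (exactly the max_bytes >> 1 cap), giving
 * extents_thresh == 16384 / sizeof(struct btrfs_free_space).
 */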
1678 | ||
bb3ac5a4 MX |
1679 | static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, |
1680 | struct btrfs_free_space *info, | |
1681 | u64 offset, u64 bytes) | |
96303081 | 1682 | { |
f38b6e75 | 1683 | unsigned long start, count; |
96303081 | 1684 | |
34d52cb6 LZ |
1685 | start = offset_to_bit(info->offset, ctl->unit, offset); |
1686 | count = bytes_to_bits(bytes, ctl->unit); | |
b12d6869 | 1687 | ASSERT(start + count <= BITS_PER_BITMAP); |
96303081 | 1688 | |
f38b6e75 | 1689 | bitmap_clear(info->bitmap, start, count); |
96303081 JB |
1690 | |
1691 | info->bytes -= bytes; | |
bb3ac5a4 MX |
1692 | } |
1693 | ||
1694 | static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, | |
1695 | struct btrfs_free_space *info, u64 offset, | |
1696 | u64 bytes) | |
1697 | { | |
1698 | __bitmap_clear_bits(ctl, info, offset, bytes); | |
34d52cb6 | 1699 | ctl->free_space -= bytes; |
96303081 JB |
1700 | } |
1701 | ||
34d52cb6 | 1702 | static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, |
817d52f8 JB |
1703 | struct btrfs_free_space *info, u64 offset, |
1704 | u64 bytes) | |
96303081 | 1705 | { |
f38b6e75 | 1706 | unsigned long start, count; |
96303081 | 1707 | |
34d52cb6 LZ |
1708 | start = offset_to_bit(info->offset, ctl->unit, offset); |
1709 | count = bytes_to_bits(bytes, ctl->unit); | |
b12d6869 | 1710 | ASSERT(start + count <= BITS_PER_BITMAP); |
96303081 | 1711 | |
f38b6e75 | 1712 | bitmap_set(info->bitmap, start, count); |
96303081 JB |
1713 | |
1714 | info->bytes += bytes; | |
34d52cb6 | 1715 | ctl->free_space += bytes; |
96303081 JB |
1716 | } |
1717 | ||
a4820398 MX |
1718 | /* |
1719 | * If we cannot find a suitable extent, we use "bytes" to record
1720 | * the size of the largest free extent found.
1721 | */ | |
34d52cb6 | 1722 | static int search_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 | 1723 | struct btrfs_free_space *bitmap_info, u64 *offset, |
0584f718 | 1724 | u64 *bytes, bool for_alloc) |
96303081 JB |
1725 | { |
1726 | unsigned long found_bits = 0; | |
a4820398 | 1727 | unsigned long max_bits = 0; |
96303081 JB |
1728 | unsigned long bits, i; |
1729 | unsigned long next_zero; | |
a4820398 | 1730 | unsigned long extent_bits; |
96303081 | 1731 | |
cef40483 JB |
1732 | /* |
1733 | * Skip searching the bitmap if we don't have a contiguous section that | |
1734 | * is large enough for this allocation. | |
1735 | */ | |
0584f718 JB |
1736 | if (for_alloc && |
1737 | bitmap_info->max_extent_size && | |
cef40483 JB |
1738 | bitmap_info->max_extent_size < *bytes) { |
1739 | *bytes = bitmap_info->max_extent_size; | |
1740 | return -1; | |
1741 | } | |
1742 | ||
34d52cb6 | 1743 | i = offset_to_bit(bitmap_info->offset, ctl->unit, |
96303081 | 1744 | max_t(u64, *offset, bitmap_info->offset)); |
34d52cb6 | 1745 | bits = bytes_to_bits(*bytes, ctl->unit); |
96303081 | 1746 | |
ebb3dad4 | 1747 | for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { |
0584f718 JB |
1748 | if (for_alloc && bits == 1) { |
1749 | found_bits = 1; | |
1750 | break; | |
1751 | } | |
96303081 JB |
1752 | next_zero = find_next_zero_bit(bitmap_info->bitmap, |
1753 | BITS_PER_BITMAP, i); | |
a4820398 MX |
1754 | extent_bits = next_zero - i; |
1755 | if (extent_bits >= bits) { | |
1756 | found_bits = extent_bits; | |
96303081 | 1757 | break; |
a4820398 MX |
1758 | } else if (extent_bits > max_bits) { |
1759 | max_bits = extent_bits; | |
96303081 JB |
1760 | } |
1761 | i = next_zero; | |
1762 | } | |
1763 | ||
1764 | if (found_bits) { | |
34d52cb6 LZ |
1765 | *offset = (u64)(i * ctl->unit) + bitmap_info->offset; |
1766 | *bytes = (u64)(found_bits) * ctl->unit; | |
96303081 JB |
1767 | return 0; |
1768 | } | |
1769 | ||
a4820398 | 1770 | *bytes = (u64)(max_bits) * ctl->unit; |
cef40483 | 1771 | bitmap_info->max_extent_size = *bytes; |
96303081 JB |
1772 | return -1; |
1773 | } | |
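/*
 * Calling convention, sketched under assumed values: to look for 1MiB
 * at or after "start" inside a bitmap entry,
 *
 *	u64 offset = start, bytes = SZ_1M;
 *	ret = search_bitmap(ctl, bitmap_info, &offset, &bytes, true);
 *
 * On success (ret == 0), offset/bytes describe a free run of at least
 * 1MiB; on failure (ret == -1), bytes is overwritten with the largest
 * free run found, which callers feed back into max_extent_size.
 */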
1774 | ||
a4820398 | 1775 | /* Cache the size of the max extent in bytes */ |
34d52cb6 | 1776 | static struct btrfs_free_space * |
53b381b3 | 1777 | find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes, |
a4820398 | 1778 | unsigned long align, u64 *max_extent_size) |
96303081 JB |
1779 | { |
1780 | struct btrfs_free_space *entry; | |
1781 | struct rb_node *node; | |
53b381b3 DW |
1782 | u64 tmp; |
1783 | u64 align_off; | |
96303081 JB |
1784 | int ret; |
1785 | ||
34d52cb6 | 1786 | if (!ctl->free_space_offset.rb_node) |
a4820398 | 1787 | goto out; |
96303081 | 1788 | |
34d52cb6 | 1789 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); |
96303081 | 1790 | if (!entry) |
a4820398 | 1791 | goto out; |
96303081 JB |
1792 | |
1793 | for (node = &entry->offset_index; node; node = rb_next(node)) { | |
1794 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
a4820398 MX |
1795 | if (entry->bytes < *bytes) { |
1796 | if (entry->bytes > *max_extent_size) | |
1797 | *max_extent_size = entry->bytes; | |
96303081 | 1798 | continue; |
a4820398 | 1799 | } |
96303081 | 1800 | |
53b381b3 DW |
1801 | /* make sure the space returned is big enough |
1802 | * to match our requested alignment | |
1803 | */ | |
1804 | if (*bytes >= align) { | |
a4820398 | 1805 | tmp = entry->offset - ctl->start + align - 1; |
47c5713f | 1806 | tmp = div64_u64(tmp, align); |
53b381b3 DW |
1807 | tmp = tmp * align + ctl->start; |
1808 | align_off = tmp - entry->offset; | |
1809 | } else { | |
1810 | align_off = 0; | |
1811 | tmp = entry->offset; | |
1812 | } | |
1813 | ||
a4820398 MX |
1814 | if (entry->bytes < *bytes + align_off) { |
1815 | if (entry->bytes > *max_extent_size) | |
1816 | *max_extent_size = entry->bytes; | |
53b381b3 | 1817 | continue; |
a4820398 | 1818 | } |
53b381b3 | 1819 | |
96303081 | 1820 | if (entry->bitmap) { |
a4820398 MX |
1821 | u64 size = *bytes; |
1822 | ||
0584f718 | 1823 | ret = search_bitmap(ctl, entry, &tmp, &size, true); |
53b381b3 DW |
1824 | if (!ret) { |
1825 | *offset = tmp; | |
a4820398 | 1826 | *bytes = size; |
96303081 | 1827 | return entry; |
a4820398 MX |
1828 | } else if (size > *max_extent_size) { |
1829 | *max_extent_size = size; | |
53b381b3 | 1830 | } |
96303081 JB |
1831 | continue; |
1832 | } | |
1833 | ||
53b381b3 DW |
1834 | *offset = tmp; |
1835 | *bytes = entry->bytes - align_off; | |
96303081 JB |
1836 | return entry; |
1837 | } | |
a4820398 | 1838 | out: |
96303081 JB |
1839 | return NULL; |
1840 | } | |
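/*
 * The alignment round-up above, with assumed numbers (ctl->start == 0,
 * align == 64KiB, an extent entry at offset 100KiB): tmp becomes
 * (102400 + 65536 - 1) / 65536 == 2, so the aligned offset is
 * 2 * 65536 == 128KiB and align_off == 28KiB; the entry must then be
 * at least *bytes + 28KiB long to be usable.
 */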
1841 | ||
34d52cb6 | 1842 | static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1843 | struct btrfs_free_space *info, u64 offset) |
1844 | { | |
34d52cb6 | 1845 | info->offset = offset_to_bitmap(ctl, offset); |
f019f426 | 1846 | info->bytes = 0; |
f2d0f676 | 1847 | INIT_LIST_HEAD(&info->list); |
34d52cb6 LZ |
1848 | link_free_space(ctl, info); |
1849 | ctl->total_bitmaps++; | |
96303081 | 1850 | |
34d52cb6 | 1851 | ctl->op->recalc_thresholds(ctl); |
96303081 JB |
1852 | } |
1853 | ||
34d52cb6 | 1854 | static void free_bitmap(struct btrfs_free_space_ctl *ctl, |
edf6e2d1 LZ |
1855 | struct btrfs_free_space *bitmap_info) |
1856 | { | |
34d52cb6 | 1857 | unlink_free_space(ctl, bitmap_info); |
edf6e2d1 | 1858 | kfree(bitmap_info->bitmap); |
dc89e982 | 1859 | kmem_cache_free(btrfs_free_space_cachep, bitmap_info); |
34d52cb6 LZ |
1860 | ctl->total_bitmaps--; |
1861 | ctl->op->recalc_thresholds(ctl); | |
edf6e2d1 LZ |
1862 | } |
1863 | ||
34d52cb6 | 1864 | static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1865 | struct btrfs_free_space *bitmap_info, |
1866 | u64 *offset, u64 *bytes) | |
1867 | { | |
1868 | u64 end; | |
6606bb97 JB |
1869 | u64 search_start, search_bytes; |
1870 | int ret; | |
96303081 JB |
1871 | |
1872 | again: | |
34d52cb6 | 1873 | end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; |
96303081 | 1874 | |
6606bb97 | 1875 | /* |
bdb7d303 JB |
1876 | * We need to search for bits in this bitmap. We could only cover some |
1877 | * of the extent in this bitmap thanks to how we add space, so we need | |
1878 | * to search for as much of it as we can and clear that amount, and then
1879 | * go searching for the next bit. | |
6606bb97 JB |
1880 | */ |
1881 | search_start = *offset; | |
bdb7d303 | 1882 | search_bytes = ctl->unit; |
13dbc089 | 1883 | search_bytes = min(search_bytes, end - search_start + 1); |
0584f718 JB |
1884 | ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes, |
1885 | false); | |
b50c6e25 JB |
1886 | if (ret < 0 || search_start != *offset) |
1887 | return -EINVAL; | |
6606bb97 | 1888 | |
bdb7d303 JB |
1889 | /* We may have found more bits than what we need */ |
1890 | search_bytes = min(search_bytes, *bytes); | |
1891 | ||
1892 | /* Cannot clear past the end of the bitmap */ | |
1893 | search_bytes = min(search_bytes, end - search_start + 1); | |
1894 | ||
1895 | bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes); | |
1896 | *offset += search_bytes; | |
1897 | *bytes -= search_bytes; | |
96303081 JB |
1898 | |
1899 | if (*bytes) { | |
6606bb97 | 1900 | struct rb_node *next = rb_next(&bitmap_info->offset_index); |
edf6e2d1 | 1901 | if (!bitmap_info->bytes) |
34d52cb6 | 1902 | free_bitmap(ctl, bitmap_info); |
96303081 | 1903 | |
6606bb97 JB |
1904 | /* |
1905 | * no entry after this bitmap, but we still have bytes to | |
1906 | * remove, so something has gone wrong. | |
1907 | */ | |
1908 | if (!next) | |
96303081 JB |
1909 | return -EINVAL; |
1910 | ||
6606bb97 JB |
1911 | bitmap_info = rb_entry(next, struct btrfs_free_space, |
1912 | offset_index); | |
1913 | ||
1914 | /* | |
1915 | * if the next entry isn't a bitmap we need to return to let the | |
1916 | * extent stuff do its work. | |
1917 | */ | |
96303081 JB |
1918 | if (!bitmap_info->bitmap) |
1919 | return -EAGAIN; | |
1920 | ||
6606bb97 JB |
1921 | /* |
1922 | * Ok the next item is a bitmap, but it may not actually hold | |
1923 | * the information for the rest of this free space stuff, so | |
1924 | * look for it, and if we don't find it return so we can try | |
1925 | * everything over again. | |
1926 | */ | |
1927 | search_start = *offset; | |
bdb7d303 | 1928 | search_bytes = ctl->unit; |
34d52cb6 | 1929 | ret = search_bitmap(ctl, bitmap_info, &search_start, |
0584f718 | 1930 | &search_bytes, false); |
6606bb97 JB |
1931 | if (ret < 0 || search_start != *offset) |
1932 | return -EAGAIN; | |
1933 | ||
96303081 | 1934 | goto again; |
edf6e2d1 | 1935 | } else if (!bitmap_info->bytes) |
34d52cb6 | 1936 | free_bitmap(ctl, bitmap_info); |
96303081 JB |
1937 | |
1938 | return 0; | |
1939 | } | |
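/*
 * The -EAGAIN above is a protocol with btrfs_remove_free_space(): when
 * the range being removed continues past this bitmap into a non-bitmap
 * entry, the caller restarts its lookup from the updated *offset with
 * re_search set, so one removal can walk across alternating extent and
 * bitmap entries.
 */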
1940 | ||
2cdc342c JB |
1941 | static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, |
1942 | struct btrfs_free_space *info, u64 offset, | |
1943 | u64 bytes) | |
1944 | { | |
1945 | u64 bytes_to_set = 0; | |
1946 | u64 end; | |
1947 | ||
1948 | end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); | |
1949 | ||
1950 | bytes_to_set = min(end - offset, bytes); | |
1951 | ||
1952 | bitmap_set_bits(ctl, info, offset, bytes_to_set); | |
1953 | ||
cef40483 JB |
1954 | /* |
1955 | * We set some bytes, we have no idea what the max extent size is | |
1956 | * anymore. | |
1957 | */ | |
1958 | info->max_extent_size = 0; | |
1959 | ||
2cdc342c JB |
1960 | return bytes_to_set; |
1961 | ||
1962 | } | |
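/*
 * Example of the clamping (hypothetical numbers, 128MiB bitmap
 * granularity): for a bitmap entry covering [0, 128MiB), a call with
 * offset == 127MiB and bytes == 4MiB sets only the final 1MiB and
 * returns SZ_1M; the caller loops, carrying the remaining 3MiB into
 * the next bitmap.
 */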
1963 | ||
34d52cb6 LZ |
1964 | static bool use_bitmap(struct btrfs_free_space_ctl *ctl, |
1965 | struct btrfs_free_space *info) | |
96303081 | 1966 | { |
34d52cb6 | 1967 | struct btrfs_block_group_cache *block_group = ctl->private; |
d0bd4560 JB |
1968 | bool forced = false; |
1969 | ||
1970 | #ifdef CONFIG_BTRFS_DEBUG | |
1971 | if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root, | |
1972 | block_group)) | |
1973 | forced = true; | |
1974 | #endif | |
96303081 JB |
1975 | |
1976 | /* | |
1977 | * If we are below the extents threshold then we can add this as an | |
1978 | * extent, and don't have to deal with the bitmap | |
1979 | */ | |
d0bd4560 | 1980 | if (!forced && ctl->free_extents < ctl->extents_thresh) { |
32cb0840 JB |
1981 | /* |
1982 | * If this block group has some small extents we don't want to | |
1983 | * use up all of our free slots in the cache with them, we want | |
01327610 | 1984 | * to reserve them for larger extents, however if we have plenty
32cb0840 JB |
1985 | * of cache left then go ahead and add them, no sense in adding
1986 | * the overhead of a bitmap if we don't have to. | |
1987 | */ | |
da17066c | 1988 | if (info->bytes <= block_group->fs_info->sectorsize * 4) { |
34d52cb6 LZ |
1989 | if (ctl->free_extents * 2 <= ctl->extents_thresh) |
1990 | return false; | |
32cb0840 | 1991 | } else { |
34d52cb6 | 1992 | return false; |
32cb0840 JB |
1993 | } |
1994 | } | |
96303081 JB |
1995 | |
1996 | /* | |
dde5740f JB |
1997 | * The original block groups from mkfs can be really small, like 8 |
1998 | * megabytes, so don't bother with a bitmap for those entries. However | |
1999 | * some block groups can be smaller than what a bitmap would cover but | |
2000 | * are still large enough that they could overflow the 32k memory limit, | |
2001 | * so still allow those block groups to have a bitmap
2002 | * entry. | |
96303081 | 2003 | */ |
dde5740f | 2004 | if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset) |
34d52cb6 LZ |
2005 | return false; |
2006 | ||
2007 | return true; | |
2008 | } | |
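/*
 * With a 4KiB sectorsize (an assumed example): extents of at most
 * 16KiB are only pushed into a bitmap once more than half of the
 * extent slots (extents_thresh) are in use, while larger extents stay
 * extent entries until the threshold itself is crossed.
 */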
2009 | ||
20e5506b | 2010 | static const struct btrfs_free_space_op free_space_op = { |
2cdc342c JB |
2011 | .recalc_thresholds = recalculate_thresholds, |
2012 | .use_bitmap = use_bitmap, | |
2013 | }; | |
2014 | ||
34d52cb6 LZ |
2015 | static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, |
2016 | struct btrfs_free_space *info) | |
2017 | { | |
2018 | struct btrfs_free_space *bitmap_info; | |
2cdc342c | 2019 | struct btrfs_block_group_cache *block_group = NULL; |
34d52cb6 | 2020 | int added = 0; |
2cdc342c | 2021 | u64 bytes, offset, bytes_added; |
34d52cb6 | 2022 | int ret; |
96303081 JB |
2023 | |
2024 | bytes = info->bytes; | |
2025 | offset = info->offset; | |
2026 | ||
34d52cb6 LZ |
2027 | if (!ctl->op->use_bitmap(ctl, info)) |
2028 | return 0; | |
2029 | ||
2cdc342c JB |
2030 | if (ctl->op == &free_space_op) |
2031 | block_group = ctl->private; | |
38e87880 | 2032 | again: |
2cdc342c JB |
2033 | /* |
2034 | * Since we link bitmaps right into the cluster we need to see if we | |
2035 | * have a cluster here; if so, and it has our bitmap, we need to add
2036 | * the free space to that bitmap. | |
2037 | */ | |
2038 | if (block_group && !list_empty(&block_group->cluster_list)) { | |
2039 | struct btrfs_free_cluster *cluster; | |
2040 | struct rb_node *node; | |
2041 | struct btrfs_free_space *entry; | |
2042 | ||
2043 | cluster = list_entry(block_group->cluster_list.next, | |
2044 | struct btrfs_free_cluster, | |
2045 | block_group_list); | |
2046 | spin_lock(&cluster->lock); | |
2047 | node = rb_first(&cluster->root); | |
2048 | if (!node) { | |
2049 | spin_unlock(&cluster->lock); | |
38e87880 | 2050 | goto no_cluster_bitmap; |
2cdc342c JB |
2051 | } |
2052 | ||
2053 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
2054 | if (!entry->bitmap) { | |
2055 | spin_unlock(&cluster->lock); | |
38e87880 | 2056 | goto no_cluster_bitmap; |
2cdc342c JB |
2057 | } |
2058 | ||
2059 | if (entry->offset == offset_to_bitmap(ctl, offset)) { | |
2060 | bytes_added = add_bytes_to_bitmap(ctl, entry, | |
2061 | offset, bytes); | |
2062 | bytes -= bytes_added; | |
2063 | offset += bytes_added; | |
2064 | } | |
2065 | spin_unlock(&cluster->lock); | |
2066 | if (!bytes) { | |
2067 | ret = 1; | |
2068 | goto out; | |
2069 | } | |
2070 | } | |
38e87880 CM |
2071 | |
2072 | no_cluster_bitmap: | |
34d52cb6 | 2073 | bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
96303081 JB |
2074 | 1, 0); |
2075 | if (!bitmap_info) { | |
b12d6869 | 2076 | ASSERT(added == 0); |
96303081 JB |
2077 | goto new_bitmap; |
2078 | } | |
2079 | ||
2cdc342c JB |
2080 | bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); |
2081 | bytes -= bytes_added; | |
2082 | offset += bytes_added; | |
2083 | added = 0; | |
96303081 JB |
2084 | |
2085 | if (!bytes) { | |
2086 | ret = 1; | |
2087 | goto out; | |
2088 | } else | |
2089 | goto again; | |
2090 | ||
2091 | new_bitmap: | |
2092 | if (info && info->bitmap) { | |
34d52cb6 | 2093 | add_new_bitmap(ctl, info, offset); |
96303081 JB |
2094 | added = 1; |
2095 | info = NULL; | |
2096 | goto again; | |
2097 | } else { | |
34d52cb6 | 2098 | spin_unlock(&ctl->tree_lock); |
96303081 JB |
2099 | |
2100 | /* no pre-allocated info, allocate a new one */ | |
2101 | if (!info) { | |
dc89e982 JB |
2102 | info = kmem_cache_zalloc(btrfs_free_space_cachep, |
2103 | GFP_NOFS); | |
96303081 | 2104 | if (!info) { |
34d52cb6 | 2105 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2106 | ret = -ENOMEM; |
2107 | goto out; | |
2108 | } | |
2109 | } | |
2110 | ||
2111 | /* allocate the bitmap */ | |
09cbfeaf | 2112 | info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); |
34d52cb6 | 2113 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2114 | if (!info->bitmap) { |
2115 | ret = -ENOMEM; | |
2116 | goto out; | |
2117 | } | |
2118 | goto again; | |
2119 | } | |
2120 | ||
2121 | out: | |
2122 | if (info) { | |
2123 | if (info->bitmap) | |
2124 | kfree(info->bitmap); | |
dc89e982 | 2125 | kmem_cache_free(btrfs_free_space_cachep, info); |
96303081 | 2126 | } |
0f9dd46c JB |
2127 | |
2128 | return ret; | |
2129 | } | |
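/*
 * Return convention (relied on by __btrfs_add_free_space() below):
 * 0 means the space was not suitable for a bitmap and the caller
 * should link it as an extent entry, a positive return means it was
 * fully absorbed into bitmap entries, and a negative return is an
 * error (-ENOMEM).
 */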
2130 | ||
945d8962 | 2131 | static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 | 2132 | struct btrfs_free_space *info, bool update_stat) |
0f9dd46c | 2133 | { |
120d66ee LZ |
2134 | struct btrfs_free_space *left_info; |
2135 | struct btrfs_free_space *right_info; | |
2136 | bool merged = false; | |
2137 | u64 offset = info->offset; | |
2138 | u64 bytes = info->bytes; | |
6226cb0a | 2139 | |
0f9dd46c JB |
2140 | /* |
2141 | * first we want to see if there is free space adjacent to the range we | |
2142 | * are adding; if there is, remove that struct and add a new one to
2143 | * cover the entire range | |
2144 | */ | |
34d52cb6 | 2145 | right_info = tree_search_offset(ctl, offset + bytes, 0, 0); |
96303081 JB |
2146 | if (right_info && rb_prev(&right_info->offset_index)) |
2147 | left_info = rb_entry(rb_prev(&right_info->offset_index), | |
2148 | struct btrfs_free_space, offset_index); | |
2149 | else | |
34d52cb6 | 2150 | left_info = tree_search_offset(ctl, offset - 1, 0, 0); |
0f9dd46c | 2151 | |
96303081 | 2152 | if (right_info && !right_info->bitmap) { |
f333adb5 | 2153 | if (update_stat) |
34d52cb6 | 2154 | unlink_free_space(ctl, right_info); |
f333adb5 | 2155 | else |
34d52cb6 | 2156 | __unlink_free_space(ctl, right_info); |
6226cb0a | 2157 | info->bytes += right_info->bytes; |
dc89e982 | 2158 | kmem_cache_free(btrfs_free_space_cachep, right_info); |
120d66ee | 2159 | merged = true; |
0f9dd46c JB |
2160 | } |
2161 | ||
96303081 JB |
2162 | if (left_info && !left_info->bitmap && |
2163 | left_info->offset + left_info->bytes == offset) { | |
f333adb5 | 2164 | if (update_stat) |
34d52cb6 | 2165 | unlink_free_space(ctl, left_info); |
f333adb5 | 2166 | else |
34d52cb6 | 2167 | __unlink_free_space(ctl, left_info); |
6226cb0a JB |
2168 | info->offset = left_info->offset; |
2169 | info->bytes += left_info->bytes; | |
dc89e982 | 2170 | kmem_cache_free(btrfs_free_space_cachep, left_info); |
120d66ee | 2171 | merged = true; |
0f9dd46c JB |
2172 | } |
2173 | ||
120d66ee LZ |
2174 | return merged; |
2175 | } | |
2176 | ||
20005523 FM |
2177 | static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl, |
2178 | struct btrfs_free_space *info, | |
2179 | bool update_stat) | |
2180 | { | |
2181 | struct btrfs_free_space *bitmap; | |
2182 | unsigned long i; | |
2183 | unsigned long j; | |
2184 | const u64 end = info->offset + info->bytes; | |
2185 | const u64 bitmap_offset = offset_to_bitmap(ctl, end); | |
2186 | u64 bytes; | |
2187 | ||
2188 | bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0); | |
2189 | if (!bitmap) | |
2190 | return false; | |
2191 | ||
2192 | i = offset_to_bit(bitmap->offset, ctl->unit, end); | |
2193 | j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); | |
2194 | if (j == i) | |
2195 | return false; | |
2196 | bytes = (j - i) * ctl->unit; | |
2197 | info->bytes += bytes; | |
2198 | ||
2199 | if (update_stat) | |
2200 | bitmap_clear_bits(ctl, bitmap, end, bytes); | |
2201 | else | |
2202 | __bitmap_clear_bits(ctl, bitmap, end, bytes); | |
2203 | ||
2204 | if (!bitmap->bytes) | |
2205 | free_bitmap(ctl, bitmap); | |
2206 | ||
2207 | return true; | |
2208 | } | |
2209 | ||
2210 | static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl, | |
2211 | struct btrfs_free_space *info, | |
2212 | bool update_stat) | |
2213 | { | |
2214 | struct btrfs_free_space *bitmap; | |
2215 | u64 bitmap_offset; | |
2216 | unsigned long i; | |
2217 | unsigned long j; | |
2218 | unsigned long prev_j; | |
2219 | u64 bytes; | |
2220 | ||
2221 | bitmap_offset = offset_to_bitmap(ctl, info->offset); | |
2222 | /* If we're on a boundary, try the previous logical bitmap. */ | |
2223 | if (bitmap_offset == info->offset) { | |
2224 | if (info->offset == 0) | |
2225 | return false; | |
2226 | bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); | |
2227 | } | |
2228 | ||
2229 | bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0); | |
2230 | if (!bitmap) | |
2231 | return false; | |
2232 | ||
2233 | i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; | |
2234 | j = 0; | |
2235 | prev_j = (unsigned long)-1; | |
2236 | for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { | |
2237 | if (j > i) | |
2238 | break; | |
2239 | prev_j = j; | |
2240 | } | |
2241 | if (prev_j == i) | |
2242 | return false; | |
2243 | ||
2244 | if (prev_j == (unsigned long)-1) | |
2245 | bytes = (i + 1) * ctl->unit; | |
2246 | else | |
2247 | bytes = (i - prev_j) * ctl->unit; | |
2248 | ||
2249 | info->offset -= bytes; | |
2250 | info->bytes += bytes; | |
2251 | ||
2252 | if (update_stat) | |
2253 | bitmap_clear_bits(ctl, bitmap, info->offset, bytes); | |
2254 | else | |
2255 | __bitmap_clear_bits(ctl, bitmap, info->offset, bytes); | |
2256 | ||
2257 | if (!bitmap->bytes) | |
2258 | free_bitmap(ctl, bitmap); | |
2259 | ||
2260 | return true; | |
2261 | } | |
2262 | ||
2263 | /* | |
2264 | * We always prefer to allocate from extent entries, both for clustered and
2265 | * non-clustered allocation requests. So when attempting to add a new extent | |
2266 | * entry, try to see if there's adjacent free space in bitmap entries, and if | |
2267 | * there is, migrate that space from the bitmaps to the extent. | |
2268 | * Like this we get better chances of satisfying space allocation requests | |
2269 | * because we attempt to satisfy them based on a single cache entry, and never | |
2270 | * on 2 or more entries - even if the entries represent a contiguous free space | |
2271 | * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry | |
2272 | * ends). | |
2273 | */ | |
2274 | static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl, | |
2275 | struct btrfs_free_space *info, | |
2276 | bool update_stat) | |
2277 | { | |
2278 | /* | |
2279 | * Only work with disconnected entries, as we can change their offset, | |
2280 | * and must be extent entries. | |
2281 | */ | |
2282 | ASSERT(!info->bitmap); | |
2283 | ASSERT(RB_EMPTY_NODE(&info->offset_index)); | |
2284 | ||
2285 | if (ctl->total_bitmaps > 0) { | |
2286 | bool stole_end; | |
2287 | bool stole_front = false; | |
2288 | ||
2289 | stole_end = steal_from_bitmap_to_end(ctl, info, update_stat); | |
2290 | if (ctl->total_bitmaps > 0) | |
2291 | stole_front = steal_from_bitmap_to_front(ctl, info, | |
2292 | update_stat); | |
2293 | ||
2294 | if (stole_end || stole_front) | |
2295 | try_merge_free_space(ctl, info, update_stat); | |
2296 | } | |
2297 | } | |
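/*
 * A hypothetical before/after: given a new extent entry [1M, 2M) and a
 * neighbouring bitmap with bits set for [960K, 1M) and [2M, 2M + 64K),
 * the two steal helpers clear those bits and grow the entry to
 * [960K, 2M + 64K), so a later allocation of the whole range can be
 * satisfied from a single extent entry.
 */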
2298 | ||
ab8d0fc4 JM |
2299 | int __btrfs_add_free_space(struct btrfs_fs_info *fs_info, |
2300 | struct btrfs_free_space_ctl *ctl, | |
581bb050 | 2301 | u64 offset, u64 bytes) |
120d66ee LZ |
2302 | { |
2303 | struct btrfs_free_space *info; | |
2304 | int ret = 0; | |
2305 | ||
dc89e982 | 2306 | info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); |
120d66ee LZ |
2307 | if (!info) |
2308 | return -ENOMEM; | |
2309 | ||
2310 | info->offset = offset; | |
2311 | info->bytes = bytes; | |
20005523 | 2312 | RB_CLEAR_NODE(&info->offset_index); |
120d66ee | 2313 | |
34d52cb6 | 2314 | spin_lock(&ctl->tree_lock); |
120d66ee | 2315 | |
34d52cb6 | 2316 | if (try_merge_free_space(ctl, info, true)) |
120d66ee LZ |
2317 | goto link; |
2318 | ||
2319 | /* | |
2320 | * There was no extent directly to the left or right of this new | |
2321 | * extent then we know we're going to have to allocate a new extent, so | |
2322 | * before we do that see if we need to drop this into a bitmap | |
2323 | */ | |
34d52cb6 | 2324 | ret = insert_into_bitmap(ctl, info); |
120d66ee LZ |
2325 | if (ret < 0) { |
2326 | goto out; | |
2327 | } else if (ret) { | |
2328 | ret = 0; | |
2329 | goto out; | |
2330 | } | |
2331 | link: | |
20005523 FM |
2332 | /* |
2333 | * Only steal free space from adjacent bitmaps if we're sure we're not | |
2334 | * going to add the new free space to existing bitmap entries - because | |
2335 | * that would mean unnecessary work that would be reverted. Therefore | |
2336 | * attempt to steal space from bitmaps if we're adding an extent entry. | |
2337 | */ | |
2338 | steal_from_bitmap(ctl, info, true); | |
2339 | ||
34d52cb6 | 2340 | ret = link_free_space(ctl, info); |
0f9dd46c | 2341 | if (ret) |
dc89e982 | 2342 | kmem_cache_free(btrfs_free_space_cachep, info); |
96303081 | 2343 | out: |
34d52cb6 | 2344 | spin_unlock(&ctl->tree_lock); |
6226cb0a | 2345 | |
0f9dd46c | 2346 | if (ret) { |
ab8d0fc4 | 2347 | btrfs_crit(fs_info, "unable to add free space :%d", ret); |
b12d6869 | 2348 | ASSERT(ret != -EEXIST); |
0f9dd46c JB |
2349 | } |
2350 | ||
0f9dd46c JB |
2351 | return ret; |
2352 | } | |
2353 | ||
6226cb0a JB |
2354 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
2355 | u64 offset, u64 bytes) | |
0f9dd46c | 2356 | { |
34d52cb6 | 2357 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0f9dd46c | 2358 | struct btrfs_free_space *info; |
b0175117 JB |
2359 | int ret; |
2360 | bool re_search = false; | |
0f9dd46c | 2361 | |
34d52cb6 | 2362 | spin_lock(&ctl->tree_lock); |
6226cb0a | 2363 | |
96303081 | 2364 | again: |
b0175117 | 2365 | ret = 0; |
bdb7d303 JB |
2366 | if (!bytes) |
2367 | goto out_lock; | |
2368 | ||
34d52cb6 | 2369 | info = tree_search_offset(ctl, offset, 0, 0); |
96303081 | 2370 | if (!info) { |
6606bb97 JB |
2371 | /* |
2372 | * Oops, we didn't find an extent that matched the space we wanted
2373 | * to remove; look for a bitmap instead
2374 | */ | |
34d52cb6 | 2375 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
6606bb97 JB |
2376 | 1, 0); |
2377 | if (!info) { | |
b0175117 JB |
2378 | /* |
2379 | * If we found a partial bit of our free space in a | |
2380 | * bitmap but then couldn't find the other part, this may
2381 | * be a problem, so WARN about it. | |
24a70313 | 2382 | */ |
b0175117 | 2383 | WARN_ON(re_search); |
6606bb97 JB |
2384 | goto out_lock; |
2385 | } | |
96303081 JB |
2386 | } |
2387 | ||
b0175117 | 2388 | re_search = false; |
bdb7d303 | 2389 | if (!info->bitmap) { |
34d52cb6 | 2390 | unlink_free_space(ctl, info); |
bdb7d303 JB |
2391 | if (offset == info->offset) { |
2392 | u64 to_free = min(bytes, info->bytes); | |
2393 | ||
2394 | info->bytes -= to_free; | |
2395 | info->offset += to_free; | |
2396 | if (info->bytes) { | |
2397 | ret = link_free_space(ctl, info); | |
2398 | WARN_ON(ret); | |
2399 | } else { | |
2400 | kmem_cache_free(btrfs_free_space_cachep, info); | |
2401 | } | |
0f9dd46c | 2402 | |
bdb7d303 JB |
2403 | offset += to_free; |
2404 | bytes -= to_free; | |
2405 | goto again; | |
2406 | } else { | |
2407 | u64 old_end = info->bytes + info->offset; | |
9b49c9b9 | 2408 | |
bdb7d303 | 2409 | info->bytes = offset - info->offset; |
34d52cb6 | 2410 | ret = link_free_space(ctl, info); |
96303081 JB |
2411 | WARN_ON(ret); |
2412 | if (ret) | |
2413 | goto out_lock; | |
96303081 | 2414 | |
bdb7d303 JB |
2415 | /* Not enough bytes in this entry to satisfy us */ |
2416 | if (old_end < offset + bytes) { | |
2417 | bytes -= old_end - offset; | |
2418 | offset = old_end; | |
2419 | goto again; | |
2420 | } else if (old_end == offset + bytes) { | |
2421 | /* all done */ | |
2422 | goto out_lock; | |
2423 | } | |
2424 | spin_unlock(&ctl->tree_lock); | |
2425 | ||
2426 | ret = btrfs_add_free_space(block_group, offset + bytes, | |
2427 | old_end - (offset + bytes)); | |
2428 | WARN_ON(ret); | |
2429 | goto out; | |
2430 | } | |
0f9dd46c | 2431 | } |
96303081 | 2432 | |
34d52cb6 | 2433 | ret = remove_from_bitmap(ctl, info, &offset, &bytes); |
b0175117 JB |
2434 | if (ret == -EAGAIN) { |
2435 | re_search = true; | |
96303081 | 2436 | goto again; |
b0175117 | 2437 | } |
96303081 | 2438 | out_lock: |
34d52cb6 | 2439 | spin_unlock(&ctl->tree_lock); |
0f9dd46c | 2440 | out: |
25179201 JB |
2441 | return ret; |
2442 | } | |
2443 | ||
0f9dd46c JB |
2444 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, |
2445 | u64 bytes) | |
2446 | { | |
34d52cb6 | 2447 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0f9dd46c JB |
2448 | struct btrfs_free_space *info; |
2449 | struct rb_node *n; | |
2450 | int count = 0; | |
2451 | ||
34d52cb6 | 2452 | for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { |
0f9dd46c | 2453 | info = rb_entry(n, struct btrfs_free_space, offset_index); |
f6175efa | 2454 | if (info->bytes >= bytes && !block_group->ro) |
0f9dd46c | 2455 | count++; |
efe120a0 FH |
2456 | btrfs_crit(block_group->fs_info, |
2457 | "entry offset %llu, bytes %llu, bitmap %s", | |
2458 | info->offset, info->bytes, | |
96303081 | 2459 | (info->bitmap) ? "yes" : "no"); |
0f9dd46c | 2460 | } |
efe120a0 | 2461 | btrfs_info(block_group->fs_info, "block group has cluster?: %s", |
96303081 | 2462 | list_empty(&block_group->cluster_list) ? "no" : "yes"); |
efe120a0 FH |
2463 | btrfs_info(block_group->fs_info, |
2464 | "%d blocks of free space at or bigger than bytes is", count); | |
0f9dd46c JB |
2465 | } |
2466 | ||
34d52cb6 | 2467 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) |
0f9dd46c | 2468 | { |
34d52cb6 | 2469 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0f9dd46c | 2470 | |
34d52cb6 | 2471 | spin_lock_init(&ctl->tree_lock); |
da17066c | 2472 | ctl->unit = block_group->fs_info->sectorsize; |
34d52cb6 LZ |
2473 | ctl->start = block_group->key.objectid; |
2474 | ctl->private = block_group; | |
2475 | ctl->op = &free_space_op; | |
55507ce3 FM |
2476 | INIT_LIST_HEAD(&ctl->trimming_ranges); |
2477 | mutex_init(&ctl->cache_writeout_mutex); | |
0f9dd46c | 2478 | |
34d52cb6 LZ |
2479 | /* |
2480 | * we only want to have 32KiB of RAM per block group for keeping
2481 | * track of free space, and if we pass 1/2 of that we want to | |
2482 | * start converting things over to using bitmaps | |
2483 | */ | |
ee22184b | 2484 | ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); |
0f9dd46c JB |
2485 | } |
2486 | ||
fa9c0d79 CM |
2487 | /* |
2488 | * for a given cluster, put all of its extents back into the free | |
2489 | * space cache. If the block group passed doesn't match the block group | |
2490 | * pointed to by the cluster, someone else raced in and freed the | |
2491 | * cluster already. In that case, we just return without changing anything | |
2492 | */ | |
2493 | static int | |
2494 | __btrfs_return_cluster_to_free_space( | |
2495 | struct btrfs_block_group_cache *block_group, | |
2496 | struct btrfs_free_cluster *cluster) | |
2497 | { | |
34d52cb6 | 2498 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
fa9c0d79 CM |
2499 | struct btrfs_free_space *entry; |
2500 | struct rb_node *node; | |
2501 | ||
2502 | spin_lock(&cluster->lock); | |
2503 | if (cluster->block_group != block_group) | |
2504 | goto out; | |
2505 | ||
96303081 | 2506 | cluster->block_group = NULL; |
fa9c0d79 | 2507 | cluster->window_start = 0; |
96303081 | 2508 | list_del_init(&cluster->block_group_list); |
96303081 | 2509 | |
fa9c0d79 | 2510 | node = rb_first(&cluster->root); |
96303081 | 2511 | while (node) { |
4e69b598 JB |
2512 | bool bitmap; |
2513 | ||
fa9c0d79 CM |
2514 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
2515 | node = rb_next(&entry->offset_index); | |
2516 | rb_erase(&entry->offset_index, &cluster->root); | |
20005523 | 2517 | RB_CLEAR_NODE(&entry->offset_index); |
4e69b598 JB |
2518 | |
2519 | bitmap = (entry->bitmap != NULL); | |
20005523 | 2520 | if (!bitmap) { |
34d52cb6 | 2521 | try_merge_free_space(ctl, entry, false); |
20005523 FM |
2522 | steal_from_bitmap(ctl, entry, false); |
2523 | } | |
34d52cb6 | 2524 | tree_insert_offset(&ctl->free_space_offset, |
4e69b598 | 2525 | entry->offset, &entry->offset_index, bitmap); |
fa9c0d79 | 2526 | } |
6bef4d31 | 2527 | cluster->root = RB_ROOT; |
96303081 | 2528 | |
fa9c0d79 CM |
2529 | out: |
2530 | spin_unlock(&cluster->lock); | |
96303081 | 2531 | btrfs_put_block_group(block_group); |
fa9c0d79 CM |
2532 | return 0; |
2533 | } | |
2534 | ||
48a3b636 ES |
2535 | static void __btrfs_remove_free_space_cache_locked( |
2536 | struct btrfs_free_space_ctl *ctl) | |
0f9dd46c JB |
2537 | { |
2538 | struct btrfs_free_space *info; | |
2539 | struct rb_node *node; | |
581bb050 | 2540 | |
581bb050 LZ |
2541 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { |
2542 | info = rb_entry(node, struct btrfs_free_space, offset_index); | |
9b90f513 JB |
2543 | if (!info->bitmap) { |
2544 | unlink_free_space(ctl, info); | |
2545 | kmem_cache_free(btrfs_free_space_cachep, info); | |
2546 | } else { | |
2547 | free_bitmap(ctl, info); | |
2548 | } | |
351810c1 DS |
2549 | |
2550 | cond_resched_lock(&ctl->tree_lock); | |
581bb050 | 2551 | } |
09655373 CM |
2552 | } |
2553 | ||
2554 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) | |
2555 | { | |
2556 | spin_lock(&ctl->tree_lock); | |
2557 | __btrfs_remove_free_space_cache_locked(ctl); | |
581bb050 LZ |
2558 | spin_unlock(&ctl->tree_lock); |
2559 | } | |
2560 | ||
2561 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |
2562 | { | |
2563 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
fa9c0d79 | 2564 | struct btrfs_free_cluster *cluster; |
96303081 | 2565 | struct list_head *head; |
0f9dd46c | 2566 | |
34d52cb6 | 2567 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2568 | while ((head = block_group->cluster_list.next) != |
2569 | &block_group->cluster_list) { | |
2570 | cluster = list_entry(head, struct btrfs_free_cluster, | |
2571 | block_group_list); | |
fa9c0d79 CM |
2572 | |
2573 | WARN_ON(cluster->block_group != block_group); | |
2574 | __btrfs_return_cluster_to_free_space(block_group, cluster); | |
351810c1 DS |
2575 | |
2576 | cond_resched_lock(&ctl->tree_lock); | |
fa9c0d79 | 2577 | } |
09655373 | 2578 | __btrfs_remove_free_space_cache_locked(ctl); |
34d52cb6 | 2579 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 | 2580 | |
0f9dd46c JB |
2581 | } |
2582 | ||
6226cb0a | 2583 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
a4820398 MX |
2584 | u64 offset, u64 bytes, u64 empty_size, |
2585 | u64 *max_extent_size) | |
0f9dd46c | 2586 | { |
34d52cb6 | 2587 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
6226cb0a | 2588 | struct btrfs_free_space *entry = NULL; |
96303081 | 2589 | u64 bytes_search = bytes + empty_size; |
6226cb0a | 2590 | u64 ret = 0; |
53b381b3 DW |
2591 | u64 align_gap = 0; |
2592 | u64 align_gap_len = 0; | |
0f9dd46c | 2593 | |
34d52cb6 | 2594 | spin_lock(&ctl->tree_lock); |
53b381b3 | 2595 | entry = find_free_space(ctl, &offset, &bytes_search, |
a4820398 | 2596 | block_group->full_stripe_len, max_extent_size); |
6226cb0a | 2597 | if (!entry) |
96303081 JB |
2598 | goto out; |
2599 | ||
2600 | ret = offset; | |
2601 | if (entry->bitmap) { | |
34d52cb6 | 2602 | bitmap_clear_bits(ctl, entry, offset, bytes); |
edf6e2d1 | 2603 | if (!entry->bytes) |
34d52cb6 | 2604 | free_bitmap(ctl, entry); |
96303081 | 2605 | } else { |
34d52cb6 | 2606 | unlink_free_space(ctl, entry); |
53b381b3 DW |
2607 | align_gap_len = offset - entry->offset; |
2608 | align_gap = entry->offset; | |
2609 | ||
2610 | entry->offset = offset + bytes; | |
2611 | WARN_ON(entry->bytes < bytes + align_gap_len); | |
2612 | ||
2613 | entry->bytes -= bytes + align_gap_len; | |
6226cb0a | 2614 | if (!entry->bytes) |
dc89e982 | 2615 | kmem_cache_free(btrfs_free_space_cachep, entry); |
6226cb0a | 2616 | else |
34d52cb6 | 2617 | link_free_space(ctl, entry); |
6226cb0a | 2618 | } |
96303081 | 2619 | out: |
34d52cb6 | 2620 | spin_unlock(&ctl->tree_lock); |
817d52f8 | 2621 | |
53b381b3 | 2622 | if (align_gap_len) |
ab8d0fc4 JM |
2623 | __btrfs_add_free_space(block_group->fs_info, ctl, |
2624 | align_gap, align_gap_len); | |
0f9dd46c JB |
2625 | return ret; |
2626 | } | |
fa9c0d79 CM |
2627 | |
2628 | /* | |
2629 | * given a cluster, put all of its extents back into the free space | |
2630 | * cache. If a block group is passed, this function will only free | |
2631 | * a cluster that belongs to the passed block group. | |
2632 | * | |
2633 | * Otherwise, it'll get a reference on the block group pointed to by the | |
2634 | * cluster and remove the cluster from it. | |
2635 | */ | |
2636 | int btrfs_return_cluster_to_free_space( | |
2637 | struct btrfs_block_group_cache *block_group, | |
2638 | struct btrfs_free_cluster *cluster) | |
2639 | { | |
34d52cb6 | 2640 | struct btrfs_free_space_ctl *ctl; |
fa9c0d79 CM |
2641 | int ret; |
2642 | ||
2643 | /* first, get a safe pointer to the block group */ | |
2644 | spin_lock(&cluster->lock); | |
2645 | if (!block_group) { | |
2646 | block_group = cluster->block_group; | |
2647 | if (!block_group) { | |
2648 | spin_unlock(&cluster->lock); | |
2649 | return 0; | |
2650 | } | |
2651 | } else if (cluster->block_group != block_group) { | |
2652 | /* someone else has already freed it, don't redo their work */
2653 | spin_unlock(&cluster->lock); | |
2654 | return 0; | |
2655 | } | |
2656 | atomic_inc(&block_group->count); | |
2657 | spin_unlock(&cluster->lock); | |
2658 | ||
34d52cb6 LZ |
2659 | ctl = block_group->free_space_ctl; |
2660 | ||
fa9c0d79 | 2661 | /* now return any extents the cluster had on it */ |
34d52cb6 | 2662 | spin_lock(&ctl->tree_lock); |
fa9c0d79 | 2663 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); |
34d52cb6 | 2664 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 CM |
2665 | |
2666 | /* finally drop our ref */ | |
2667 | btrfs_put_block_group(block_group); | |
2668 | return ret; | |
2669 | } | |
2670 | ||
96303081 JB |
2671 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, |
2672 | struct btrfs_free_cluster *cluster, | |
4e69b598 | 2673 | struct btrfs_free_space *entry, |
a4820398 MX |
2674 | u64 bytes, u64 min_start, |
2675 | u64 *max_extent_size) | |
96303081 | 2676 | { |
34d52cb6 | 2677 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
96303081 JB |
2678 | int err; |
2679 | u64 search_start = cluster->window_start; | |
2680 | u64 search_bytes = bytes; | |
2681 | u64 ret = 0; | |
2682 | ||
96303081 JB |
2683 | search_start = min_start; |
2684 | search_bytes = bytes; | |
2685 | ||
0584f718 | 2686 | err = search_bitmap(ctl, entry, &search_start, &search_bytes, true); |
a4820398 MX |
2687 | if (err) { |
2688 | if (search_bytes > *max_extent_size) | |
2689 | *max_extent_size = search_bytes; | |
4e69b598 | 2690 | return 0; |
a4820398 | 2691 | } |
96303081 JB |
2692 | |
2693 | ret = search_start; | |
bb3ac5a4 | 2694 | __bitmap_clear_bits(ctl, entry, ret, bytes); |
96303081 JB |
2695 | |
2696 | return ret; | |
2697 | } | |
2698 | ||
fa9c0d79 CM |
2699 | /* |
2700 | * given a cluster, try to allocate 'bytes' from it, returns 0 | |
2701 | * if it couldn't find anything suitably large, or a logical disk offset | |
2702 | * if things worked out | |
2703 | */ | |
2704 | u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |
2705 | struct btrfs_free_cluster *cluster, u64 bytes, | |
a4820398 | 2706 | u64 min_start, u64 *max_extent_size) |
fa9c0d79 | 2707 | { |
34d52cb6 | 2708 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
fa9c0d79 CM |
2709 | struct btrfs_free_space *entry = NULL; |
2710 | struct rb_node *node; | |
2711 | u64 ret = 0; | |
2712 | ||
2713 | spin_lock(&cluster->lock); | |
2714 | if (bytes > cluster->max_size) | |
2715 | goto out; | |
2716 | ||
2717 | if (cluster->block_group != block_group) | |
2718 | goto out; | |
2719 | ||
2720 | node = rb_first(&cluster->root); | |
2721 | if (!node) | |
2722 | goto out; | |
2723 | ||
2724 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
67871254 | 2725 | while (1) { |
a4820398 MX |
2726 | if (entry->bytes < bytes && entry->bytes > *max_extent_size) |
2727 | *max_extent_size = entry->bytes; | |
2728 | ||
4e69b598 JB |
2729 | if (entry->bytes < bytes || |
2730 | (!entry->bitmap && entry->offset < min_start)) { | |
fa9c0d79 CM |
2731 | node = rb_next(&entry->offset_index); |
2732 | if (!node) | |
2733 | break; | |
2734 | entry = rb_entry(node, struct btrfs_free_space, | |
2735 | offset_index); | |
2736 | continue; | |
2737 | } | |
fa9c0d79 | 2738 | |
4e69b598 JB |
2739 | if (entry->bitmap) { |
2740 | ret = btrfs_alloc_from_bitmap(block_group, | |
2741 | cluster, entry, bytes, | |
a4820398 MX |
2742 | cluster->window_start, |
2743 | max_extent_size); | |
4e69b598 | 2744 | if (ret == 0) { |
4e69b598 JB |
2745 | node = rb_next(&entry->offset_index); |
2746 | if (!node) | |
2747 | break; | |
2748 | entry = rb_entry(node, struct btrfs_free_space, | |
2749 | offset_index); | |
2750 | continue; | |
2751 | } | |
9b230628 | 2752 | cluster->window_start += bytes; |
4e69b598 | 2753 | } else { |
4e69b598 JB |
2754 | ret = entry->offset; |
2755 | ||
2756 | entry->offset += bytes; | |
2757 | entry->bytes -= bytes; | |
2758 | } | |
fa9c0d79 | 2759 | |
5e71b5d5 | 2760 | if (entry->bytes == 0) |
fa9c0d79 | 2761 | rb_erase(&entry->offset_index, &cluster->root); |
fa9c0d79 CM |
2762 | break; |
2763 | } | |
2764 | out: | |
2765 | spin_unlock(&cluster->lock); | |
96303081 | 2766 | |
5e71b5d5 LZ |
2767 | if (!ret) |
2768 | return 0; | |
2769 | ||
34d52cb6 | 2770 | spin_lock(&ctl->tree_lock); |
5e71b5d5 | 2771 | |
34d52cb6 | 2772 | ctl->free_space -= bytes; |
5e71b5d5 | 2773 | if (entry->bytes == 0) { |
34d52cb6 | 2774 | ctl->free_extents--; |
4e69b598 JB |
2775 | if (entry->bitmap) { |
2776 | kfree(entry->bitmap); | |
34d52cb6 LZ |
2777 | ctl->total_bitmaps--; |
2778 | ctl->op->recalc_thresholds(ctl); | |
4e69b598 | 2779 | } |
dc89e982 | 2780 | kmem_cache_free(btrfs_free_space_cachep, entry); |
5e71b5d5 LZ |
2781 | } |
2782 | ||
34d52cb6 | 2783 | spin_unlock(&ctl->tree_lock); |
5e71b5d5 | 2784 | |
fa9c0d79 CM |
2785 | return ret; |
2786 | } | |
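/*
 * Note the accounting asymmetry: entries owned by a cluster were
 * removed from ctl->free_space_offset when the cluster was set up, but
 * their bytes still count toward ctl->free_space, which is why a
 * successful allocation above subtracts from ctl->free_space under
 * ctl->tree_lock after the cluster lock is dropped.
 */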
2787 | ||
96303081 JB |
2788 | static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, |
2789 | struct btrfs_free_space *entry, | |
2790 | struct btrfs_free_cluster *cluster, | |
1bb91902 AO |
2791 | u64 offset, u64 bytes, |
2792 | u64 cont1_bytes, u64 min_bytes) | |
96303081 | 2793 | { |
34d52cb6 | 2794 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
96303081 JB |
2795 | unsigned long next_zero; |
2796 | unsigned long i; | |
1bb91902 AO |
2797 | unsigned long want_bits; |
2798 | unsigned long min_bits; | |
96303081 | 2799 | unsigned long found_bits; |
cef40483 | 2800 | unsigned long max_bits = 0; |
96303081 JB |
2801 | unsigned long start = 0; |
2802 | unsigned long total_found = 0; | |
4e69b598 | 2803 | int ret; |
96303081 | 2804 | |
96009762 | 2805 | i = offset_to_bit(entry->offset, ctl->unit, |
96303081 | 2806 | max_t(u64, offset, entry->offset)); |
96009762 WSH |
2807 | want_bits = bytes_to_bits(bytes, ctl->unit); |
2808 | min_bits = bytes_to_bits(min_bytes, ctl->unit); | |
96303081 | 2809 | |
cef40483 JB |
2810 | /* |
2811 | * Don't bother looking for a cluster in this bitmap if it's heavily | |
2812 | * fragmented. | |
2813 | */ | |
2814 | if (entry->max_extent_size && | |
2815 | entry->max_extent_size < cont1_bytes) | |
2816 | return -ENOSPC; | |
96303081 JB |
2817 | again: |
2818 | found_bits = 0; | |
ebb3dad4 | 2819 | for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { |
96303081 JB |
2820 | next_zero = find_next_zero_bit(entry->bitmap, |
2821 | BITS_PER_BITMAP, i); | |
1bb91902 | 2822 | if (next_zero - i >= min_bits) { |
96303081 | 2823 | found_bits = next_zero - i; |
cef40483 JB |
2824 | if (found_bits > max_bits) |
2825 | max_bits = found_bits; | |
96303081 JB |
2826 | break; |
2827 | } | |
cef40483 JB |
2828 | if (next_zero - i > max_bits) |
2829 | max_bits = next_zero - i; | |
96303081 JB |
2830 | i = next_zero; |
2831 | } | |
2832 | ||
cef40483 JB |
2833 | if (!found_bits) { |
2834 | entry->max_extent_size = (u64)max_bits * ctl->unit; | |
4e69b598 | 2835 | return -ENOSPC; |
cef40483 | 2836 | } |
96303081 | 2837 | |
1bb91902 | 2838 | if (!total_found) { |
96303081 | 2839 | start = i; |
b78d09bc | 2840 | cluster->max_size = 0; |
96303081 JB |
2841 | } |
2842 | ||
2843 | total_found += found_bits; | |
2844 | ||
96009762 WSH |
2845 | if (cluster->max_size < found_bits * ctl->unit) |
2846 | cluster->max_size = found_bits * ctl->unit; | |
96303081 | 2847 | |
1bb91902 AO |
2848 | if (total_found < want_bits || cluster->max_size < cont1_bytes) { |
2849 | i = next_zero + 1; | |
96303081 JB |
2850 | goto again; |
2851 | } | |
2852 | ||
96009762 | 2853 | cluster->window_start = start * ctl->unit + entry->offset; |
34d52cb6 | 2854 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
4e69b598 JB |
2855 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2856 | &entry->offset_index, 1); | |
b12d6869 | 2857 | ASSERT(!ret); /* -EEXIST; Logic error */ |
96303081 | 2858 | |
3f7de037 | 2859 | trace_btrfs_setup_cluster(block_group, cluster, |
96009762 | 2860 | total_found * ctl->unit, 1); |
96303081 JB |
2861 | return 0; |
2862 | } | |
2863 | ||
4e69b598 JB |
2864 | /* |
2865 | * This searches the block group for just extents to fill the cluster with. | |
1bb91902 AO |
2866 | * Try to find a cluster with at least bytes total bytes, at least one |
2867 | * extent of cont1_bytes, and other extents of at least min_bytes.
4e69b598 | 2868 | */ |
3de85bb9 JB |
2869 | static noinline int |
2870 | setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |
2871 | struct btrfs_free_cluster *cluster, | |
2872 | struct list_head *bitmaps, u64 offset, u64 bytes, | |
1bb91902 | 2873 | u64 cont1_bytes, u64 min_bytes) |
4e69b598 | 2874 | { |
34d52cb6 | 2875 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
4e69b598 JB |
2876 | struct btrfs_free_space *first = NULL; |
2877 | struct btrfs_free_space *entry = NULL; | |
4e69b598 JB |
2878 | struct btrfs_free_space *last; |
2879 | struct rb_node *node; | |
4e69b598 JB |
2880 | u64 window_free; |
2881 | u64 max_extent; | |
3f7de037 | 2882 | u64 total_size = 0; |
4e69b598 | 2883 | |
34d52cb6 | 2884 | entry = tree_search_offset(ctl, offset, 0, 1); |
4e69b598 JB |
2885 | if (!entry) |
2886 | return -ENOSPC; | |
2887 | ||
2888 | /* | |
2889 | * We don't want bitmaps, so just move along until we find a normal | |
2890 | * extent entry. | |
2891 | */ | |
1bb91902 AO |
2892 | while (entry->bitmap || entry->bytes < min_bytes) { |
2893 | if (entry->bitmap && list_empty(&entry->list)) | |
86d4a77b | 2894 | list_add_tail(&entry->list, bitmaps); |
4e69b598 JB |
2895 | node = rb_next(&entry->offset_index); |
2896 | if (!node) | |
2897 | return -ENOSPC; | |
2898 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
2899 | } | |
2900 | ||
4e69b598 JB |
2901 | window_free = entry->bytes; |
2902 | max_extent = entry->bytes; | |
2903 | first = entry; | |
2904 | last = entry; | |
4e69b598 | 2905 | |
1bb91902 AO |
2906 | for (node = rb_next(&entry->offset_index); node; |
2907 | node = rb_next(&entry->offset_index)) { | |
4e69b598 JB |
2908 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
2909 | ||
86d4a77b JB |
2910 | if (entry->bitmap) { |
2911 | if (list_empty(&entry->list)) | |
2912 | list_add_tail(&entry->list, bitmaps); | |
4e69b598 | 2913 | continue; |
86d4a77b JB |
2914 | } |
2915 | ||
1bb91902 AO |
2916 | if (entry->bytes < min_bytes) |
2917 | continue; | |
2918 | ||
2919 | last = entry; | |
2920 | window_free += entry->bytes; | |
2921 | if (entry->bytes > max_extent) | |
4e69b598 | 2922 | max_extent = entry->bytes; |
4e69b598 JB |
2923 | } |
2924 | ||
1bb91902 AO |
2925 | if (window_free < bytes || max_extent < cont1_bytes) |
2926 | return -ENOSPC; | |
2927 | ||
4e69b598 JB |
2928 | cluster->window_start = first->offset; |
2929 | ||
2930 | node = &first->offset_index; | |
2931 | ||
2932 | /* | |
2933 | * now we've found our entries, pull them out of the free space | |
2934 | * cache and put them into the cluster rbtree | |
2935 | */ | |
2936 | do { | |
2937 | int ret; | |
2938 | ||
2939 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
2940 | node = rb_next(&entry->offset_index); | |
1bb91902 | 2941 | if (entry->bitmap || entry->bytes < min_bytes) |
4e69b598 JB |
2942 | continue; |
2943 | ||
34d52cb6 | 2944 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
4e69b598 JB |
2945 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2946 | &entry->offset_index, 0); | |
3f7de037 | 2947 | total_size += entry->bytes; |
b12d6869 | 2948 | ASSERT(!ret); /* -EEXIST; Logic error */ |
4e69b598 JB |
2949 | } while (node && entry != last); |
2950 | ||
2951 | cluster->max_size = max_extent; | |
3f7de037 | 2952 | trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); |
4e69b598 JB |
2953 | return 0; |
2954 | } | |
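/*
 * Illustrative walk-through (added commentary, not upstream code): with
 * extent entries of 8K, 64K and 4K at/after 'offset', and bytes = 64K,
 * cont1_bytes = 64K, min_bytes = 4K, the scan above accumulates
 * window_free = 76K and max_extent = 64K; both the window_free >= bytes
 * and max_extent >= cont1_bytes checks pass, so all three entries are
 * unlinked from the free space tree and inserted into the cluster rbtree.
 */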
2955 | ||
2956 | /* | |
2957 | * This specifically looks for bitmaps that may work in the cluster; we assume |
2958 | * that we have already failed to find extents that will work. |
2959 | */ | |
3de85bb9 JB |
2960 | static noinline int |
2961 | setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |
2962 | struct btrfs_free_cluster *cluster, | |
2963 | struct list_head *bitmaps, u64 offset, u64 bytes, | |
1bb91902 | 2964 | u64 cont1_bytes, u64 min_bytes) |
4e69b598 | 2965 | { |
34d52cb6 | 2966 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
1b9b922a | 2967 | struct btrfs_free_space *entry = NULL; |
4e69b598 | 2968 | int ret = -ENOSPC; |
0f0fbf1d | 2969 | u64 bitmap_offset = offset_to_bitmap(ctl, offset); |
4e69b598 | 2970 | |
34d52cb6 | 2971 | if (ctl->total_bitmaps == 0) |
4e69b598 JB |
2972 | return -ENOSPC; |
2973 | ||
0f0fbf1d LZ |
2974 | /* |
2975 | * The bitmap that covers offset won't be in the list unless offset |
2976 | * is exactly its start offset. |
2977 | */ | |
1b9b922a CM |
2978 | if (!list_empty(bitmaps)) |
2979 | entry = list_first_entry(bitmaps, struct btrfs_free_space, list); | |
2980 | ||
2981 | if (!entry || entry->offset != bitmap_offset) { | |
0f0fbf1d LZ |
2982 | entry = tree_search_offset(ctl, bitmap_offset, 1, 0); |
2983 | if (entry && list_empty(&entry->list)) | |
2984 | list_add(&entry->list, bitmaps); | |
2985 | } | |
2986 | ||
86d4a77b | 2987 | list_for_each_entry(entry, bitmaps, list) { |
357b9784 | 2988 | if (entry->bytes < bytes) |
86d4a77b JB |
2989 | continue; |
2990 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | |
1bb91902 | 2991 | bytes, cont1_bytes, min_bytes); |
86d4a77b JB |
2992 | if (!ret) |
2993 | return 0; | |
2994 | } | |
2995 | ||
2996 | /* | |
52621cb6 LZ |
2997 | * The bitmaps list has all the bitmaps that record free space |
2998 | * starting after offset, so no more search is required. | |
86d4a77b | 2999 | */ |
52621cb6 | 3000 | return -ENOSPC; |
4e69b598 JB |
3001 | } |
3002 | ||
fa9c0d79 CM |
3003 | /* |
3004 | * Here we try to find a cluster of blocks in a block group. The goal |
1bb91902 | 3005 | * is to find at least bytes + empty_size. |
fa9c0d79 CM |
3006 | * We might not find them all in one contiguous area. |
3007 | * | |
3008 | * Returns zero and sets up the cluster if things worked out; otherwise |
3009 | * it returns -ENOSPC |
3010 | */ | |
00361589 | 3011 | int btrfs_find_space_cluster(struct btrfs_root *root, |
fa9c0d79 CM |
3012 | struct btrfs_block_group_cache *block_group, |
3013 | struct btrfs_free_cluster *cluster, | |
3014 | u64 offset, u64 bytes, u64 empty_size) | |
3015 | { | |
34d52cb6 | 3016 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
86d4a77b | 3017 | struct btrfs_free_space *entry, *tmp; |
52621cb6 | 3018 | LIST_HEAD(bitmaps); |
fa9c0d79 | 3019 | u64 min_bytes; |
1bb91902 | 3020 | u64 cont1_bytes; |
fa9c0d79 CM |
3021 | int ret; |
3022 | ||
1bb91902 AO |
3023 | /* |
3024 | * Choose the minimum extent size we'll require for this | |
3025 | * cluster. For SSD_SPREAD, don't allow any fragmentation. | |
3026 | * For metadata, allow allocations with smaller extents. For |
3027 | * data, keep it dense. | |
3028 | */ | |
3cdde224 | 3029 | if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) { |
1bb91902 | 3030 | cont1_bytes = min_bytes = bytes + empty_size; |
451d7585 | 3031 | } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { |
1bb91902 | 3032 | cont1_bytes = bytes; |
da17066c | 3033 | min_bytes = block_group->fs_info->sectorsize; |
1bb91902 AO |
3034 | } else { |
3035 | cont1_bytes = max(bytes, (bytes + empty_size) >> 2); | |
da17066c | 3036 | min_bytes = block_group->fs_info->sectorsize; |
1bb91902 | 3037 | } |
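	/*
	 * Worked example (added commentary, assuming a 4K sectorsize): for a
	 * data block group with bytes = 1M and empty_size = 3M this picks
	 * cont1_bytes = max(1M, (1M + 3M) >> 2) = 1M and min_bytes = 4K,
	 * i.e. one 1M contiguous extent is required but 4K pieces may fill
	 * out the rest of the window.
	 */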
fa9c0d79 | 3038 | |
34d52cb6 | 3039 | spin_lock(&ctl->tree_lock); |
7d0d2e8e JB |
3040 | |
3041 | /* | |
3042 | * If we know we don't have enough space to make a cluster, don't even |
3043 | * bother doing all the work to try and find one. |
3044 | */ | |
1bb91902 | 3045 | if (ctl->free_space < bytes) { |
34d52cb6 | 3046 | spin_unlock(&ctl->tree_lock); |
7d0d2e8e JB |
3047 | return -ENOSPC; |
3048 | } | |
3049 | ||
fa9c0d79 CM |
3050 | spin_lock(&cluster->lock); |
3051 | ||
3052 | /* someone already found a cluster, hooray */ | |
3053 | if (cluster->block_group) { | |
3054 | ret = 0; | |
3055 | goto out; | |
3056 | } | |
fa9c0d79 | 3057 | |
3f7de037 JB |
3058 | trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, |
3059 | min_bytes); | |
3060 | ||
86d4a77b | 3061 | ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, |
1bb91902 AO |
3062 | bytes + empty_size, |
3063 | cont1_bytes, min_bytes); | |
4e69b598 | 3064 | if (ret) |
86d4a77b | 3065 | ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, |
1bb91902 AO |
3066 | offset, bytes + empty_size, |
3067 | cont1_bytes, min_bytes); | |
86d4a77b JB |
3068 | |
3069 | /* Clear our temporary list */ | |
3070 | list_for_each_entry_safe(entry, tmp, &bitmaps, list) | |
3071 | list_del_init(&entry->list); | |
fa9c0d79 | 3072 | |
4e69b598 JB |
3073 | if (!ret) { |
3074 | atomic_inc(&block_group->count); | |
3075 | list_add_tail(&cluster->block_group_list, | |
3076 | &block_group->cluster_list); | |
3077 | cluster->block_group = block_group; | |
3f7de037 JB |
3078 | } else { |
3079 | trace_btrfs_failed_cluster_setup(block_group); | |
fa9c0d79 | 3080 | } |
fa9c0d79 CM |
3081 | out: |
3082 | spin_unlock(&cluster->lock); | |
34d52cb6 | 3083 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 CM |
3084 | |
3085 | return ret; | |
3086 | } | |
3087 | ||
3088 | /* | |
3089 | * simple code to zero out a cluster | |
3090 | */ | |
3091 | void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |
3092 | { | |
3093 | spin_lock_init(&cluster->lock); | |
3094 | spin_lock_init(&cluster->refill_lock); | |
6bef4d31 | 3095 | cluster->root = RB_ROOT; |
fa9c0d79 | 3096 | cluster->max_size = 0; |
c759c4e1 | 3097 | cluster->fragmented = false; |
fa9c0d79 CM |
3098 | INIT_LIST_HEAD(&cluster->block_group_list); |
3099 | cluster->block_group = NULL; | |
3100 | } | |
3101 | ||
7fe1e641 LZ |
3102 | static int do_trimming(struct btrfs_block_group_cache *block_group, |
3103 | u64 *total_trimmed, u64 start, u64 bytes, | |
55507ce3 FM |
3104 | u64 reserved_start, u64 reserved_bytes, |
3105 | struct btrfs_trim_range *trim_entry) | |
f7039b1d | 3106 | { |
7fe1e641 | 3107 | struct btrfs_space_info *space_info = block_group->space_info; |
f7039b1d | 3108 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
55507ce3 | 3109 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
7fe1e641 LZ |
3110 | int ret; |
3111 | int update = 0; | |
3112 | u64 trimmed = 0; | |
f7039b1d | 3113 | |
7fe1e641 LZ |
3114 | spin_lock(&space_info->lock); |
3115 | spin_lock(&block_group->lock); | |
3116 | if (!block_group->ro) { | |
3117 | block_group->reserved += reserved_bytes; | |
3118 | space_info->bytes_reserved += reserved_bytes; | |
3119 | update = 1; | |
3120 | } | |
3121 | spin_unlock(&block_group->lock); | |
3122 | spin_unlock(&space_info->lock); | |
3123 | ||
1edb647b FM |
3124 | ret = btrfs_discard_extent(fs_info->extent_root, |
3125 | start, bytes, &trimmed); | |
7fe1e641 LZ |
3126 | if (!ret) |
3127 | *total_trimmed += trimmed; | |
3128 | ||
55507ce3 | 3129 | mutex_lock(&ctl->cache_writeout_mutex); |
7fe1e641 | 3130 | btrfs_add_free_space(block_group, reserved_start, reserved_bytes); |
55507ce3 FM |
3131 | list_del(&trim_entry->list); |
3132 | mutex_unlock(&ctl->cache_writeout_mutex); | |
7fe1e641 LZ |
3133 | |
3134 | if (update) { | |
3135 | spin_lock(&space_info->lock); | |
3136 | spin_lock(&block_group->lock); | |
3137 | if (block_group->ro) | |
3138 | space_info->bytes_readonly += reserved_bytes; | |
3139 | block_group->reserved -= reserved_bytes; | |
3140 | space_info->bytes_reserved -= reserved_bytes; | |
3141 | spin_unlock(&space_info->lock); | |
3142 | spin_unlock(&block_group->lock); | |
3143 | } | |
3144 | ||
3145 | return ret; | |
3146 | } | |
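/*
 * Added commentary: do_trimming() bumps the reserved counters while the
 * discard is in flight so the space accounting stays consistent, re-adds
 * the range to the free space cache afterwards, and only then drops the
 * trim_entry; the trimming_ranges list plus cache_writeout_mutex let a
 * concurrent cache writeout account for ranges that were unlinked for
 * trimming but not yet re-added.
 */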
3147 | ||
3148 | static int trim_no_bitmap(struct btrfs_block_group_cache *block_group, | |
3149 | u64 *total_trimmed, u64 start, u64 end, u64 minlen) | |
3150 | { | |
3151 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
3152 | struct btrfs_free_space *entry; | |
3153 | struct rb_node *node; | |
3154 | int ret = 0; | |
3155 | u64 extent_start; | |
3156 | u64 extent_bytes; | |
3157 | u64 bytes; | |
f7039b1d LD |
3158 | |
3159 | while (start < end) { | |
55507ce3 FM |
3160 | struct btrfs_trim_range trim_entry; |
3161 | ||
3162 | mutex_lock(&ctl->cache_writeout_mutex); | |
34d52cb6 | 3163 | spin_lock(&ctl->tree_lock); |
f7039b1d | 3164 | |
34d52cb6 LZ |
3165 | if (ctl->free_space < minlen) { |
3166 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3167 | mutex_unlock(&ctl->cache_writeout_mutex); |
f7039b1d LD |
3168 | break; |
3169 | } | |
3170 | ||
34d52cb6 | 3171 | entry = tree_search_offset(ctl, start, 0, 1); |
7fe1e641 | 3172 | if (!entry) { |
34d52cb6 | 3173 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 3174 | mutex_unlock(&ctl->cache_writeout_mutex); |
f7039b1d LD |
3175 | break; |
3176 | } | |
3177 | ||
7fe1e641 LZ |
3178 | /* skip bitmaps */ |
3179 | while (entry->bitmap) { | |
3180 | node = rb_next(&entry->offset_index); | |
3181 | if (!node) { | |
34d52cb6 | 3182 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 3183 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 | 3184 | goto out; |
f7039b1d | 3185 | } |
7fe1e641 LZ |
3186 | entry = rb_entry(node, struct btrfs_free_space, |
3187 | offset_index); | |
f7039b1d LD |
3188 | } |
3189 | ||
7fe1e641 LZ |
3190 | if (entry->offset >= end) { |
3191 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3192 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 | 3193 | break; |
f7039b1d LD |
3194 | } |
3195 | ||
7fe1e641 LZ |
3196 | extent_start = entry->offset; |
3197 | extent_bytes = entry->bytes; | |
3198 | start = max(start, extent_start); | |
3199 | bytes = min(extent_start + extent_bytes, end) - start; | |
3200 | if (bytes < minlen) { | |
3201 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3202 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 | 3203 | goto next; |
f7039b1d LD |
3204 | } |
3205 | ||
7fe1e641 LZ |
3206 | unlink_free_space(ctl, entry); |
3207 | kmem_cache_free(btrfs_free_space_cachep, entry); | |
3208 | ||
34d52cb6 | 3209 | spin_unlock(&ctl->tree_lock); |
55507ce3 FM |
3210 | trim_entry.start = extent_start; |
3211 | trim_entry.bytes = extent_bytes; | |
3212 | list_add_tail(&trim_entry.list, &ctl->trimming_ranges); | |
3213 | mutex_unlock(&ctl->cache_writeout_mutex); | |
f7039b1d | 3214 | |
7fe1e641 | 3215 | ret = do_trimming(block_group, total_trimmed, start, bytes, |
55507ce3 | 3216 | extent_start, extent_bytes, &trim_entry); |
7fe1e641 LZ |
3217 | if (ret) |
3218 | break; | |
3219 | next: | |
3220 | start += bytes; | |
f7039b1d | 3221 | |
7fe1e641 LZ |
3222 | if (fatal_signal_pending(current)) { |
3223 | ret = -ERESTARTSYS; | |
3224 | break; | |
3225 | } | |
3226 | ||
3227 | cond_resched(); | |
3228 | } | |
3229 | out: | |
3230 | return ret; | |
3231 | } | |
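/*
 * Added commentary: each iteration above re-takes cache_writeout_mutex and
 * tree_lock and drops both before the potentially slow discard inside
 * do_trimming(), so lookups, insertions and cache writeout are only
 * blocked while an entry is found and unlinked, not for the whole trim.
 */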
3232 | ||
3233 | static int trim_bitmaps(struct btrfs_block_group_cache *block_group, | |
3234 | u64 *total_trimmed, u64 start, u64 end, u64 minlen) | |
3235 | { | |
3236 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
3237 | struct btrfs_free_space *entry; | |
3238 | int ret = 0; | |
3239 | int ret2; | |
3240 | u64 bytes; | |
3241 | u64 offset = offset_to_bitmap(ctl, start); | |
3242 | ||
3243 | while (offset < end) { | |
3244 | bool next_bitmap = false; | |
55507ce3 | 3245 | struct btrfs_trim_range trim_entry; |
7fe1e641 | 3246 | |
55507ce3 | 3247 | mutex_lock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3248 | spin_lock(&ctl->tree_lock); |
3249 | ||
3250 | if (ctl->free_space < minlen) { | |
3251 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3252 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3253 | break; |
3254 | } | |
3255 | ||
3256 | entry = tree_search_offset(ctl, offset, 1, 0); | |
3257 | if (!entry) { | |
3258 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3259 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3260 | next_bitmap = true; |
3261 | goto next; | |
3262 | } | |
3263 | ||
3264 | bytes = minlen; | |
0584f718 | 3265 | ret2 = search_bitmap(ctl, entry, &start, &bytes, false); |
7fe1e641 LZ |
3266 | if (ret2 || start >= end) { |
3267 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3268 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3269 | next_bitmap = true; |
3270 | goto next; | |
3271 | } | |
3272 | ||
3273 | bytes = min(bytes, end - start); | |
3274 | if (bytes < minlen) { | |
3275 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3276 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3277 | goto next; |
3278 | } | |
3279 | ||
3280 | bitmap_clear_bits(ctl, entry, start, bytes); | |
3281 | if (entry->bytes == 0) | |
3282 | free_bitmap(ctl, entry); | |
3283 | ||
3284 | spin_unlock(&ctl->tree_lock); | |
55507ce3 FM |
3285 | trim_entry.start = start; |
3286 | trim_entry.bytes = bytes; | |
3287 | list_add_tail(&trim_entry.list, &ctl->trimming_ranges); | |
3288 | mutex_unlock(&ctl->cache_writeout_mutex); | |
7fe1e641 LZ |
3289 | |
3290 | ret = do_trimming(block_group, total_trimmed, start, bytes, | |
55507ce3 | 3291 | start, bytes, &trim_entry); |
7fe1e641 LZ |
3292 | if (ret) |
3293 | break; | |
3294 | next: | |
3295 | if (next_bitmap) { | |
3296 | offset += BITS_PER_BITMAP * ctl->unit; | |
3297 | } else { | |
3298 | start += bytes; | |
3299 | if (start >= offset + BITS_PER_BITMAP * ctl->unit) | |
3300 | offset += BITS_PER_BITMAP * ctl->unit; | |
f7039b1d | 3301 | } |
f7039b1d LD |
3302 | |
3303 | if (fatal_signal_pending(current)) { | |
3304 | ret = -ERESTARTSYS; | |
3305 | break; | |
3306 | } | |
3307 | ||
3308 | cond_resched(); | |
3309 | } | |
3310 | ||
3311 | return ret; | |
3312 | } | |
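/*
 * Illustrative arithmetic (added commentary, assuming 4K pages and a 4K
 * sectorsize/ctl->unit): one bitmap covers BITS_PER_BITMAP * ctl->unit =
 * 32768 * 4096 bytes = 128M, which is exactly the stride by which 'offset'
 * advances above whenever a bitmap is exhausted or absent.
 */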
581bb050 | 3313 | |
e33e17ee | 3314 | void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache) |
7fe1e641 | 3315 | { |
e33e17ee JM |
3316 | atomic_inc(&cache->trimming); |
3317 | } | |
7fe1e641 | 3318 | |
e33e17ee JM |
3319 | void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group) |
3320 | { | |
3321 | struct extent_map_tree *em_tree; | |
3322 | struct extent_map *em; | |
3323 | bool cleanup; | |
7fe1e641 | 3324 | |
04216820 | 3325 | spin_lock(&block_group->lock); |
e33e17ee JM |
3326 | cleanup = (atomic_dec_and_test(&block_group->trimming) && |
3327 | block_group->removed); | |
04216820 FM |
3328 | spin_unlock(&block_group->lock); |
3329 | ||
e33e17ee | 3330 | if (cleanup) { |
3796d335 | 3331 | lock_chunks(block_group->fs_info); |
04216820 FM |
3332 | em_tree = &block_group->fs_info->mapping_tree.map_tree; |
3333 | write_lock(&em_tree->lock); | |
3334 | em = lookup_extent_mapping(em_tree, block_group->key.objectid, | |
3335 | 1); | |
3336 | BUG_ON(!em); /* logic error, can't happen */ | |
a1e7e16e FM |
3337 | /* |
3338 | * remove_extent_mapping() will delete us from the pinned_chunks | |
3339 | * list, which is protected by the chunk mutex. | |
3340 | */ | |
04216820 FM |
3341 | remove_extent_mapping(em_tree, em); |
3342 | write_unlock(&em_tree->lock); | |
3796d335 | 3343 | unlock_chunks(block_group->fs_info); |
04216820 FM |
3344 | |
3345 | /* once for us and once for the tree */ | |
3346 | free_extent_map(em); | |
3347 | free_extent_map(em); | |
946ddbe8 FM |
3348 | |
3349 | /* | |
3350 | * We've left one free space entry, and any other tasks trimming |
3351 | * this block group have each left one entry. Free them. |
3352 | */ | |
3353 | __btrfs_remove_free_space_cache(block_group->free_space_ctl); | |
e33e17ee JM |
3354 | } |
3355 | } | |
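/*
 * Added commentary: the 'trimming' count pins the block group's chunk
 * mapping. When the last trimmer drops its reference after the group has
 * been removed, the extent map and the leftover free space entries are
 * cleaned up here rather than in the removal path itself.
 */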
3356 | ||
3357 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |
3358 | u64 *trimmed, u64 start, u64 end, u64 minlen) | |
3359 | { | |
3360 | int ret; | |
3361 | ||
3362 | *trimmed = 0; | |
3363 | ||
3364 | spin_lock(&block_group->lock); | |
3365 | if (block_group->removed) { | |
04216820 | 3366 | spin_unlock(&block_group->lock); |
e33e17ee | 3367 | return 0; |
04216820 | 3368 | } |
e33e17ee JM |
3369 | btrfs_get_block_group_trimming(block_group); |
3370 | spin_unlock(&block_group->lock); | |
3371 | ||
3372 | ret = trim_no_bitmap(block_group, trimmed, start, end, minlen); | |
3373 | if (ret) | |
3374 | goto out; | |
7fe1e641 | 3375 | |
e33e17ee JM |
3376 | ret = trim_bitmaps(block_group, trimmed, start, end, minlen); |
3377 | out: | |
3378 | btrfs_put_block_group_trimming(block_group); | |
7fe1e641 LZ |
3379 | return ret; |
3380 | } | |
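/*
 * Usage sketch (added commentary; the surrounding loop is an assumption
 * drawn from the FITRIM path, not code in this file):
 *
 *	u64 trimmed = 0, group_trimmed;
 *
 *	ret = btrfs_trim_block_group(block_group, &group_trimmed,
 *				     start, end, minlen);
 *	if (!ret)
 *		trimmed += group_trimmed;
 *
 * repeated per block group, with 'trimmed' reported back to userspace.
 */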
3381 | ||
581bb050 LZ |
3382 | /* |
3383 | * Find the left-most item in the cache tree, and then return the | |
3384 | * smallest inode number in the item. | |
3385 | * | |
3386 | * Note: the returned inode number may not be the smallest one in | |
3387 | * the tree, if the left-most item is a bitmap. | |
3388 | */ | |
3389 | u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) | |
3390 | { | |
3391 | struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; | |
3392 | struct btrfs_free_space *entry = NULL; | |
3393 | u64 ino = 0; | |
3394 | ||
3395 | spin_lock(&ctl->tree_lock); | |
3396 | ||
3397 | if (RB_EMPTY_ROOT(&ctl->free_space_offset)) | |
3398 | goto out; | |
3399 | ||
3400 | entry = rb_entry(rb_first(&ctl->free_space_offset), | |
3401 | struct btrfs_free_space, offset_index); | |
3402 | ||
3403 | if (!entry->bitmap) { | |
3404 | ino = entry->offset; | |
3405 | ||
3406 | unlink_free_space(ctl, entry); | |
3407 | entry->offset++; | |
3408 | entry->bytes--; | |
3409 | if (!entry->bytes) | |
3410 | kmem_cache_free(btrfs_free_space_cachep, entry); | |
3411 | else | |
3412 | link_free_space(ctl, entry); | |
3413 | } else { | |
3414 | u64 offset = 0; | |
3415 | u64 count = 1; | |
3416 | int ret; | |
3417 | ||
0584f718 | 3418 | ret = search_bitmap(ctl, entry, &offset, &count, true); |
79787eaa | 3419 | /* Logic error; should be empty if it can't find anything */ |
b12d6869 | 3420 | ASSERT(!ret); |
581bb050 LZ |
3421 | |
3422 | ino = offset; | |
3423 | bitmap_clear_bits(ctl, entry, offset, 1); | |
3424 | if (entry->bytes == 0) | |
3425 | free_bitmap(ctl, entry); | |
3426 | } | |
3427 | out: | |
3428 | spin_unlock(&ctl->tree_lock); | |
3429 | ||
3430 | return ino; | |
3431 | } | |
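/*
 * Added commentary: for an extent entry the smallest number is carved off
 * the front (offset++/bytes--); for a bitmap a single free bit is searched
 * for and cleared. Either way exactly one inode number is consumed per
 * call, under tree_lock.
 */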
82d5902d LZ |
3432 | |
3433 | struct inode *lookup_free_ino_inode(struct btrfs_root *root, | |
3434 | struct btrfs_path *path) | |
3435 | { | |
3436 | struct inode *inode = NULL; | |
3437 | ||
57cdc8db DS |
3438 | spin_lock(&root->ino_cache_lock); |
3439 | if (root->ino_cache_inode) | |
3440 | inode = igrab(root->ino_cache_inode); | |
3441 | spin_unlock(&root->ino_cache_lock); | |
82d5902d LZ |
3442 | if (inode) |
3443 | return inode; | |
3444 | ||
3445 | inode = __lookup_free_space_inode(root, path, 0); | |
3446 | if (IS_ERR(inode)) | |
3447 | return inode; | |
3448 | ||
57cdc8db | 3449 | spin_lock(&root->ino_cache_lock); |
7841cb28 | 3450 | if (!btrfs_fs_closing(root->fs_info)) |
57cdc8db DS |
3451 | root->ino_cache_inode = igrab(inode); |
3452 | spin_unlock(&root->ino_cache_lock); | |
82d5902d LZ |
3453 | |
3454 | return inode; | |
3455 | } | |
3456 | ||
3457 | int create_free_ino_inode(struct btrfs_root *root, | |
3458 | struct btrfs_trans_handle *trans, | |
3459 | struct btrfs_path *path) | |
3460 | { | |
3461 | return __create_free_space_inode(root, trans, path, | |
3462 | BTRFS_FREE_INO_OBJECTID, 0); | |
3463 | } | |
3464 | ||
3465 | int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) | |
3466 | { | |
3467 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | |
3468 | struct btrfs_path *path; | |
3469 | struct inode *inode; | |
3470 | int ret = 0; | |
3471 | u64 root_gen = btrfs_root_generation(&root->root_item); | |
3472 | ||
3cdde224 | 3473 | if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) |
4b9465cb CM |
3474 | return 0; |
3475 | ||
82d5902d LZ |
3476 | /* |
3477 | * If we're unmounting, just return, since this does a search on the |
3478 | * normal root and not the commit root, and we could deadlock. |
3479 | */ | |
7841cb28 | 3480 | if (btrfs_fs_closing(fs_info)) |
82d5902d LZ |
3481 | return 0; |
3482 | ||
3483 | path = btrfs_alloc_path(); | |
3484 | if (!path) | |
3485 | return 0; | |
3486 | ||
3487 | inode = lookup_free_ino_inode(root, path); | |
3488 | if (IS_ERR(inode)) | |
3489 | goto out; | |
3490 | ||
3491 | if (root_gen != BTRFS_I(inode)->generation) | |
3492 | goto out_put; | |
3493 | ||
3494 | ret = __load_free_space_cache(root, inode, ctl, path, 0); | |
3495 | ||
3496 | if (ret < 0) | |
c2cf52eb SK |
3497 | btrfs_err(fs_info, |
3498 | "failed to load free ino cache for root %llu", | |
3499 | root->root_key.objectid); | |
82d5902d LZ |
3500 | out_put: |
3501 | iput(inode); | |
3502 | out: | |
3503 | btrfs_free_path(path); | |
3504 | return ret; | |
3505 | } | |
3506 | ||
3507 | int btrfs_write_out_ino_cache(struct btrfs_root *root, | |
3508 | struct btrfs_trans_handle *trans, | |
53645a91 FDBM |
3509 | struct btrfs_path *path, |
3510 | struct inode *inode) | |
82d5902d LZ |
3511 | { |
3512 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | |
82d5902d | 3513 | int ret; |
c9dc4c65 | 3514 | struct btrfs_io_ctl io_ctl; |
e43699d4 | 3515 | bool release_metadata = true; |
82d5902d | 3516 | |
3cdde224 | 3517 | if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) |
4b9465cb CM |
3518 | return 0; |
3519 | ||
85db36cf | 3520 | memset(&io_ctl, 0, sizeof(io_ctl)); |
c9dc4c65 | 3521 | ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, |
85db36cf | 3522 | trans, path, 0); |
e43699d4 FM |
3523 | if (!ret) { |
3524 | /* | |
3525 | * At this point writepages() didn't error out, so our metadata | |
3526 | * reservation is released when the writeback finishes, at | |
3527 | * inode.c:btrfs_finish_ordered_io(), regardless of whether it |
3528 | * finishes with or without an error. |
3529 | */ | |
3530 | release_metadata = false; | |
85db36cf | 3531 | ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0); |
e43699d4 | 3532 | } |
85db36cf | 3533 | |
c09544e0 | 3534 | if (ret) { |
e43699d4 FM |
3535 | if (release_metadata) |
3536 | btrfs_delalloc_release_metadata(inode, inode->i_size); | |
c09544e0 | 3537 | #ifdef DEBUG |
c2cf52eb SK |
3538 | btrfs_err(root->fs_info, |
3539 | "failed to write free ino cache for root %llu", | |
3540 | root->root_key.objectid); | |
c09544e0 JB |
3541 | #endif |
3542 | } | |
82d5902d | 3543 | |
82d5902d LZ |
3544 | return ret; |
3545 | } | |
74255aa0 JB |
3546 | |
3547 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | |
dc11dd5d JB |
3548 | /* |
3549 | * Use this if you need to make a bitmap or extent entry specifically, it | |
3550 | * doesn't do any of the merging that add_free_space does, this acts a lot like | |
3551 | * how the free space cache loading stuff works, so you can get really weird | |
3552 | * configurations. | |
3553 | */ | |
3554 | int test_add_free_space_entry(struct btrfs_block_group_cache *cache, | |
3555 | u64 offset, u64 bytes, bool bitmap) | |
74255aa0 | 3556 | { |
dc11dd5d JB |
3557 | struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; |
3558 | struct btrfs_free_space *info = NULL, *bitmap_info; | |
3559 | void *map = NULL; | |
3560 | u64 bytes_added; | |
3561 | int ret; | |
74255aa0 | 3562 | |
dc11dd5d JB |
3563 | again: |
3564 | if (!info) { | |
3565 | info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); | |
3566 | if (!info) | |
3567 | return -ENOMEM; | |
74255aa0 JB |
3568 | } |
3569 | ||
dc11dd5d JB |
3570 | if (!bitmap) { |
3571 | spin_lock(&ctl->tree_lock); | |
3572 | info->offset = offset; | |
3573 | info->bytes = bytes; | |
cef40483 | 3574 | info->max_extent_size = 0; |
dc11dd5d JB |
3575 | ret = link_free_space(ctl, info); |
3576 | spin_unlock(&ctl->tree_lock); | |
3577 | if (ret) | |
3578 | kmem_cache_free(btrfs_free_space_cachep, info); | |
3579 | return ret; | |
3580 | } | |
3581 | ||
3582 | if (!map) { | |
09cbfeaf | 3583 | map = kzalloc(PAGE_SIZE, GFP_NOFS); |
dc11dd5d JB |
3584 | if (!map) { |
3585 | kmem_cache_free(btrfs_free_space_cachep, info); | |
3586 | return -ENOMEM; | |
3587 | } | |
3588 | } | |
3589 | ||
3590 | spin_lock(&ctl->tree_lock); | |
3591 | bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | |
3592 | 1, 0); | |
3593 | if (!bitmap_info) { | |
3594 | info->bitmap = map; | |
3595 | map = NULL; | |
3596 | add_new_bitmap(ctl, info, offset); | |
3597 | bitmap_info = info; | |
20005523 | 3598 | info = NULL; |
dc11dd5d | 3599 | } |
74255aa0 | 3600 | |
dc11dd5d | 3601 | bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); |
cef40483 | 3602 | |
dc11dd5d JB |
3603 | bytes -= bytes_added; |
3604 | offset += bytes_added; | |
3605 | spin_unlock(&ctl->tree_lock); | |
74255aa0 | 3606 | |
dc11dd5d JB |
3607 | if (bytes) |
3608 | goto again; | |
74255aa0 | 3609 | |
20005523 FM |
3610 | if (info) |
3611 | kmem_cache_free(btrfs_free_space_cachep, info); | |
dc11dd5d JB |
3612 | /* kfree(NULL) is a no-op, so no NULL check is needed */ |
3613 | kfree(map); |
3614 | return 0; | |
74255aa0 JB |
3615 | } |
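/*
 * Usage sketch (added commentary; SZ_* constants come from linux/sizes.h):
 * a self-test can place a bitmap entry (last argument true) right next to
 * a plain extent entry, a layout btrfs_add_free_space() would normally
 * have merged:
 *
 *	ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
 *	if (!ret)
 *		ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 0);
 */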
3616 | ||
3617 | /* | |
3618 | * Checks to see if the given range is in the free space cache. This is really | |
3619 | * just used to check the absence of space, so if there is free space in the | |
3620 | * range at all we will return 1. | |
3621 | */ | |
dc11dd5d JB |
3622 | int test_check_exists(struct btrfs_block_group_cache *cache, |
3623 | u64 offset, u64 bytes) | |
74255aa0 JB |
3624 | { |
3625 | struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; | |
3626 | struct btrfs_free_space *info; | |
3627 | int ret = 0; | |
3628 | ||
3629 | spin_lock(&ctl->tree_lock); | |
3630 | info = tree_search_offset(ctl, offset, 0, 0); | |
3631 | if (!info) { | |
3632 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | |
3633 | 1, 0); | |
3634 | if (!info) | |
3635 | goto out; | |
3636 | } | |
3637 | ||
3638 | have_info: | |
3639 | if (info->bitmap) { | |
3640 | u64 bit_off, bit_bytes; | |
3641 | struct rb_node *n; | |
3642 | struct btrfs_free_space *tmp; | |
3643 | ||
3644 | bit_off = offset; | |
3645 | bit_bytes = ctl->unit; | |
0584f718 | 3646 | ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false); |
74255aa0 JB |
3647 | if (!ret) { |
3648 | if (bit_off == offset) { | |
3649 | ret = 1; | |
3650 | goto out; | |
3651 | } else if (bit_off > offset && | |
3652 | offset + bytes > bit_off) { | |
3653 | ret = 1; | |
3654 | goto out; | |
3655 | } | |
3656 | } | |
3657 | ||
3658 | n = rb_prev(&info->offset_index); | |
3659 | while (n) { | |
3660 | tmp = rb_entry(n, struct btrfs_free_space, | |
3661 | offset_index); | |
3662 | if (tmp->offset + tmp->bytes < offset) | |
3663 | break; | |
3664 | if (offset + bytes < tmp->offset) { | |
5473e0c4 | 3665 | n = rb_prev(&tmp->offset_index); |
74255aa0 JB |
3666 | continue; |
3667 | } | |
3668 | info = tmp; | |
3669 | goto have_info; | |
3670 | } | |
3671 | ||
3672 | n = rb_next(&info->offset_index); | |
3673 | while (n) { | |
3674 | tmp = rb_entry(n, struct btrfs_free_space, | |
3675 | offset_index); | |
3676 | if (offset + bytes < tmp->offset) | |
3677 | break; | |
3678 | if (tmp->offset + tmp->bytes < offset) { | |
5473e0c4 | 3679 | n = rb_next(&tmp->offset_index); |
74255aa0 JB |
3680 | continue; |
3681 | } | |
3682 | info = tmp; | |
3683 | goto have_info; | |
3684 | } | |
3685 | ||
20005523 | 3686 | ret = 0; |
74255aa0 JB |
3687 | goto out; |
3688 | } | |
3689 | ||
3690 | if (info->offset == offset) { | |
3691 | ret = 1; | |
3692 | goto out; | |
3693 | } | |
3694 | ||
3695 | if (offset > info->offset && offset < info->offset + info->bytes) | |
3696 | ret = 1; | |
3697 | out: | |
3698 | spin_unlock(&ctl->tree_lock); | |
3699 | return ret; | |
3700 | } | |
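/*
 * Companion sketch (added commentary): a test asserting that a range was
 * fully removed can rely on any overlap at all producing a return of 1:
 *
 *	if (test_check_exists(cache, SZ_1M, SZ_4M)) {
 *		test_msg("free space still present in the range\n");
 *		return -EINVAL;
 *	}
 *
 * test_msg() here is the helper used by the btrfs self-tests; treat it as
 * an assumption of this sketch.
 */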
dc11dd5d | 3701 | #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */ |