// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

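/*
 * Return the end offset of the ordered extent, or (u64)-1 if
 * file_offset + num_bytes overflows.
 */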
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the existing node that overlaps
 * the given offset if it did not.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree. If it can't be found, return the
 * entry at the first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

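/* Return 1 if the given range overlaps the ordered extent, 0 otherwise. */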
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that covers this offset, otherwise
 * the first one less than this offset.
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/**
 * Add an ordered extent to the per-inode tree.
 *
 * @inode: Inode that this extent is for.
 * @file_offset: Logical offset in file where the extent starts.
 * @num_bytes: Logical length of extent in file.
 * @ram_bytes: Full length of unencoded data.
 * @disk_bytenr: Offset of extent on disk.
 * @disk_num_bytes: Size of extent on disk.
 * @offset: Offset into unencoded data where file data starts.
 * @flags: Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type: Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted.
 *
 * Return: 0 or -ENOMEM.
 */
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			     u64 disk_num_bytes, u64 offset, unsigned flags,
			     int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->physical = (u64)-1;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);
	entry->flags = flags;

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

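/* Work queue callback to finish an ordered extent once all of its IO is done. */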
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

/*
 * Mark all ordered extents' IO inside the specified range as finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * Ordered (Private2) bit indicates whether we still
			 * have pending io unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to next range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to queue
		 * the finish_func to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			trace_btrfs_ordered_extent_mark_finished(inode, entry);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_ordered_fn, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and @finished
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}

/*
 * Used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

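/* Work queue callback that flushes a single ordered extent and signals completion. */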
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

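/*
 * Wait for ordered extents across all roots on the ordered_roots list,
 * waiting for at most @nr extents that overlap the given disk byte range.
 */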
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * pages in the range can be dirty, clean or writeback. We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		if (!freespace_inode)
			btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * Lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

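/*
 * Helper for splitting: create and insert a new ordered extent covering the
 * @len bytes at byte @pos within @ordered, inheriting its type flags.
 */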
static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
				u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	u64 file_offset = ordered->file_offset + pos;
	u64 disk_bytenr = ordered->disk_bytenr + pos;
	unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;

	/*
	 * The splitting extent is already counted and will be added again in
	 * btrfs_add_ordered_extent_*(). Subtract len to avoid double counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
				 fs_info->delalloc_batch);
	WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
	return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
					disk_bytenr, len, 0, flags,
					ordered->compress_type);
}

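/*
 * Split @ordered by trimming @pre bytes from its front and @post bytes from
 * its tail: the ordered extent is shrunk and re-inserted, and the trimmed
 * ranges are cloned into new ordered extents.
 */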
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
			       u64 post)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct rb_node *node;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret = 0;

	trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += pre;
	ordered->disk_bytenr += pre;
	ordered->num_bytes -= (pre + post);
	ordered->disk_num_bytes -= (pre + post);
	ordered->bytes_left -= (pre + post);

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	if (pre)
		ret = clone_ordered_extent(ordered, 0, pre);
	if (ret == 0 && post)
		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
					   post);

	return ret;
}

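/* Create the slab cache for ordered extents, called once at module init. */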
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

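/* Destroy the ordered extent slab cache, called at module exit. */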
void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}