// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

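/* Return the exclusive end offset of the ordered extent, or (u64)-1 on overflow. */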
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

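/* Return 1 if [file_offset, file_offset + len) overlaps the ordered extent, 0 otherwise. */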
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

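/*
 * Allocate and initialize an ordered extent for the given file range.  The
 * qgroup reservation for the range is freed or released here and the inode's
 * outstanding extent count is bumped.  The entry is not inserted into the
 * ordered tree; that is left to the caller.
 */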
static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

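/*
 * Insert an already allocated ordered extent into the inode's ordered tree
 * and into the per-root list of ordered extents, taking one extra reference
 * for the tree.
 */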
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

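/* Work function: complete the ordered extent once all of its IO has finished. */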
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

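/*
 * Account @len finished bytes (and the page Ordered bit, if a page is given)
 * against the ordered extent.  Return true if this made the ordered extent
 * fully complete, in which case the caller must queue it for final processing.
 */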
static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * Ordered (Private2) bit indicates whether we still have
		 * pending io unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to next range.
		 */
		if (!btrfs_folio_test_ordered(fs_info, page_folio(page),
					      file_offset, len))
			return false;
		btrfs_folio_clear_ordered(fs_info, page_folio(page), file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   inode->root->root_key.objectid, btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

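/* Queue the final processing of a completed ordered extent on the right workqueue. */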
static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

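/*
 * Mark @len bytes of the ordered extent as finished and, if that completes it,
 * queue it for final processing.  Return true if the extent was queued.
 */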
bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}

/*
 * Mark the IO of all ordered extents inside the specified range as finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, the callers ensure the endio function is not
 *		 executed twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

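/* Work function: start and wait for one ordered extent, then signal completion. */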
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

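/*
 * Wait for ordered extents across all roots, limited to @nr extents and to
 * the given disk bytenr range.
 */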
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node) {
		node = ordered_tree_search(inode, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if btrfs_lock_ordered_range does not return any extents,
 * otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

/* Split out a new ordered extent for the first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (inode->ordered_tree_last == node)
		inode->ordered_tree_last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	/* Re-insert the node */
	node = tree_insert(&inode->ordered_tree, ordered->file_offset,
			   &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}

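/* Create the slab cache for struct btrfs_ordered_extent. */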
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}