// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        /* Saturate instead of wrapping past U64_MAX. */
        if (entry->file_offset + entry->num_bytes < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->num_bytes;
}
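
/*
 * A quick worked example of the saturation above, with made-up numbers:
 * file_offset = (u64)-4096 and num_bytes = 8192 would wrap around to 4095,
 * sorting the extent before its own start.  Returning (u64)-1 instead keeps
 * entry_end() monotonic and the rbtree ordering sane.
 */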

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        /* Walk forward past entries that end at or before file_offset. */
        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        /* Walk backward while the candidate still ends past file_offset. */
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->num_bytes <= file_offset)
                return 0;
        return 1;
}
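
/*
 * Worked example with made-up numbers: an entry covering [4096, 8192) and a
 * query of file_offset = 8192, len = 4096 do not overlap, because the ranges
 * are half-open and 8192 <= 8192 trips the second condition.  Byte-adjacent
 * ranges therefore never count as overlapping.
 */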

/*
 * Look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (in_range(file_offset, entry->file_offset, entry->num_bytes))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}
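
/*
 * Design note: tree->last acts as a one-entry lookup cache, so repeated
 * searches for offsets inside the same ordered extent (the common pattern
 * when pages of one extent complete back to back) skip the rbtree walk.
 */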

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                                      u64 disk_bytenr, u64 num_bytes,
                                      u64 disk_num_bytes, int type, int dio,
                                      int compress_type)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        int ret;

        if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
                /* For nocow write, we can release the qgroup rsv right now */
                ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
                ret = 0;
        } else {
                /*
                 * The ordered extent has reserved qgroup space, release now
                 * and pass the reserved number for qgroup_record to free.
                 */
                ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
                if (ret < 0)
                        return ret;
        }
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->disk_bytenr = disk_bytenr;
        entry->num_bytes = num_bytes;
        entry->disk_num_bytes = disk_num_bytes;
        entry->bytes_left = num_bytes;
        entry->inode = igrab(&inode->vfs_inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        entry->qgroup_rsv = ret;
        entry->physical = (u64)-1;
        entry->disk = NULL;
        entry->partno = (u8)-1;

        ASSERT(type == BTRFS_ORDERED_REGULAR ||
               type == BTRFS_ORDERED_NOCOW ||
               type == BTRFS_ORDERED_PREALLOC ||
               type == BTRFS_ORDERED_COMPRESSED);
        set_bit(type, &entry->flags);

        percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
                                 fs_info->delalloc_batch);

        if (dio)
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

        /* one ref for the tree */
        refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                            "inconsistency in ordered tree at offset %llu",
                            file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        /*
         * We don't need the count_max_extents here, we can assume that all of
         * that work has been done at higher layers, so this is truly the
         * smallest the extent is going to get.
         */
        spin_lock(&inode->lock);
        btrfs_mod_outstanding_extents(inode, 1);
        spin_unlock(&inode->lock);

        return 0;
}
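
/*
 * A minimal usage sketch, assuming a buffered COW write path (offsets made
 * up; the real callers live in inode.c):
 *
 *      ret = btrfs_add_ordered_extent(inode, 0, disk_bytenr, SZ_1M, SZ_1M,
 *                                     BTRFS_ORDERED_REGULAR);
 *
 * Writeback then runs against the range, and per-page IO completion
 * decrements bytes_left through the btrfs_dec_test_*_ordered_pending()
 * helpers below until the extent is finished.
 */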

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
                             u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
                             int type)
{
        ASSERT(type == BTRFS_ORDERED_REGULAR ||
               type == BTRFS_ORDERED_NOCOW ||
               type == BTRFS_ORDERED_PREALLOC);
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
                                 u64 disk_bytenr, u64 num_bytes,
                                 u64 disk_num_bytes, int type)
{
        ASSERT(type == BTRFS_ORDERED_REGULAR ||
               type == BTRFS_ORDERED_NOCOW ||
               type == BTRFS_ORDERED_PREALLOC);
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
                                      u64 disk_bytenr, u64 num_bytes,
                                      u64 disk_num_bytes, int compress_type)
{
        ASSERT(compress_type != BTRFS_COMPRESS_NONE);
        return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
                                          num_bytes, disk_num_bytes,
                                          BTRFS_ORDERED_COMPRESSED, 0,
                                          compress_type);
}
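
/*
 * Hypothetical example of the compressed variant: a 128K logical range that
 * compressed down to 16K on disk would be added as
 *
 *      btrfs_add_ordered_extent_compress(inode, file_offset, disk_bytenr,
 *                                        SZ_128K, SZ_16K, BTRFS_COMPRESS_ZLIB);
 *
 * i.e. num_bytes tracks the uncompressed range while disk_num_bytes tracks
 * the on-disk size; for non-compressed extents the two are typically equal.
 */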

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split between them.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(entry->inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * Finish IO for one ordered extent across a given range.  The range can
 * contain several ordered extents.
 *
 * @finished_ret: Return the finished ordered extent
 * @file_offset:  File offset for the finished IO
 *                Will also be updated to one byte past the range that is
 *                recorded as finished.  This allows the caller to walk forward.
 * @io_size:      Length of the finished IO range
 * @uptodate:     If the IO finished without problem
 *
 * Return true if any ordered extent is finished in the range, and update
 * @finished_ret and @file_offset.
 * Return false otherwise.
 *
 * NOTE: Although the range can cross multiple ordered extents, only one
 * ordered extent will be updated during one call.  The caller is responsible
 * for iterating over all ordered extents in the range.
 */
bool btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
                                   struct btrfs_ordered_extent **finished_ret,
                                   u64 *file_offset, u64 io_size, int uptodate)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        bool finished = false;
        unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;

        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!in_range(*file_offset, entry->file_offset, entry->num_bytes))
                goto out;

        dec_start = max(*file_offset, entry->file_offset);
        dec_end = min(*file_offset + io_size,
                      entry->file_offset + entry->num_bytes);
        *file_offset = dec_end;
        if (dec_start > dec_end) {
                btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
                           dec_start, dec_end);
        }
        to_dec = dec_end - dec_start;
        if (to_dec > entry->bytes_left) {
                btrfs_crit(fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, to_dec);
        }
        entry->bytes_left -= to_dec;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                /*
                 * Ensure only one caller can set the flag and finished_ret
                 * accordingly
                 */
                finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        }
out:
        if (finished && finished_ret && entry) {
                *finished_ret = entry;
                refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return finished;
}
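
/*
 * A minimal iteration sketch (hypothetical caller, error handling elided)
 * showing how @file_offset walks forward so that a range spanning several
 * ordered extents is finished one extent per call:
 *
 *      u64 cur = start;
 *      struct btrfs_ordered_extent *ordered = NULL;
 *
 *      while (btrfs_dec_test_first_ordered_pending(inode, &ordered, &cur,
 *                                                  end + 1 - cur, uptodate)) {
 *              ...finish @ordered, then drop the reference taken for us...
 *              btrfs_put_ordered_extent(ordered);
 *      }
 */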

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:      The cached ordered extent. If not NULL, we can skip the tree
 *               search and use the ordered extent directly.
 *               Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:     Length of the finished IO range
 * @uptodate:    If the IO finishes without problem
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                    struct btrfs_ordered_extent **cached,
                                    u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        bool finished = false;

        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                goto out;

        if (io_size > entry->bytes_left)
                btrfs_crit(inode->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);

        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                /*
                 * Ensure only one caller can set the flag and finished_ret
                 * accordingly
                 */
                finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        }
out:
        if (finished && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return finished;
}
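
/*
 * Usage sketch (hypothetical caller): on success @cached returns the
 * finished extent with an extra reference that the caller must drop:
 *
 *      struct btrfs_ordered_extent *cached = NULL;
 *
 *      if (btrfs_dec_test_ordered_pending(inode, &cached, offset, len, 1)) {
 *              ...insert the file extent item, remove from the tree...
 *              btrfs_put_ordered_extent(cached);
 *      }
 */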

/*
 * Used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

        if (refcount_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(list_empty(&entry->log_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * Remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = btrfs_inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        bool pending;

        /* This is paired with btrfs_add_ordered_extent. */
        spin_lock(&btrfs_inode->lock);
        btrfs_mod_outstanding_extents(btrfs_inode, -1);
        spin_unlock(&btrfs_inode->lock);
        if (root != fs_info->tree_root)
                btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
                                                false);

        percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
                                 fs_info->delalloc_batch);

        tree = &btrfs_inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
        spin_unlock_irq(&tree->lock);

        /*
         * The current running transaction is waiting on us, we need to let it
         * know that we're complete and wake it up.
         */
        if (pending) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be set,
                 * but if it isn't we don't want to deref/assert under the spin
                 * lock, so be nice and check if trans is set, but ASSERT() so
                 * if it isn't set a developer will notice.
                 */
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        refcount_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                ASSERT(trans);
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
}
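
/*
 * Lifecycle note: the usual finishing sequence (see btrfs_finish_ordered_io()
 * in inode.c) is to insert the file extent item, call
 * btrfs_remove_ordered_extent() to unlink the entry, and then drop the
 * tree's reference with btrfs_put_ordered_extent().
 */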

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered, 1);
        complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(skipped);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        u64 count = 0;
        const u64 range_end = range_start + range_len;

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);

                if (range_end <= ordered->disk_bytenr ||
                    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
                        list_move_tail(&ordered->root_extent_list, &skipped);
                        cond_resched_lock(&root->ordered_extent_lock);
                        continue;
                }

                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                refcount_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

                cond_resched();
                spin_lock(&root->ordered_extent_lock);
                if (nr != U64_MAX)
                        nr--;
                count++;
        }
        list_splice_tail(&skipped, &root->ordered_extents);
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const u64 range_start, const u64 range_len)
{
        struct btrfs_root *root;
        struct list_head splice;
        u64 done;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr,
                                                  range_start, range_len);
                btrfs_put_root(root);

                spin_lock(&fs_info->ordered_root_lock);
                if (nr != U64_MAX)
                        nr -= done;
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->num_bytes - 1;
        struct btrfs_inode *inode = BTRFS_I(entry->inode);

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for the flusher thread to find them
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        int ret = 0;
        int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        ret = btrfs_fdatawrite_range(inode, start, orig_end);
        if (ret)
                return ret;

        /*
         * If we have a writeback error don't return immediately. Wait first
         * for any ordered extents that haven't completed yet. This is to make
         * sure no one can dirty the same page ranges and call writepages()
         * before the ordered extents complete - to avoid failures (-EEXIST)
         * when adding the new ordered extents to the ordered tree.
         */
        ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->num_bytes <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(ordered, 1);
                end = ordered->file_offset;
                /*
                 * If the ordered extent had an error save the error but don't
                 * exit without waiting first for all other ordered extents in
                 * the range to complete.
                 */
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;

        tree = &inode->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                entry = NULL;
        if (entry)
                refcount_inc(&entry->refs);
out:
        spin_unlock_irqrestore(&tree->lock, flags);
        return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                refcount_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}
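
/*
 * Hypothetical example: with an ordered extent covering [64K, 128K), a DIO
 * write locking [0, 96K) would find nothing if the lookup were keyed on the
 * range start alone, but btrfs_lookup_ordered_range(inode, 0, 96 * 1024)
 * walks forward until range_overlaps() matches and returns the extent with
 * a reference held.
 */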

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
                                           struct list_head *list)
{
        struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
        struct rb_node *n;

        ASSERT(inode_is_locked(&inode->vfs_inode));

        spin_lock_irq(&tree->lock);
        for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
                struct btrfs_ordered_extent *ordered;

                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

                if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;

                ASSERT(list_empty(&ordered->log_list));
                list_add_tail(&ordered->log_list, list);
                refcount_inc(&ordered->refs);
        }
        spin_unlock_irq(&tree->lock);
}

/*
 * Lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &inode->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        refcount_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent in the range can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state)
{
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cache = NULL;
        struct extent_state **cachedp = &cache;

        if (cached_state)
                cachedp = cached_state;

        while (1) {
                lock_extent_bits(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
                        /*
                         * If no external cached_state has been passed then
                         * decrement the extra ref taken for cachedp since we
                         * aren't exposing it outside of this function
                         */
                        if (!cached_state)
                                refcount_dec(&cache->refs);
                        break;
                }
                unlock_extent_cached(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
}
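
/*
 * Usage sketch (hypothetical caller): a path that must observe stable, fully
 * ordered data would drain the range before reading it:
 *
 *      struct extent_state *cached = NULL;
 *
 *      btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *      ...the range is locked and no ordered extent in it is pending...
 *      unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */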

static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
                                u64 len)
{
        struct inode *inode = ordered->inode;
        u64 file_offset = ordered->file_offset + pos;
        u64 disk_bytenr = ordered->disk_bytenr + pos;
        u64 num_bytes = len;
        u64 disk_num_bytes = len;
        int type;
        unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
        int compress_type = ordered->compress_type;
        unsigned long weight;
        int ret;

        /* At most one type bit may be set besides the DIRECT flag. */
        weight = hweight_long(flags_masked);
        WARN_ON_ONCE(weight > 1);
        if (!weight)
                type = 0;
        else
                type = __ffs(flags_masked);

        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
                WARN_ON_ONCE(1);
                ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
                                file_offset, disk_bytenr, num_bytes,
                                disk_num_bytes, compress_type);
        } else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
                ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
                                disk_bytenr, num_bytes, disk_num_bytes, type);
        } else {
                ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
                                disk_bytenr, num_bytes, disk_num_bytes, type);
        }

        return ret;
}

int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
                               u64 post)
{
        struct inode *inode = ordered->inode;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct rb_node *node;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret = 0;

        spin_lock_irq(&tree->lock);
        /* Remove from tree once */
        node = &ordered->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;

        ordered->file_offset += pre;
        ordered->disk_bytenr += pre;
        ordered->num_bytes -= (pre + post);
        ordered->disk_num_bytes -= (pre + post);
        ordered->bytes_left -= (pre + post);

        /* Re-insert the node */
        node = tree_insert(&tree->tree, ordered->file_offset,
                           &ordered->rb_node);
        if (node)
                btrfs_panic(fs_info, -EEXIST,
                            "zoned: inconsistency in ordered tree at offset %llu",
                            ordered->file_offset);

        spin_unlock_irq(&tree->lock);

        if (pre)
                ret = clone_ordered_extent(ordered, 0, pre);
        if (ret == 0 && post)
                ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
                                           post);

        return ret;
}
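
/*
 * Worked example with made-up numbers: an ordered extent covering [0, 1M)
 * split with pre = 128K and post = 256K shrinks in place by 384K (num_bytes
 * becomes 640K) and its start moves forward by pre to 128K; the clipped head
 * and tail are then re-added as separate ordered extents through
 * clone_ordered_extent().
 */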

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                     sizeof(struct btrfs_ordered_extent), 0,
                                     SLAB_MEM_SPREAD,
                                     NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}

void __cold ordered_data_exit(void)
{
        kmem_cache_destroy(btrfs_ordered_extent_cache);
}