// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "block-group.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or it returns the node it did find
 * in the tree.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree, and if it can't be found return the
 * first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset.
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = BTRFS_I(igrab(&inode->vfs_inode));
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = entry->inode;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (unlikely(node))
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			const struct btrfs_file_extent *file_extent, unsigned long flags)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	/*
	 * For regular writes, we just use the members in @file_extent.
	 *
	 * For NOCOW, we don't really care about the numbers except @start and
	 * file_extent->num_bytes, as we won't insert a file extent item at all.
	 *
	 * For PREALLOC, we do not use ordered extent members, but
	 * btrfs_mark_extent_written() handles everything.
	 *
	 * So here we always pass 0 as offset for NOCOW/PREALLOC ordered extents,
	 * or btrfs_split_ordered_extent() cannot handle it correctly.
	 */
	if (flags & ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)))
		entry = alloc_ordered_extent(inode, file_offset,
					     file_extent->num_bytes,
					     file_extent->num_bytes,
					     file_extent->disk_bytenr + file_extent->offset,
					     file_extent->num_bytes, 0, flags,
					     file_extent->compression);
	else
		entry = alloc_ordered_extent(inode, file_offset,
					     file_extent->num_bytes,
					     file_extent->ram_bytes,
					     file_extent->disk_bytenr,
					     file_extent->disk_num_bytes,
					     file_extent->offset, flags,
					     file_extent->compression);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}
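
/*
 * Example (an illustrative sketch, not a caller from the write path): a
 * plain COW write would fill a btrfs_file_extent from its allocated data
 * extent and keep the returned reference until its bios are submitted.
 * All values below are hypothetical.
 */
static inline void example_alloc_ordered_extent(struct btrfs_inode *inode)
{
	const struct btrfs_file_extent file_extent = {
		.disk_bytenr = SZ_1M,		/* hypothetical extent start */
		.disk_num_bytes = SZ_64K,
		.num_bytes = SZ_64K,
		.ram_bytes = SZ_64K,
		.offset = 0,
		.compression = BTRFS_COMPRESS_NONE,
	};
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_alloc_ordered_extent(inode, 0, &file_extent,
					     1U << BTRFS_ORDERED_REGULAR);
	if (IS_ERR(ordered))
		return;
	/* ... submit the bios covering the first 64K of the file ... */
	btrfs_put_ordered_extent(ordered);	/* drop the returned reference */
}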

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = entry->inode;

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
{
	if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
		mapping_set_error(ordered->inode->vfs_inode.i_mapping, -EIO);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct folio *folio, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (folio) {
		ASSERT(folio->mapping);
		ASSERT(folio_pos(folio) <= file_offset);
		ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio));

		/*
		 * Ordered (Private2) bit indicates whether we still have
		 * pending io unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to next range.
		 */
		if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
			return false;
		btrfs_folio_clear_ordered(fs_info, folio, file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   btrfs_root_id(inode->root), btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct folio *folio, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = ordered->inode;
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, folio, file_offset, len,
					uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	/*
	 * If this is a COW write it means we created new extent maps for the
	 * range and they point to unwritten locations if we got an error either
	 * before submitting a bio or during IO.
	 *
	 * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
	 * are queuing its completion below. During completion, at
	 * btrfs_finish_one_ordered(), we will drop the extent maps for the
	 * unwritten extents.
	 *
	 * However because completion runs in a work queue we can end up having
	 * a fast fsync running before that. In the case of direct IO, once we
	 * unlock the inode the fsync might start, and we queue the completion
	 * before unlocking the inode. In the case of buffered IO when writeback
	 * finishes (end_bbio_data_write()) we queue the completion, so if the
	 * writeback was triggered by a fast fsync, the fsync might start
	 * logging before ordered extent completion runs in the work queue.
	 *
	 * The fast fsync will log file extent items based on the extent maps it
	 * finds, so if by the time it collects extent maps the ordered extent
	 * completion didn't happen yet, it will log file extent items that
	 * point to unwritten extents, resulting in a corruption if a crash
	 * happens and the log tree is replayed. Note that a fast fsync does not
	 * wait for completion of ordered extents in order to reduce latency.
	 *
	 * Set a flag in the inode so that the next fast fsync will wait for
	 * ordered extents to complete before starting to log.
	 */
	if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
		set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
}

/*
 * Mark all ordered extents io inside the specified range finished.
 *
 * @folio:	 The involved folio for the operation.
 *		 For uncompressed buffered IO, the folio status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct folio *folio, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (node == NULL)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/* @cur is beyond the end of this OE, go to the next one. */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * @cur is before this OE.
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will be also used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finish IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}
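
/*
 * Example (an illustrative sketch, assuming a caller that tracks completed
 * sub-ranges itself): on a true return the caller owns the reference stored
 * in @ordered and must drop it after queuing the finish work.
 */
static inline void example_dec_test_ordered(struct btrfs_inode *inode,
					    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_extent *ordered = NULL;

	if (btrfs_dec_test_ordered_pending(inode, &ordered, file_offset, io_size)) {
		btrfs_queue_ordered_fn(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}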

/*
 * Used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with alloc_ordered_extent(). */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. Use @bg as range or do whole
 * range if it's NULL.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	u64 range_start, range_len;
	u64 range_end;

	if (bg) {
		range_start = bg->start;
		range_len = bg->length;
	} else {
		range_start = 0;
		range_len = U64_MAX;
	}
	range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		if (nr != U64_MAX)
			nr--;
		count++;
		spin_lock(&root->ordered_extent_lock);
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

/*
 * Wait for @nr ordered extents that intersect the @bg, or the whole range of
 * the filesystem if @bg is NULL.
 */
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const struct btrfs_block_group *bg)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr, bg);
		btrfs_put_root(root);

		if (nr != U64_MAX)
			nr -= done;

		spin_lock(&fs_info->ordered_root_lock);
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = entry->inode;
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
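
/*
 * Example (an illustrative sketch): an fsync-like path can flush and wait
 * for the whole file by passing a length that covers everything.
 */
static inline int example_wait_whole_file(struct btrfs_inode *inode)
{
	return btrfs_wait_ordered_range(inode, 0, (u64)-1);
}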

/*
 * Find an ordered extent corresponding to file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}
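
/*
 * Example (an illustrative sketch): a successful lookup returns a referenced
 * ordered extent, so it must always be paired with btrfs_put_ordered_extent().
 */
static inline bool example_offset_has_ordered(struct btrfs_inode *inode, u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return false;
	btrfs_put_ordered_extent(ordered);
	return true;
}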

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node) {
		node = ordered_tree_search(inode, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	btrfs_assert_inode_locked(inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}
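
/*
 * Example (an illustrative sketch): a consumer of the list built above must
 * unlink each entry and drop the reference taken for it.
 */
static inline void example_put_logged_extents(struct list_head *list)
{
	struct btrfs_ordered_extent *ordered, *tmp;

	list_for_each_entry_safe(ordered, tmp, list, log_list) {
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}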

/*
 * Lookup and return any extent before 'file_offset'.  NULL is returned if
 * none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring after it's called no
 * ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}
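
/*
 * Example (an illustrative sketch, range values hypothetical): callers get
 * the range locked with no ordered extents pending, and unlock it themselves
 * when done.
 */
static inline void example_lock_and_flush(struct btrfs_inode *inode)
{
	struct extent_state *cached_state = NULL;
	const u64 start = 0;
	const u64 end = SZ_128K - 1;

	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
	/* ... operate on [start, end] with no pending ordered extents ... */
	unlock_extent(&inode->io_tree, start, end, &cached_state);
}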

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if btrfs_lookup_ordered_range() does not return any extents,
 * otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}
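
/*
 * Example (an illustrative sketch): a nowait path (e.g. RWF_NOWAIT writes)
 * tries the lock and punts to a blocking context on failure.
 */
static inline int example_try_lock_range(struct btrfs_inode *inode, u64 start,
					 u64 end, struct extent_state **cached)
{
	if (!btrfs_try_lock_ordered_range(inode, start, end, cached))
		return -EAGAIN;	/* redo in a context that can block */
	/* ... fast path work on the locked range ... */
	unlock_extent(&inode->io_tree, start, end, cached);
	return 0;
}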

/* Split out a new ordered extent for the first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	/*
	 * Take the root's ordered_extent_lock to avoid a race with
	 * btrfs_wait_ordered_extents() when updating the disk_bytenr and
	 * disk_num_bytes fields of the ordered extent below. And we disable
	 * IRQs because the inode's ordered_tree_lock is used in IRQ context
	 * elsewhere.
	 *
	 * There's no concern about a previous caller of
	 * btrfs_wait_ordered_extents() getting the trimmed ordered extent
	 * before we insert the new one, because even if it gets the ordered
	 * extent before it's trimmed and the new one inserted, right before it
	 * uses it or during its use, the ordered extent might have been
	 * trimmed in the meanwhile, and it missed the new ordered extent.
	 * There's no way around this and it's harmless for current use cases,
	 * so we take the root's ordered_extent_lock to fix that race during
	 * trimming and silence tools like KCSAN.
	 */
	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);

	/*
	 * We don't have overlapping ordered extents (that would imply double
	 * allocation of extents) and we checked above that the split length
	 * does not cross the ordered extent's num_bytes field, so there's
	 * no need to remove it and re-insert it in the tree.
	 */
	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;
	ordered->ram_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (unlikely(node))
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu after split",
			    new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}
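
/*
 * Example (an illustrative sketch): when a bio can only cover the first part
 * of an ordered extent, e.g. on zoned devices, split off that part; @len is
 * assumed to be aligned and smaller than the ordered extent.
 */
static inline int example_split_for_bio(struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_ordered_extent *new;

	new = btrfs_split_ordered_extent(ordered, len);
	if (IS_ERR(new))
		return PTR_ERR(new);
	/* @new covers the first @len bytes, @ordered was trimmed to the rest. */
	btrfs_put_ordered_extent(new);
	return 0;
}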

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}