// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "block-group.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

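/*
 * Return the first byte past the end of @entry in the file, clamping to
 * (u64)-1 if file_offset + num_bytes overflows a u64.
 */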
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->num_bytes < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the existing node whose range
 * overlaps the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

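/*
 * Return 1 if the range [file_offset, file_offset + len) intersects the
 * ordered extent @entry, 0 otherwise.
 */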
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->num_bytes <= file_offset)
                return 0;
        return 1;
}

/*
 * Find the first ordered struct that covers this offset, otherwise the
 * first one less than this offset.
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
                                                  u64 file_offset)
{
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (inode->ordered_tree_last) {
                entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
                                 rb_node);
                if (in_range(file_offset, entry->file_offset, entry->num_bytes))
                        return inode->ordered_tree_last;
        }
        ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                inode->ordered_tree_last = ret;
        return ret;
}

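/*
 * Allocate and initialize a new ordered extent for the given file range.
 *
 * For NOCOW/PREALLOC writes the qgroup data reservation is freed right away,
 * otherwise it is released and the reserved amount is recorded in the new
 * entry's qgroup_rsv. The returned entry holds one reference plus an inode
 * reference (igrab), and accounts for one outstanding extent.
 */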
static struct btrfs_ordered_extent *alloc_ordered_extent(
                        struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
                        u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
                        u64 offset, unsigned long flags, int compress_type)
{
        struct btrfs_ordered_extent *entry;
        int ret;
        u64 qgroup_rsv = 0;

        if (flags &
            ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
                /* For nocow write, we can release the qgroup rsv right now */
                ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
                if (ret < 0)
                        return ERR_PTR(ret);
        } else {
                /*
                 * The ordered extent has reserved qgroup space, release now
                 * and pass the reserved number for qgroup_record to free.
                 */
                ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
                if (ret < 0)
                        return ERR_PTR(ret);
        }
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return ERR_PTR(-ENOMEM);

        entry->file_offset = file_offset;
        entry->num_bytes = num_bytes;
        entry->ram_bytes = ram_bytes;
        entry->disk_bytenr = disk_bytenr;
        entry->disk_num_bytes = disk_num_bytes;
        entry->offset = offset;
        entry->bytes_left = num_bytes;
        entry->inode = BTRFS_I(igrab(&inode->vfs_inode));
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        entry->qgroup_rsv = qgroup_rsv;
        entry->flags = flags;
        refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        INIT_LIST_HEAD(&entry->bioc_list);
        init_completion(&entry->completion);

        /*
         * We don't need the count_max_extents here, we can assume that all of
         * that work has been done at higher layers, so this is truly the
         * smallest the extent is going to get.
         */
        spin_lock(&inode->lock);
        btrfs_mod_outstanding_extents(inode, 1);
        spin_unlock(&inode->lock);

        return entry;
}

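/*
 * Insert an already allocated ordered extent into the per-inode tree and the
 * per-root list, taking one extra reference for the tree. Panics if an
 * overlapping entry already exists, as that would imply double allocated
 * extents.
 */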
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct btrfs_inode *inode = entry->inode;
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;

        trace_btrfs_ordered_extent_add(inode, entry);

        percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
                                 fs_info->delalloc_batch);

        /* One ref for the tree. */
        refcount_inc(&entry->refs);

        spin_lock_irq(&inode->ordered_tree_lock);
        node = tree_insert(&inode->ordered_tree, entry->file_offset,
                           &entry->rb_node);
        if (unlikely(node))
                btrfs_panic(fs_info, -EEXIST,
                            "inconsistency in ordered tree at offset %llu",
                            entry->file_offset);
        spin_unlock_irq(&inode->ordered_tree_lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
                        struct btrfs_inode *inode, u64 file_offset,
                        const struct btrfs_file_extent *file_extent, unsigned long flags)
{
        struct btrfs_ordered_extent *entry;

        ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

        /*
         * For regular writes, we just use the members in @file_extent.
         *
         * For NOCOW, we don't really care about the numbers except @start and
         * file_extent->num_bytes, as we won't insert a file extent item at all.
         *
         * For PREALLOC, we do not use ordered extent members, but
         * btrfs_mark_extent_written() handles everything.
         *
         * So here we always pass 0 as offset for NOCOW/PREALLOC ordered extents,
         * or btrfs_split_ordered_extent() cannot handle it correctly.
         */
        if (flags & ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)))
                entry = alloc_ordered_extent(inode, file_offset,
                                             file_extent->num_bytes,
                                             file_extent->num_bytes,
                                             file_extent->disk_bytenr + file_extent->offset,
                                             file_extent->num_bytes, 0, flags,
                                             file_extent->compression);
        else
                entry = alloc_ordered_extent(inode, file_offset,
                                             file_extent->num_bytes,
                                             file_extent->ram_bytes,
                                             file_extent->disk_bytenr,
                                             file_extent->disk_num_bytes,
                                             file_extent->offset, flags,
                                             file_extent->compression);
        if (!IS_ERR(entry))
                insert_ordered_extent(entry);
        return entry;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_inode *inode = entry->inode;

        spin_lock_irq(&inode->ordered_tree_lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&inode->ordered_tree_lock);
}

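/*
 * Mark an ordered extent as failed, and report the error on the inode's
 * mapping exactly once per ordered extent.
 */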
void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
{
        if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                mapping_set_error(ordered->inode->vfs_inode.i_mapping, -EIO);
}

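/* Work queue callback that runs the final ordered extent completion. */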
static void finish_ordered_fn(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered_extent;

        ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
        btrfs_finish_ordered_io(ordered_extent);
}

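/*
 * Account for @len finished IO bytes of @ordered and check whether the whole
 * ordered extent is now complete. If so, mark it IO done, grab a reference
 * for the caller and return true; the caller must then queue the completion.
 * Must be called with the inode's ordered_tree_lock held.
 */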
static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
                                      struct folio *folio, u64 file_offset,
                                      u64 len, bool uptodate)
{
        struct btrfs_inode *inode = ordered->inode;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;

        lockdep_assert_held(&inode->ordered_tree_lock);

        if (folio) {
                ASSERT(folio->mapping);
                ASSERT(folio_pos(folio) <= file_offset);
                ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio));

                /*
                 * Ordered (Private2) bit indicates whether we still have
                 * pending io unfinished for the ordered extent.
                 *
                 * If there's no such bit, we need to skip to next range.
                 */
                if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
                        return false;
                btrfs_folio_clear_ordered(fs_info, folio, file_offset, len);
        }

        /* Now we're fine to update the accounting. */
        if (WARN_ON_ONCE(len > ordered->bytes_left)) {
                btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
                           btrfs_root_id(inode->root), btrfs_ino(inode),
                           ordered->file_offset, ordered->num_bytes,
                           len, ordered->bytes_left);
                ordered->bytes_left = 0;
        } else {
                ordered->bytes_left -= len;
        }

        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

        if (ordered->bytes_left)
                return false;

        /*
         * All the IO of the ordered extent is finished, we need to queue
         * the finish_func to be executed.
         */
        set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
        cond_wake_up(&ordered->wait);
        refcount_inc(&ordered->refs);
        trace_btrfs_ordered_extent_mark_finished(inode, ordered);
        return true;
}

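/*
 * Queue the ordered extent completion on the appropriate end-io workqueue,
 * depending on whether the inode is a free space inode.
 */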
static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
        struct btrfs_inode *inode = ordered->inode;
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
                fs_info->endio_freespace_worker : fs_info->endio_write_workers;

        btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
        btrfs_queue_work(wq, &ordered->work);
}

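/*
 * Mark @len bytes starting at @file_offset as finished for @ordered, and
 * queue the ordered extent completion once all of its IO has finished.
 */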
void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
                                 struct folio *folio, u64 file_offset, u64 len,
                                 bool uptodate)
{
        struct btrfs_inode *inode = ordered->inode;
        unsigned long flags;
        bool ret;

        trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

        spin_lock_irqsave(&inode->ordered_tree_lock, flags);
        ret = can_finish_ordered_extent(ordered, folio, file_offset, len,
                                        uptodate);
        spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

        /*
         * If this is a COW write it means we created new extent maps for the
         * range and they point to unwritten locations if we got an error either
         * before submitting a bio or during IO.
         *
         * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
         * are queuing its completion below. During completion, at
         * btrfs_finish_one_ordered(), we will drop the extent maps for the
         * unwritten extents.
         *
         * However because completion runs in a work queue we can end up having
         * a fast fsync running before that. In the case of direct IO, once we
         * unlock the inode the fsync might start, and we queue the completion
         * before unlocking the inode. In the case of buffered IO when writeback
         * finishes (end_bbio_data_write()) we queue the completion, so if the
         * writeback was triggered by a fast fsync, the fsync might start
         * logging before ordered extent completion runs in the work queue.
         *
         * The fast fsync will log file extent items based on the extent maps it
         * finds, so if by the time it collects extent maps the ordered extent
         * completion didn't happen yet, it will log file extent items that
         * point to unwritten extents, resulting in a corruption if a crash
         * happens and the log tree is replayed. Note that a fast fsync does not
         * wait for completion of ordered extents in order to reduce latency.
         *
         * Set a flag in the inode so that the next fast fsync will wait for
         * ordered extents to complete before starting to log.
         */
        if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
                set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);

        if (ret)
                btrfs_queue_ordered_fn(ordered);
}

/*
 * Mark all ordered extents io inside the specified range finished.
 *
 * @folio:       The involved folio for the operation.
 *               For uncompressed buffered IO, the folio status also needs to be
 *               updated to indicate whether the pending ordered io is finished.
 *               Can be NULL for direct IO and compressed write.
 *               For these cases, callers are ensured they won't execute the
 *               endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
                                    struct folio *folio, u64 file_offset,
                                    u64 num_bytes, bool uptodate)
{
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        u64 cur = file_offset;

        trace_btrfs_writepage_end_io_hook(inode, file_offset,
                                          file_offset + num_bytes - 1,
                                          uptodate);

        spin_lock_irqsave(&inode->ordered_tree_lock, flags);
        while (cur < file_offset + num_bytes) {
                u64 entry_end;
                u64 end;
                u32 len;

                node = ordered_tree_search(inode, cur);
                /* No ordered extents at all */
                if (!node)
                        break;

                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                entry_end = entry->file_offset + entry->num_bytes;
                /*
                 * |<-- OE --->|  |
                 *                cur
                 * Go to next OE.
                 */
                if (cur >= entry_end) {
                        node = rb_next(node);
                        /* No more ordered extents, exit */
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_ordered_extent,
                                         rb_node);

                        /* Go to next ordered extent and continue */
                        cur = entry->file_offset;
                        continue;
                }
                /*
                 * |    |<--- OE --->|
                 * cur
                 * Go to the start of OE.
                 */
                if (cur < entry->file_offset) {
                        cur = entry->file_offset;
                        continue;
                }

                /*
                 * Now we are definitely inside one ordered extent.
                 *
                 * |<--- OE --->|
                 *      |
                 *      cur
                 */
                end = min(entry->file_offset + entry->num_bytes,
                          file_offset + num_bytes) - 1;
                ASSERT(end + 1 - cur < U32_MAX);
                len = end + 1 - cur;

                if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
                        spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
                        btrfs_queue_ordered_fn(entry);
                        spin_lock_irqsave(&inode->ordered_tree_lock, flags);
                }
                cur += len;
        }
        spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:      The cached ordered extent. If not NULL, we can skip the tree
 *               search and use the ordered extent directly.
 *               Will be also used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:     Length of the finish IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                    struct btrfs_ordered_extent **cached,
                                    u64 file_offset, u64 io_size)
{
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        bool finished = false;

        spin_lock_irqsave(&inode->ordered_tree_lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = ordered_tree_search(inode, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                goto out;

        if (io_size > entry->bytes_left)
                btrfs_crit(inode->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                           entry->bytes_left, io_size);

        entry->bytes_left -= io_size;

        if (entry->bytes_left == 0) {
                /*
                 * Ensure only one caller can set the flag and finished_ret
                 * accordingly
                 */
                finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /* test_and_set_bit implies a barrier */
                cond_wake_up_nomb(&entry->wait);
        }
out:
        if (finished && cached && entry) {
                *cached = entry;
                refcount_inc(&entry->refs);
                trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
        }
        spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
        return finished;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(entry->inode, entry);

        if (refcount_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(list_empty(&entry->log_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_root *root = btrfs_inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        bool pending;
        bool freespace_inode;

        /*
         * If this is a free space inode the thread has not acquired the ordered
         * extents lockdep map.
         */
        freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

        btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
        /* This is paired with alloc_ordered_extent(). */
        spin_lock(&btrfs_inode->lock);
        btrfs_mod_outstanding_extents(btrfs_inode, -1);
        spin_unlock(&btrfs_inode->lock);
        if (root != fs_info->tree_root) {
                u64 release;

                if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
                        release = entry->disk_num_bytes;
                else
                        release = entry->num_bytes;
                btrfs_delalloc_release_metadata(btrfs_inode, release,
                                                test_bit(BTRFS_ORDERED_IOERR,
                                                         &entry->flags));
        }

        percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
                                 fs_info->delalloc_batch);

        spin_lock_irq(&btrfs_inode->ordered_tree_lock);
        node = &entry->rb_node;
        rb_erase(node, &btrfs_inode->ordered_tree);
        RB_CLEAR_NODE(node);
        if (btrfs_inode->ordered_tree_last == node)
                btrfs_inode->ordered_tree_last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
        spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

        /*
         * The current running transaction is waiting on us, we need to let it
         * know that we're complete and wake it up.
         */
        if (pending) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be set,
                 * but if it isn't we don't want to deref/assert under the spin
                 * lock, so be nice and check if trans is set, but ASSERT() so
                 * if it isn't set a developer will notice.
                 */
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        refcount_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                ASSERT(trans || BTRFS_FS_ERROR(fs_info));
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
        if (!freespace_inode)
                btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

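/*
 * Flush work callback: start and wait for one ordered extent, then signal
 * completion for btrfs_wait_ordered_extents().
 */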
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered);
        complete(&ordered->completion);
}

/*
 * Wait for up to @nr ordered extents in a root. Restrict the wait to extents
 * that intersect @bg, or to the whole range if @bg is NULL.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const struct btrfs_block_group *bg)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        LIST_HEAD(splice);
        LIST_HEAD(skipped);
        LIST_HEAD(works);
        struct btrfs_ordered_extent *ordered, *next;
        u64 count = 0;
        u64 range_start, range_len;
        u64 range_end;

        if (bg) {
                range_start = bg->start;
                range_len = bg->length;
        } else {
                range_start = 0;
                range_len = U64_MAX;
        }
        range_end = range_start + range_len;

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);

                if (range_end <= ordered->disk_bytenr ||
                    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
                        list_move_tail(&ordered->root_extent_list, &skipped);
                        cond_resched_lock(&root->ordered_extent_lock);
                        continue;
                }

                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                refcount_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_run_ordered_extent_work, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

                cond_resched();
                if (nr != U64_MAX)
                        nr--;
                count++;
                spin_lock(&root->ordered_extent_lock);
        }
        list_splice_tail(&skipped, &root->ordered_extents);
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}

/*
 * Wait for @nr ordered extents that intersect the @bg, or the whole range of
 * the filesystem if @bg is NULL.
 */
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
                              const struct btrfs_block_group *bg)
{
        struct btrfs_root *root;
        LIST_HEAD(splice);
        u64 done;

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr, bg);
                btrfs_put_root(root);

                if (nr != U64_MAX)
                        nr -= done;

                spin_lock(&fs_info->ordered_root_lock);
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->num_bytes - 1;
        struct btrfs_inode *inode = entry->inode;
        bool freespace_inode;

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * If this is a free space inode do not take the ordered extents lockdep
         * map.
         */
        freespace_inode = btrfs_is_free_space_inode(inode);

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for the flusher thread to find them
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

        if (!freespace_inode)
                btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
        wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len)
{
        int ret = 0;
        int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = OFFSET_MAX;
        } else {
                orig_end = start + len - 1;
                if (orig_end > OFFSET_MAX)
                        orig_end = OFFSET_MAX;
        }

        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        ret = btrfs_fdatawrite_range(inode, start, orig_end);
        if (ret)
                return ret;

        /*
         * If we have a writeback error don't return immediately. Wait first
         * for any ordered extents that haven't completed yet. This is to make
         * sure no one can dirty the same page ranges and call writepages()
         * before the ordered extents complete - to avoid failures (-EEXIST)
         * when adding the new ordered extents to the ordered tree.
         */
        ret_wb = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, orig_end);

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->num_bytes <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(ordered);
                end = ordered->file_offset;
                /*
                 * If the ordered extent had an error save the error but don't
                 * exit without waiting first for all other ordered extents in
                 * the range to complete.
                 */
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
                                                         u64 file_offset)
{
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;

        spin_lock_irqsave(&inode->ordered_tree_lock, flags);
        node = ordered_tree_search(inode, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
                entry = NULL;
        if (entry) {
                refcount_inc(&entry->refs);
                trace_btrfs_ordered_extent_lookup(inode, entry);
        }
out:
        spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
        return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
                struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        spin_lock_irq(&inode->ordered_tree_lock);
        node = ordered_tree_search(inode, file_offset);
        if (!node) {
                node = ordered_tree_search(inode, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry) {
                refcount_inc(&entry->refs);
                trace_btrfs_ordered_extent_lookup_range(inode, entry);
        }
        spin_unlock_irq(&inode->ordered_tree_lock);
        return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
                                           struct list_head *list)
{
        struct rb_node *n;

        btrfs_assert_inode_locked(inode);

        spin_lock_irq(&inode->ordered_tree_lock);
        for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
                struct btrfs_ordered_extent *ordered;

                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

                if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;

                ASSERT(list_empty(&ordered->log_list));
                list_add_tail(&ordered->log_list, list);
                refcount_inc(&ordered->refs);
                trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
        }
        spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        spin_lock_irq(&inode->ordered_tree_lock);
        node = ordered_tree_search(inode, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        refcount_inc(&entry->refs);
        trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
        spin_unlock_irq(&inode->ordered_tree_lock);
        return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
                        struct btrfs_inode *inode, u64 file_offset, u64 len)
{
        struct rb_node *node;
        struct rb_node *cur;
        struct rb_node *prev;
        struct rb_node *next;
        struct btrfs_ordered_extent *entry = NULL;

        spin_lock_irq(&inode->ordered_tree_lock);
        node = inode->ordered_tree.rb_node;
        /*
         * Here we don't want to use tree_search() which will use tree->last
         * and screw up the search order.
         * And __tree_search() can't return the adjacent ordered extents
         * either, thus here we do our own search.
         */
        while (node) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset) {
                        node = node->rb_left;
                } else if (file_offset >= entry_end(entry)) {
                        node = node->rb_right;
                } else {
                        /*
                         * Direct hit, got an ordered extent that starts at
                         * @file_offset
                         */
                        goto out;
                }
        }
        if (!entry) {
                /* Empty tree */
                goto out;
        }

        cur = &entry->rb_node;
        /* We got an entry around @file_offset, check adjacent entries */
        if (entry->file_offset < file_offset) {
                prev = cur;
                next = rb_next(cur);
        } else {
                prev = rb_prev(cur);
                next = cur;
        }
        if (prev) {
                entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        goto out;
        }
        if (next) {
                entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        goto out;
        }
        /* No ordered extent in the range */
        entry = NULL;
out:
        if (entry) {
                refcount_inc(&entry->refs);
                trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
        }

        spin_unlock_irq(&inode->ordered_tree_lock);
        return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always returns with the given range locked, ensuring that once it returns
 * no ordered extent can be pending in it.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                        u64 end,
                                        struct extent_state **cached_state)
{
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cache = NULL;
        struct extent_state **cachedp = &cache;

        if (cached_state)
                cachedp = cached_state;

        while (1) {
                lock_extent(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
                        /*
                         * If no external cached_state has been passed then
                         * decrement the extra ref taken for cachedp since we
                         * aren't exposing it outside of this function
                         */
                        if (!cached_state)
                                refcount_dec(&cache->refs);
                        break;
                }
                unlock_extent(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
        }
}

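/*
 * A minimal caller sketch (hypothetical, for illustration only): a reader
 * that must not race with pending writeback could flush and lock the range
 * before inspecting it, then unlock with the same cached state:
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	// ... operate on [start, end] with no ordered extents pending ...
 *	unlock_extent(&inode->io_tree, start, end, &cached);
 */
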
/*
 * Try to lock the passed range in nowait mode and ensure no ordered extents
 * in it are pending.
 *
 * Return true if the range was locked and no ordered extents were found in
 * it, false otherwise.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
                                  struct extent_state **cached_state)
{
        struct btrfs_ordered_extent *ordered;

        if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
                return false;

        ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
        if (!ordered)
                return true;

        btrfs_put_ordered_extent(ordered);
        unlock_extent(&inode->io_tree, start, end, cached_state);

        return false;
}

/* Split out a new ordered extent for the first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
                        struct btrfs_ordered_extent *ordered, u64 len)
{
        struct btrfs_inode *inode = ordered->inode;
        struct btrfs_root *root = inode->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 file_offset = ordered->file_offset;
        u64 disk_bytenr = ordered->disk_bytenr;
        unsigned long flags = ordered->flags;
        struct btrfs_ordered_sum *sum, *tmpsum;
        struct btrfs_ordered_extent *new;
        struct rb_node *node;
        u64 offset = 0;

        trace_btrfs_ordered_extent_split(inode, ordered);

        ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

        /*
         * The entire bio must be covered by the ordered extent, but we can't
         * reduce the original extent to a zero length either.
         */
        if (WARN_ON_ONCE(len >= ordered->num_bytes))
                return ERR_PTR(-EINVAL);
        /* We cannot split partially completed ordered extents. */
        if (ordered->bytes_left) {
                ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
                if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
                        return ERR_PTR(-EINVAL);
        }
        /* We cannot split a compressed ordered extent. */
        if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
                return ERR_PTR(-EINVAL);

        new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
                                   len, 0, flags, ordered->compress_type);
        if (IS_ERR(new))
                return new;

        /* One ref for the tree. */
        refcount_inc(&new->refs);

        /*
         * Take the root's ordered_extent_lock to avoid a race with
         * btrfs_wait_ordered_extents() when updating the disk_bytenr and
         * disk_num_bytes fields of the ordered extent below. And we disable
         * IRQs because the inode's ordered_tree_lock is used in IRQ context
         * elsewhere.
         *
         * There's no concern about a previous caller of
         * btrfs_wait_ordered_extents() getting the trimmed ordered extent
         * before we insert the new one, because even if it gets the ordered
         * extent before it's trimmed and the new one inserted, right before it
         * uses it or during its use, the ordered extent might have been
         * trimmed in the meanwhile, and it missed the new ordered extent.
         * There's no way around this and it's harmless for current use cases,
         * so we take the root's ordered_extent_lock to fix that race during
         * trimming and silence tools like KCSAN.
         */
        spin_lock_irq(&root->ordered_extent_lock);
        spin_lock(&inode->ordered_tree_lock);

        /*
         * We don't have overlapping ordered extents (that would imply double
         * allocation of extents) and we checked above that the split length
         * does not cross the ordered extent's num_bytes field, so there's
         * no need to remove it and re-insert it in the tree.
         */
        ordered->file_offset += len;
        ordered->disk_bytenr += len;
        ordered->num_bytes -= len;
        ordered->disk_num_bytes -= len;
        ordered->ram_bytes -= len;

        if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
                ASSERT(ordered->bytes_left == 0);
                new->bytes_left = 0;
        } else {
                ordered->bytes_left -= len;
        }

        if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
                if (ordered->truncated_len > len) {
                        ordered->truncated_len -= len;
                } else {
                        new->truncated_len = ordered->truncated_len;
                        ordered->truncated_len = 0;
                }
        }

        list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
                if (offset == len)
                        break;
                list_move_tail(&sum->list, &new->list);
                offset += sum->len;
        }

        node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
        if (unlikely(node))
                btrfs_panic(fs_info, -EEXIST,
                        "inconsistency in ordered tree at offset %llu after split",
                        new->file_offset);
        spin_unlock(&inode->ordered_tree_lock);

        list_add_tail(&new->root_extent_list, &root->ordered_extents);
        root->nr_ordered_extents++;
        spin_unlock_irq(&root->ordered_extent_lock);
        return new;
}

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}

void __cold ordered_data_exit(void)
{
        kmem_cache_destroy(btrfs_ordered_extent_cache);
}