Btrfs: Remove superfluous casts from u64 to unsigned long long
fs/btrfs/ordered-data.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

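/* slab cache used for all struct btrfs_ordered_extent allocations */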
static struct kmem_cache *btrfs_ordered_extent_cache;

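/*
 * return the end byte of an ordered extent (file_offset + len), or
 * (u64)-1 if the addition would overflow
 */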
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

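/*
 * panic helper called when an ordered tree insert finds an existing
 * overlapping entry, which should never happen
 */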
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

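/*
 * helper to check if the byte range [file_offset, file_offset + len)
 * overlaps a given entry
 */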
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}

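/*
 * thin wrappers around __btrfs_add_ordered_extent for the buffered,
 * direct I/O and compressed write paths
 */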
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list, &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}

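/*
 * wait for the IO on every ordered extent that was logged in this
 * transaction slot, dropping the reference taken when it was logged
 */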
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

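/*
 * drop the references on any extents still on the logged list for this
 * transaction slot without waiting for their IO
 */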
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

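/*
 * worker callback: wait for a single ordered extent to complete and
 * then signal anyone blocked in wait_for_completion() on it
 */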
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);
		if (!inode) {
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);

		inode = ordered->inode;
		btrfs_put_ordered_extent(ordered);
		if (delay_iput)
			btrfs_add_delayed_iput(inode);
		else
			iput(inode);

		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

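/*
 * wait for ordered extents on every root that currently has any
 */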
void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
				    int delay_iput)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		btrfs_wait_ordered_extents(root, delay_iput);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							 u64 file_offset,
							 u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock_irq(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * undealt i_size.  Otherwise we will not know the
			 * real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}

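/*
 * create the slab cache for ordered extent allocations; called once
 * when the btrfs module initializes
 */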
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}