c1d7c514 1// SPDX-License-Identifier: GPL-2.0
16cdcec7
MX
2/*
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
16cdcec7
MX
5 */
6
7#include <linux/slab.h>
c7f88c4e 8#include <linux/iversion.h>
16cdcec7
MX
9#include "delayed-inode.h"
10#include "disk-io.h"
11#include "transaction.h"
3cae210f 12#include "ctree.h"
4f5427cc 13#include "qgroup.h"
16cdcec7 14
de3cb945
CM
15#define BTRFS_DELAYED_WRITEBACK 512
16#define BTRFS_DELAYED_BACKGROUND 128
17#define BTRFS_DELAYED_BATCH 16
16cdcec7
MX
18
19static struct kmem_cache *delayed_node_cache;
20
21int __init btrfs_delayed_inode_init(void)
22{
837e1972 23 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
16cdcec7
MX
24 sizeof(struct btrfs_delayed_node),
25 0,
fba4b697 26 SLAB_MEM_SPREAD,
16cdcec7
MX
27 NULL);
28 if (!delayed_node_cache)
29 return -ENOMEM;
30 return 0;
31}
32
e67c718b 33void __cold btrfs_delayed_inode_exit(void)
16cdcec7 34{
5598e900 35 kmem_cache_destroy(delayed_node_cache);
16cdcec7
MX
36}
37
38static inline void btrfs_init_delayed_node(
39 struct btrfs_delayed_node *delayed_node,
40 struct btrfs_root *root, u64 inode_id)
41{
42 delayed_node->root = root;
43 delayed_node->inode_id = inode_id;
6de5f18e 44 refcount_set(&delayed_node->refs, 0);
16cdcec7
MX
45 delayed_node->ins_root = RB_ROOT;
46 delayed_node->del_root = RB_ROOT;
47 mutex_init(&delayed_node->mutex);
16cdcec7
MX
48 INIT_LIST_HEAD(&delayed_node->n_list);
49 INIT_LIST_HEAD(&delayed_node->p_list);
16cdcec7
MX
50}
51
52static inline int btrfs_is_continuous_delayed_item(
53 struct btrfs_delayed_item *item1,
54 struct btrfs_delayed_item *item2)
55{
56 if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
57 item1->key.objectid == item2->key.objectid &&
58 item1->key.type == item2->key.type &&
59 item1->key.offset + 1 == item2->key.offset)
60 return 1;
61 return 0;
62}
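/*
 * A note on "continuous" items: only BTRFS_DIR_INDEX_KEY items of the
 * same objectid with adjacent offsets (offset + 1 == next offset) count
 * as continuous.  The batch insert/delete helpers below rely on this to
 * group such items into a single leaf operation.
 */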
63
f85b7379
DS
64static struct btrfs_delayed_node *btrfs_get_delayed_node(
65 struct btrfs_inode *btrfs_inode)
16cdcec7 66{
16cdcec7 67 struct btrfs_root *root = btrfs_inode->root;
4a0cc7ca 68 u64 ino = btrfs_ino(btrfs_inode);
2f7e33d4 69 struct btrfs_delayed_node *node;
16cdcec7 70
20c7bcec 71 node = READ_ONCE(btrfs_inode->delayed_node);
16cdcec7 72 if (node) {
6de5f18e 73 refcount_inc(&node->refs);
16cdcec7
MX
74 return node;
75 }
76
77 spin_lock(&root->inode_lock);
0d0ca30f 78 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
ec35e48b 79
16cdcec7
MX
80 if (node) {
81 if (btrfs_inode->delayed_node) {
6de5f18e 82 refcount_inc(&node->refs); /* can be accessed */
2f7e33d4 83 BUG_ON(btrfs_inode->delayed_node != node);
16cdcec7 84 spin_unlock(&root->inode_lock);
2f7e33d4 85 return node;
16cdcec7 86 }
ec35e48b
CM
87
88 /*
89 * It's possible that we're racing into the middle of removing
90 * this node from the radix tree. In this case, the refcount
91 * was zero and it should never go back to one. Just return
92 * NULL like it was never in the radix at all; our release
93 * function is in the process of removing it.
94 *
95 * Some implementations of refcount_inc refuse to bump the
96 * refcount once it has hit zero. If we don't do this dance
97 * here, refcount_inc() may decide to just WARN_ONCE() instead
98 * of actually bumping the refcount.
99 *
100 * If this node is properly in the radix, we want to bump the
101 * refcount twice, once for the inode and once for this get
102 * operation.
103 */
104 if (refcount_inc_not_zero(&node->refs)) {
105 refcount_inc(&node->refs);
106 btrfs_inode->delayed_node = node;
107 } else {
108 node = NULL;
109 }
110
16cdcec7
MX
111 spin_unlock(&root->inode_lock);
112 return node;
113 }
114 spin_unlock(&root->inode_lock);
115
2f7e33d4
MX
116 return NULL;
117}
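/*
 * Rough refcounting sketch for the lookup above: a node cached in
 * btrfs_inode->delayed_node already carries one reference on behalf of
 * the inode, and a successful lookup takes one more for the caller
 * (hence the double refcount_inc when a radix tree hit is first
 * attached to the inode).  A refcount already at zero means the release
 * path is tearing the node down, so the lookup pretends it was never
 * there.
 */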
118
79787eaa 119/* Will return either the node or PTR_ERR(-ENOMEM) */
2f7e33d4 120static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
f85b7379 121 struct btrfs_inode *btrfs_inode)
2f7e33d4
MX
122{
123 struct btrfs_delayed_node *node;
2f7e33d4 124 struct btrfs_root *root = btrfs_inode->root;
4a0cc7ca 125 u64 ino = btrfs_ino(btrfs_inode);
2f7e33d4
MX
126 int ret;
127
128again:
340c6ca9 129 node = btrfs_get_delayed_node(btrfs_inode);
2f7e33d4
MX
130 if (node)
131 return node;
132
352dd9c8 133 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
16cdcec7
MX
134 if (!node)
135 return ERR_PTR(-ENOMEM);
0d0ca30f 136 btrfs_init_delayed_node(node, root, ino);
16cdcec7 137
95e94d14 138 /* cached in the btrfs inode and can be accessed */
6de5f18e 139 refcount_set(&node->refs, 2);
16cdcec7 140
e1860a77 141 ret = radix_tree_preload(GFP_NOFS);
16cdcec7
MX
142 if (ret) {
143 kmem_cache_free(delayed_node_cache, node);
144 return ERR_PTR(ret);
145 }
146
147 spin_lock(&root->inode_lock);
0d0ca30f 148 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
16cdcec7 149 if (ret == -EEXIST) {
16cdcec7 150 spin_unlock(&root->inode_lock);
96493031 151 kmem_cache_free(delayed_node_cache, node);
16cdcec7
MX
152 radix_tree_preload_end();
153 goto again;
154 }
155 btrfs_inode->delayed_node = node;
156 spin_unlock(&root->inode_lock);
157 radix_tree_preload_end();
158
159 return node;
160}
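/*
 * The create path above preloads the radix tree, then inserts under
 * root->inode_lock; on -EEXIST the freshly allocated node is freed and
 * the lookup is retried, so exactly one delayed node ends up cached per
 * inode no matter how many tasks race here.
 */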
161
162/*
163 * Call it when holding delayed_node->mutex
164 *
165 * If mod = 1, add this node into the prepared list.
166 */
167static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
168 struct btrfs_delayed_node *node,
169 int mod)
170{
171 spin_lock(&root->lock);
7cf35d91 172 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
16cdcec7
MX
173 if (!list_empty(&node->p_list))
174 list_move_tail(&node->p_list, &root->prepare_list);
175 else if (mod)
176 list_add_tail(&node->p_list, &root->prepare_list);
177 } else {
178 list_add_tail(&node->n_list, &root->node_list);
179 list_add_tail(&node->p_list, &root->prepare_list);
6de5f18e 180 refcount_inc(&node->refs); /* inserted into list */
16cdcec7 181 root->nodes++;
7cf35d91 182 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
16cdcec7
MX
183 }
184 spin_unlock(&root->lock);
185}
186
187/* Call it when holding delayed_node->mutex */
188static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
189 struct btrfs_delayed_node *node)
190{
191 spin_lock(&root->lock);
7cf35d91 192 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
16cdcec7 193 root->nodes--;
6de5f18e 194 refcount_dec(&node->refs); /* not in the list */
16cdcec7
MX
195 list_del_init(&node->n_list);
196 if (!list_empty(&node->p_list))
197 list_del_init(&node->p_list);
7cf35d91 198 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
16cdcec7
MX
199 }
200 spin_unlock(&root->lock);
201}
202
48a3b636 203static struct btrfs_delayed_node *btrfs_first_delayed_node(
16cdcec7
MX
204 struct btrfs_delayed_root *delayed_root)
205{
206 struct list_head *p;
207 struct btrfs_delayed_node *node = NULL;
208
209 spin_lock(&delayed_root->lock);
210 if (list_empty(&delayed_root->node_list))
211 goto out;
212
213 p = delayed_root->node_list.next;
214 node = list_entry(p, struct btrfs_delayed_node, n_list);
6de5f18e 215 refcount_inc(&node->refs);
16cdcec7
MX
216out:
217 spin_unlock(&delayed_root->lock);
218
219 return node;
220}
221
48a3b636 222static struct btrfs_delayed_node *btrfs_next_delayed_node(
16cdcec7
MX
223 struct btrfs_delayed_node *node)
224{
225 struct btrfs_delayed_root *delayed_root;
226 struct list_head *p;
227 struct btrfs_delayed_node *next = NULL;
228
229 delayed_root = node->root->fs_info->delayed_root;
230 spin_lock(&delayed_root->lock);
7cf35d91
MX
231 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
232 /* not in the list */
16cdcec7
MX
233 if (list_empty(&delayed_root->node_list))
234 goto out;
235 p = delayed_root->node_list.next;
236 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
237 goto out;
238 else
239 p = node->n_list.next;
240
241 next = list_entry(p, struct btrfs_delayed_node, n_list);
6de5f18e 242 refcount_inc(&next->refs);
16cdcec7
MX
243out:
244 spin_unlock(&delayed_root->lock);
245
246 return next;
247}
248
249static void __btrfs_release_delayed_node(
250 struct btrfs_delayed_node *delayed_node,
251 int mod)
252{
253 struct btrfs_delayed_root *delayed_root;
254
255 if (!delayed_node)
256 return;
257
258 delayed_root = delayed_node->root->fs_info->delayed_root;
259
260 mutex_lock(&delayed_node->mutex);
261 if (delayed_node->count)
262 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
263 else
264 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
265 mutex_unlock(&delayed_node->mutex);
266
6de5f18e 267 if (refcount_dec_and_test(&delayed_node->refs)) {
16cdcec7 268 struct btrfs_root *root = delayed_node->root;
ec35e48b 269
16cdcec7 270 spin_lock(&root->inode_lock);
ec35e48b
CM
271 /*
272 * Once our refcount goes to zero, nobody is allowed to bump it
273 * back up. We can delete it now.
274 */
275 ASSERT(refcount_read(&delayed_node->refs) == 0);
276 radix_tree_delete(&root->delayed_nodes_tree,
277 delayed_node->inode_id);
16cdcec7 278 spin_unlock(&root->inode_lock);
ec35e48b 279 kmem_cache_free(delayed_node_cache, delayed_node);
16cdcec7
MX
280 }
281}
282
283static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
284{
285 __btrfs_release_delayed_node(node, 0);
286}
287
48a3b636 288static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
16cdcec7
MX
289 struct btrfs_delayed_root *delayed_root)
290{
291 struct list_head *p;
292 struct btrfs_delayed_node *node = NULL;
293
294 spin_lock(&delayed_root->lock);
295 if (list_empty(&delayed_root->prepare_list))
296 goto out;
297
298 p = delayed_root->prepare_list.next;
299 list_del_init(p);
300 node = list_entry(p, struct btrfs_delayed_node, p_list);
6de5f18e 301 refcount_inc(&node->refs);
16cdcec7
MX
302out:
303 spin_unlock(&delayed_root->lock);
304
305 return node;
306}
307
308static inline void btrfs_release_prepared_delayed_node(
309 struct btrfs_delayed_node *node)
310{
311 __btrfs_release_delayed_node(node, 1);
312}
313
48a3b636 314static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
16cdcec7
MX
315{
316 struct btrfs_delayed_item *item;
317 item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
318 if (item) {
319 item->data_len = data_len;
320 item->ins_or_del = 0;
321 item->bytes_reserved = 0;
16cdcec7 322 item->delayed_node = NULL;
089e77e1 323 refcount_set(&item->refs, 1);
16cdcec7
MX
324 }
325 return item;
326}
327
328/*
329 * __btrfs_lookup_delayed_item - look up the delayed item by key
330 * @delayed_node: pointer to the delayed node
331 * @key: the key to look up
332 * @prev: used to store the prev item if the right item isn't found
333 * @next: used to store the next item if the right item isn't found
334 *
335 * Note: if we don't find the right item, we will return the prev item and
336 * the next item.
337 */
338static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
339 struct rb_root *root,
340 struct btrfs_key *key,
341 struct btrfs_delayed_item **prev,
342 struct btrfs_delayed_item **next)
343{
344 struct rb_node *node, *prev_node = NULL;
345 struct btrfs_delayed_item *delayed_item = NULL;
346 int ret = 0;
347
348 node = root->rb_node;
349
350 while (node) {
351 delayed_item = rb_entry(node, struct btrfs_delayed_item,
352 rb_node);
353 prev_node = node;
354 ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
355 if (ret < 0)
356 node = node->rb_right;
357 else if (ret > 0)
358 node = node->rb_left;
359 else
360 return delayed_item;
361 }
362
363 if (prev) {
364 if (!prev_node)
365 *prev = NULL;
366 else if (ret < 0)
367 *prev = delayed_item;
368 else if ((node = rb_prev(prev_node)) != NULL) {
369 *prev = rb_entry(node, struct btrfs_delayed_item,
370 rb_node);
371 } else
372 *prev = NULL;
373 }
374
375 if (next) {
376 if (!prev_node)
377 *next = NULL;
378 else if (ret > 0)
379 *next = delayed_item;
380 else if ((node = rb_next(prev_node)) != NULL) {
381 *next = rb_entry(node, struct btrfs_delayed_item,
382 rb_node);
383 } else
384 *next = NULL;
385 }
386 return NULL;
387}
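/*
 * Worked example for the lookup above: if the tree holds items at
 * offsets 3 and 7 and we search for offset 5, NULL is returned while
 * *prev is set to the item at offset 3 and *next to the item at offset
 * 7 (either pointer may be NULL at the edges of the tree).
 */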
388
48a3b636 389static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
16cdcec7
MX
390 struct btrfs_delayed_node *delayed_node,
391 struct btrfs_key *key)
392{
e2c89907 393 return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
16cdcec7 394 NULL, NULL);
16cdcec7
MX
395}
396
16cdcec7
MX
397static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
398 struct btrfs_delayed_item *ins,
399 int action)
400{
401 struct rb_node **p, *node;
402 struct rb_node *parent_node = NULL;
403 struct rb_root *root;
404 struct btrfs_delayed_item *item;
405 int cmp;
406
407 if (action == BTRFS_DELAYED_INSERTION_ITEM)
408 root = &delayed_node->ins_root;
409 else if (action == BTRFS_DELAYED_DELETION_ITEM)
410 root = &delayed_node->del_root;
411 else
412 BUG();
413 p = &root->rb_node;
414 node = &ins->rb_node;
415
416 while (*p) {
417 parent_node = *p;
418 item = rb_entry(parent_node, struct btrfs_delayed_item,
419 rb_node);
420
421 cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
422 if (cmp < 0)
423 p = &(*p)->rb_right;
424 else if (cmp > 0)
425 p = &(*p)->rb_left;
426 else
427 return -EEXIST;
428 }
429
430 rb_link_node(node, parent_node, p);
431 rb_insert_color(node, root);
432 ins->delayed_node = delayed_node;
433 ins->ins_or_del = action;
434
435 if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
436 action == BTRFS_DELAYED_INSERTION_ITEM &&
437 ins->key.offset >= delayed_node->index_cnt)
438 delayed_node->index_cnt = ins->key.offset + 1;
439
440 delayed_node->count++;
441 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
442 return 0;
443}
444
445static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
446 struct btrfs_delayed_item *item)
447{
448 return __btrfs_add_delayed_item(node, item,
449 BTRFS_DELAYED_INSERTION_ITEM);
450}
451
452static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
453 struct btrfs_delayed_item *item)
454{
455 return __btrfs_add_delayed_item(node, item,
456 BTRFS_DELAYED_DELETION_ITEM);
457}
458
de3cb945
CM
459static void finish_one_item(struct btrfs_delayed_root *delayed_root)
460{
461 int seq = atomic_inc_return(&delayed_root->items_seq);
ee863954
DS
462
463 /*
464 * atomic_dec_return implies a barrier for waitqueue_active
465 */
de3cb945
CM
466 if ((atomic_dec_return(&delayed_root->items) <
467 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
468 waitqueue_active(&delayed_root->wait))
469 wake_up(&delayed_root->wait);
470}
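/*
 * finish_one_item() throttles wakeups: waiters on delayed_root->wait are
 * only woken when the outstanding item count drops below
 * BTRFS_DELAYED_BACKGROUND or on every BTRFS_DELAYED_BATCH-th completion
 * (tracked through items_seq).
 */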
471
16cdcec7
MX
472static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
473{
474 struct rb_root *root;
475 struct btrfs_delayed_root *delayed_root;
476
477 delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
478
479 BUG_ON(!delayed_root);
480 BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
481 delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
482
483 if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
484 root = &delayed_item->delayed_node->ins_root;
485 else
486 root = &delayed_item->delayed_node->del_root;
487
488 rb_erase(&delayed_item->rb_node, root);
489 delayed_item->delayed_node->count--;
de3cb945
CM
490
491 finish_one_item(delayed_root);
16cdcec7
MX
492}
493
494static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
495{
496 if (item) {
497 __btrfs_remove_delayed_item(item);
089e77e1 498 if (refcount_dec_and_test(&item->refs))
16cdcec7
MX
499 kfree(item);
500 }
501}
502
48a3b636 503static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
16cdcec7
MX
504 struct btrfs_delayed_node *delayed_node)
505{
506 struct rb_node *p;
507 struct btrfs_delayed_item *item = NULL;
508
509 p = rb_first(&delayed_node->ins_root);
510 if (p)
511 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
512
513 return item;
514}
515
48a3b636 516static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
16cdcec7
MX
517 struct btrfs_delayed_node *delayed_node)
518{
519 struct rb_node *p;
520 struct btrfs_delayed_item *item = NULL;
521
522 p = rb_first(&delayed_node->del_root);
523 if (p)
524 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
525
526 return item;
527}
528
48a3b636 529static struct btrfs_delayed_item *__btrfs_next_delayed_item(
16cdcec7
MX
530 struct btrfs_delayed_item *item)
531{
532 struct rb_node *p;
533 struct btrfs_delayed_item *next = NULL;
534
535 p = rb_next(&item->rb_node);
536 if (p)
537 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
538
539 return next;
540}
541
16cdcec7 542static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
4f5427cc 543 struct btrfs_root *root,
16cdcec7
MX
544 struct btrfs_delayed_item *item)
545{
546 struct btrfs_block_rsv *src_rsv;
547 struct btrfs_block_rsv *dst_rsv;
4f5427cc 548 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7
MX
549 u64 num_bytes;
550 int ret;
551
552 if (!trans->bytes_reserved)
553 return 0;
554
555 src_rsv = trans->block_rsv;
0b246afa 556 dst_rsv = &fs_info->delayed_block_rsv;
16cdcec7 557
0b246afa 558 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
f218ea6c
QW
559
560 /*
 561	 * Here we migrate space rsv from the transaction rsv, since we have
 562	 * already reserved space when starting a transaction. So there is no
 563	 * need to reserve qgroup space here.
564 */
25d609f8 565 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
8c2a3ca2 566 if (!ret) {
0b246afa 567 trace_btrfs_space_reservation(fs_info, "delayed_item",
8c2a3ca2
JB
568 item->key.objectid,
569 num_bytes, 1);
16cdcec7 570 item->bytes_reserved = num_bytes;
8c2a3ca2 571 }
16cdcec7
MX
572
573 return ret;
574}
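/*
 * Reservation model used above: space for a delayed item is not reserved
 * from scratch but migrated from the transaction's block_rsv (reserved at
 * transaction start) into fs_info->delayed_block_rsv, sized as
 * btrfs_calc_trans_metadata_size(fs_info, 1).
 */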
575
4f5427cc 576static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
16cdcec7
MX
577 struct btrfs_delayed_item *item)
578{
19fd2949 579 struct btrfs_block_rsv *rsv;
4f5427cc 580 struct btrfs_fs_info *fs_info = root->fs_info;
19fd2949 581
16cdcec7
MX
582 if (!item->bytes_reserved)
583 return;
584
0b246afa 585 rsv = &fs_info->delayed_block_rsv;
f218ea6c
QW
586 /*
587 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
588 * to release/reserve qgroup space.
589 */
0b246afa 590 trace_btrfs_space_reservation(fs_info, "delayed_item",
8c2a3ca2
JB
591 item->key.objectid, item->bytes_reserved,
592 0);
2ff7e61e 593 btrfs_block_rsv_release(fs_info, rsv,
16cdcec7
MX
594 item->bytes_reserved);
595}
596
597static int btrfs_delayed_inode_reserve_metadata(
598 struct btrfs_trans_handle *trans,
599 struct btrfs_root *root,
fcabdd1c 600 struct btrfs_inode *inode,
16cdcec7
MX
601 struct btrfs_delayed_node *node)
602{
0b246afa 603 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7
MX
604 struct btrfs_block_rsv *src_rsv;
605 struct btrfs_block_rsv *dst_rsv;
606 u64 num_bytes;
607 int ret;
608
16cdcec7 609 src_rsv = trans->block_rsv;
0b246afa 610 dst_rsv = &fs_info->delayed_block_rsv;
16cdcec7 611
0b246afa 612 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
c06a0e12
JB
613
614 /*
615 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
 616	 * which, for speed, doesn't reserve space. This is a problem since we
617 * still need to reserve space for this update, so try to reserve the
618 * space.
619 *
620 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
69fe2d75 621 * we always reserve enough to update the inode item.
c06a0e12 622 */
e755d9ab 623 if (!src_rsv || (!trans->bytes_reserved &&
66d8f3dd 624 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
f218ea6c
QW
625 ret = btrfs_qgroup_reserve_meta_prealloc(root,
626 fs_info->nodesize, true);
627 if (ret < 0)
628 return ret;
08e007d2
MX
629 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
630 BTRFS_RESERVE_NO_FLUSH);
c06a0e12
JB
631 /*
632 * Since we're under a transaction reserve_metadata_bytes could
633 * try to commit the transaction which will make it return
634 * EAGAIN to make us stop the transaction we have, so return
635 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
636 */
4f5427cc 637 if (ret == -EAGAIN) {
c06a0e12 638 ret = -ENOSPC;
4f5427cc
QW
639 btrfs_qgroup_free_meta_prealloc(root, num_bytes);
640 }
8c2a3ca2 641 if (!ret) {
c06a0e12 642 node->bytes_reserved = num_bytes;
0b246afa 643 trace_btrfs_space_reservation(fs_info,
8c2a3ca2 644 "delayed_inode",
fcabdd1c 645 btrfs_ino(inode),
8c2a3ca2 646 num_bytes, 1);
f218ea6c
QW
647 } else {
648 btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
8c2a3ca2 649 }
c06a0e12
JB
650 return ret;
651 }
652
25d609f8 653 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
8c2a3ca2 654 if (!ret) {
0b246afa 655 trace_btrfs_space_reservation(fs_info, "delayed_inode",
fcabdd1c 656 btrfs_ino(inode), num_bytes, 1);
16cdcec7 657 node->bytes_reserved = num_bytes;
8c2a3ca2 658 }
16cdcec7
MX
659
660 return ret;
661}
662
2ff7e61e 663static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
4f5427cc
QW
664 struct btrfs_delayed_node *node,
665 bool qgroup_free)
16cdcec7
MX
666{
667 struct btrfs_block_rsv *rsv;
668
669 if (!node->bytes_reserved)
670 return;
671
0b246afa
JM
672 rsv = &fs_info->delayed_block_rsv;
673 trace_btrfs_space_reservation(fs_info, "delayed_inode",
8c2a3ca2 674 node->inode_id, node->bytes_reserved, 0);
2ff7e61e 675 btrfs_block_rsv_release(fs_info, rsv,
16cdcec7 676 node->bytes_reserved);
4f5427cc
QW
677 if (qgroup_free)
678 btrfs_qgroup_free_meta_prealloc(node->root,
679 node->bytes_reserved);
680 else
681 btrfs_qgroup_convert_reserved_meta(node->root,
682 node->bytes_reserved);
16cdcec7
MX
683 node->bytes_reserved = 0;
684}
685
686/*
 687	 * This helper inserts as many continuous items as fit into the same leaf,
 688	 * according to the leaf's free space.
689 */
afe5fea7
TI
690static int btrfs_batch_insert_items(struct btrfs_root *root,
691 struct btrfs_path *path,
692 struct btrfs_delayed_item *item)
16cdcec7 693{
2ff7e61e 694 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7
MX
695 struct btrfs_delayed_item *curr, *next;
696 int free_space;
697 int total_data_size = 0, total_size = 0;
698 struct extent_buffer *leaf;
699 char *data_ptr;
700 struct btrfs_key *keys;
701 u32 *data_size;
702 struct list_head head;
703 int slot;
704 int nitems;
705 int i;
706 int ret = 0;
707
708 BUG_ON(!path->nodes[0]);
709
710 leaf = path->nodes[0];
2ff7e61e 711 free_space = btrfs_leaf_free_space(fs_info, leaf);
16cdcec7
MX
712 INIT_LIST_HEAD(&head);
713
714 next = item;
17aca1c9 715 nitems = 0;
16cdcec7
MX
716
717 /*
718 * count the number of the continuous items that we can insert in batch
719 */
720 while (total_size + next->data_len + sizeof(struct btrfs_item) <=
721 free_space) {
722 total_data_size += next->data_len;
723 total_size += next->data_len + sizeof(struct btrfs_item);
724 list_add_tail(&next->tree_list, &head);
725 nitems++;
726
727 curr = next;
728 next = __btrfs_next_delayed_item(curr);
729 if (!next)
730 break;
731
732 if (!btrfs_is_continuous_delayed_item(curr, next))
733 break;
734 }
735
736 if (!nitems) {
737 ret = 0;
738 goto out;
739 }
740
741 /*
 742	 * we need to allocate some memory space, but it might cause the task
743 * to sleep, so we set all locked nodes in the path to blocking locks
744 * first.
745 */
746 btrfs_set_path_blocking(path);
747
d9b0d9ba 748 keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
16cdcec7
MX
749 if (!keys) {
750 ret = -ENOMEM;
751 goto out;
752 }
753
d9b0d9ba 754 data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
16cdcec7
MX
755 if (!data_size) {
756 ret = -ENOMEM;
757 goto error;
758 }
759
760 /* get keys of all the delayed items */
761 i = 0;
762 list_for_each_entry(next, &head, tree_list) {
763 keys[i] = next->key;
764 data_size[i] = next->data_len;
765 i++;
766 }
767
 768	 /* reset all the locked nodes in the path to spinning locks. */
bd681513 769 btrfs_clear_path_blocking(path, NULL, 0);
16cdcec7
MX
770
771 /* insert the keys of the items */
afe5fea7 772 setup_items_for_insert(root, path, keys, data_size,
143bede5 773 total_data_size, total_size, nitems);
16cdcec7
MX
774
775 /* insert the dir index items */
776 slot = path->slots[0];
777 list_for_each_entry_safe(curr, next, &head, tree_list) {
778 data_ptr = btrfs_item_ptr(leaf, slot, char);
779 write_extent_buffer(leaf, &curr->data,
780 (unsigned long)data_ptr,
781 curr->data_len);
782 slot++;
783
4f5427cc 784 btrfs_delayed_item_release_metadata(root, curr);
16cdcec7
MX
785
786 list_del(&curr->tree_list);
787 btrfs_release_delayed_item(curr);
788 }
789
790error:
791 kfree(data_size);
792 kfree(keys);
793out:
794 return ret;
795}
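/*
 * Batch insertion above gathers as many continuous dir index items as fit
 * into the current leaf (bounded by btrfs_leaf_free_space()), inserts all
 * of their keys in one setup_items_for_insert() call, and only then copies
 * each item's data and drops its metadata reservation.
 */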
796
797/*
 798 * This helper handles simple insertions that don't need to extend the item
 799 * for new data, such as directory name index insertion and inode insertion.
800 */
801static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
802 struct btrfs_root *root,
803 struct btrfs_path *path,
804 struct btrfs_delayed_item *delayed_item)
805{
806 struct extent_buffer *leaf;
16cdcec7
MX
807 char *ptr;
808 int ret;
809
810 ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
811 delayed_item->data_len);
812 if (ret < 0 && ret != -EEXIST)
813 return ret;
814
815 leaf = path->nodes[0];
816
16cdcec7
MX
817 ptr = btrfs_item_ptr(leaf, path->slots[0], char);
818
819 write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
820 delayed_item->data_len);
821 btrfs_mark_buffer_dirty(leaf);
822
4f5427cc 823 btrfs_delayed_item_release_metadata(root, delayed_item);
16cdcec7
MX
824 return 0;
825}
826
827/*
828 * we insert an item first, then if there are some continuous items, we try
829 * to insert those items into the same leaf.
830 */
831static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
832 struct btrfs_path *path,
833 struct btrfs_root *root,
834 struct btrfs_delayed_node *node)
835{
836 struct btrfs_delayed_item *curr, *prev;
837 int ret = 0;
838
839do_again:
840 mutex_lock(&node->mutex);
841 curr = __btrfs_first_delayed_insertion_item(node);
842 if (!curr)
843 goto insert_end;
844
845 ret = btrfs_insert_delayed_item(trans, root, path, curr);
846 if (ret < 0) {
945d8962 847 btrfs_release_path(path);
16cdcec7
MX
848 goto insert_end;
849 }
850
851 prev = curr;
852 curr = __btrfs_next_delayed_item(prev);
853 if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
854 /* insert the continuous items into the same leaf */
855 path->slots[0]++;
afe5fea7 856 btrfs_batch_insert_items(root, path, curr);
16cdcec7
MX
857 }
858 btrfs_release_delayed_item(prev);
859 btrfs_mark_buffer_dirty(path->nodes[0]);
860
945d8962 861 btrfs_release_path(path);
16cdcec7
MX
862 mutex_unlock(&node->mutex);
863 goto do_again;
864
865insert_end:
866 mutex_unlock(&node->mutex);
867 return ret;
868}
869
870static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
871 struct btrfs_root *root,
872 struct btrfs_path *path,
873 struct btrfs_delayed_item *item)
874{
875 struct btrfs_delayed_item *curr, *next;
876 struct extent_buffer *leaf;
877 struct btrfs_key key;
878 struct list_head head;
879 int nitems, i, last_item;
880 int ret = 0;
881
882 BUG_ON(!path->nodes[0]);
883
884 leaf = path->nodes[0];
885
886 i = path->slots[0];
887 last_item = btrfs_header_nritems(leaf) - 1;
888 if (i > last_item)
889 return -ENOENT; /* FIXME: Is errno suitable? */
890
891 next = item;
892 INIT_LIST_HEAD(&head);
893 btrfs_item_key_to_cpu(leaf, &key, i);
894 nitems = 0;
895 /*
896 * count the number of the dir index items that we can delete in batch
897 */
898 while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
899 list_add_tail(&next->tree_list, &head);
900 nitems++;
901
902 curr = next;
903 next = __btrfs_next_delayed_item(curr);
904 if (!next)
905 break;
906
907 if (!btrfs_is_continuous_delayed_item(curr, next))
908 break;
909
910 i++;
911 if (i > last_item)
912 break;
913 btrfs_item_key_to_cpu(leaf, &key, i);
914 }
915
916 if (!nitems)
917 return 0;
918
919 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
920 if (ret)
921 goto out;
922
923 list_for_each_entry_safe(curr, next, &head, tree_list) {
4f5427cc 924 btrfs_delayed_item_release_metadata(root, curr);
16cdcec7
MX
925 list_del(&curr->tree_list);
926 btrfs_release_delayed_item(curr);
927 }
928
929out:
930 return ret;
931}
932
933static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
934 struct btrfs_path *path,
935 struct btrfs_root *root,
936 struct btrfs_delayed_node *node)
937{
938 struct btrfs_delayed_item *curr, *prev;
939 int ret = 0;
940
941do_again:
942 mutex_lock(&node->mutex);
943 curr = __btrfs_first_delayed_deletion_item(node);
944 if (!curr)
945 goto delete_fail;
946
947 ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
948 if (ret < 0)
949 goto delete_fail;
950 else if (ret > 0) {
951 /*
 952		 * can't find the item that the node points to, so this node
 953		 * is invalid; just drop it.
954 */
955 prev = curr;
956 curr = __btrfs_next_delayed_item(prev);
957 btrfs_release_delayed_item(prev);
958 ret = 0;
945d8962 959 btrfs_release_path(path);
62095265
FW
960 if (curr) {
961 mutex_unlock(&node->mutex);
16cdcec7 962 goto do_again;
62095265 963 } else
16cdcec7
MX
964 goto delete_fail;
965 }
966
967 btrfs_batch_delete_items(trans, root, path, curr);
945d8962 968 btrfs_release_path(path);
16cdcec7
MX
969 mutex_unlock(&node->mutex);
970 goto do_again;
971
972delete_fail:
945d8962 973 btrfs_release_path(path);
16cdcec7
MX
974 mutex_unlock(&node->mutex);
975 return ret;
976}
977
978static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
979{
980 struct btrfs_delayed_root *delayed_root;
981
7cf35d91
MX
982 if (delayed_node &&
983 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
16cdcec7 984 BUG_ON(!delayed_node->root);
7cf35d91 985 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
16cdcec7
MX
986 delayed_node->count--;
987
988 delayed_root = delayed_node->root->fs_info->delayed_root;
de3cb945 989 finish_one_item(delayed_root);
16cdcec7
MX
990 }
991}
992
67de1176
MX
993static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
994{
995 struct btrfs_delayed_root *delayed_root;
996
997 ASSERT(delayed_node->root);
998 clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
999 delayed_node->count--;
1000
1001 delayed_root = delayed_node->root->fs_info->delayed_root;
1002 finish_one_item(delayed_root);
1003}
1004
0e8c36a9
MX
1005static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1006 struct btrfs_root *root,
1007 struct btrfs_path *path,
1008 struct btrfs_delayed_node *node)
16cdcec7 1009{
2ff7e61e 1010 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7
MX
1011 struct btrfs_key key;
1012 struct btrfs_inode_item *inode_item;
1013 struct extent_buffer *leaf;
67de1176 1014 int mod;
16cdcec7
MX
1015 int ret;
1016
16cdcec7 1017 key.objectid = node->inode_id;
962a298f 1018 key.type = BTRFS_INODE_ITEM_KEY;
16cdcec7 1019 key.offset = 0;
0e8c36a9 1020
67de1176
MX
1021 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1022 mod = -1;
1023 else
1024 mod = 1;
1025
1026 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
16cdcec7 1027 if (ret > 0) {
945d8962 1028 btrfs_release_path(path);
16cdcec7
MX
1029 return -ENOENT;
1030 } else if (ret < 0) {
16cdcec7
MX
1031 return ret;
1032 }
1033
16cdcec7
MX
1034 leaf = path->nodes[0];
1035 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1036 struct btrfs_inode_item);
1037 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1038 sizeof(struct btrfs_inode_item));
1039 btrfs_mark_buffer_dirty(leaf);
16cdcec7 1040
67de1176
MX
1041 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1042 goto no_iref;
1043
1044 path->slots[0]++;
1045 if (path->slots[0] >= btrfs_header_nritems(leaf))
1046 goto search;
1047again:
1048 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1049 if (key.objectid != node->inode_id)
1050 goto out;
1051
1052 if (key.type != BTRFS_INODE_REF_KEY &&
1053 key.type != BTRFS_INODE_EXTREF_KEY)
1054 goto out;
1055
1056 /*
 1057	 * Delayed iref deletion is only used for an inode that has a single
 1058	 * link, so there is only one iref. The case of several irefs sharing
 1059	 * the same item doesn't exist.
1060 */
1061 btrfs_del_item(trans, root, path);
1062out:
1063 btrfs_release_delayed_iref(node);
1064no_iref:
1065 btrfs_release_path(path);
1066err_out:
4f5427cc 1067 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
16cdcec7 1068 btrfs_release_delayed_inode(node);
16cdcec7 1069
67de1176
MX
1070 return ret;
1071
1072search:
1073 btrfs_release_path(path);
1074
962a298f 1075 key.type = BTRFS_INODE_EXTREF_KEY;
67de1176
MX
1076 key.offset = -1;
1077 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1078 if (ret < 0)
1079 goto err_out;
1080 ASSERT(ret);
1081
1082 ret = 0;
1083 leaf = path->nodes[0];
1084 path->slots[0]--;
1085 goto again;
16cdcec7
MX
1086}
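/*
 * For the delayed iref deletion handled above: with DEL_IREF set, the ref
 * item of a single-link inode is usually found in the slot right after the
 * inode item in the same leaf (path->slots[0] + 1); if the leaf ends there,
 * the "search" label falls back to a lookup keyed at
 * (inode_id, BTRFS_INODE_EXTREF_KEY, -1) and steps one slot back.
 */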
1087
0e8c36a9
MX
1088static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1089 struct btrfs_root *root,
1090 struct btrfs_path *path,
1091 struct btrfs_delayed_node *node)
1092{
1093 int ret;
1094
1095 mutex_lock(&node->mutex);
7cf35d91 1096 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
0e8c36a9
MX
1097 mutex_unlock(&node->mutex);
1098 return 0;
1099 }
1100
1101 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1102 mutex_unlock(&node->mutex);
1103 return ret;
1104}
1105
4ea41ce0
MX
1106static inline int
1107__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1108 struct btrfs_path *path,
1109 struct btrfs_delayed_node *node)
1110{
1111 int ret;
1112
1113 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1114 if (ret)
1115 return ret;
1116
1117 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1118 if (ret)
1119 return ret;
1120
1121 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1122 return ret;
1123}
1124
79787eaa
JM
1125/*
1126 * Called when committing the transaction.
1127 * Returns 0 on success.
1128 * Returns < 0 on error and returns with an aborted transaction with any
1129 * outstanding delayed items cleaned up.
1130 */
b84acab3 1131static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
16cdcec7 1132{
b84acab3 1133 struct btrfs_fs_info *fs_info = trans->fs_info;
16cdcec7
MX
1134 struct btrfs_delayed_root *delayed_root;
1135 struct btrfs_delayed_node *curr_node, *prev_node;
1136 struct btrfs_path *path;
19fd2949 1137 struct btrfs_block_rsv *block_rsv;
16cdcec7 1138 int ret = 0;
96c3f433 1139 bool count = (nr > 0);
16cdcec7 1140
79787eaa
JM
1141 if (trans->aborted)
1142 return -EIO;
1143
16cdcec7
MX
1144 path = btrfs_alloc_path();
1145 if (!path)
1146 return -ENOMEM;
1147 path->leave_spinning = 1;
1148
19fd2949 1149 block_rsv = trans->block_rsv;
0b246afa 1150 trans->block_rsv = &fs_info->delayed_block_rsv;
19fd2949 1151
ccdf9b30 1152 delayed_root = fs_info->delayed_root;
16cdcec7
MX
1153
1154 curr_node = btrfs_first_delayed_node(delayed_root);
96c3f433 1155 while (curr_node && (!count || (count && nr--))) {
4ea41ce0
MX
1156 ret = __btrfs_commit_inode_delayed_items(trans, path,
1157 curr_node);
16cdcec7
MX
1158 if (ret) {
1159 btrfs_release_delayed_node(curr_node);
96c3f433 1160 curr_node = NULL;
66642832 1161 btrfs_abort_transaction(trans, ret);
16cdcec7
MX
1162 break;
1163 }
1164
1165 prev_node = curr_node;
1166 curr_node = btrfs_next_delayed_node(curr_node);
1167 btrfs_release_delayed_node(prev_node);
1168 }
1169
96c3f433
JB
1170 if (curr_node)
1171 btrfs_release_delayed_node(curr_node);
16cdcec7 1172 btrfs_free_path(path);
19fd2949 1173 trans->block_rsv = block_rsv;
79787eaa 1174
16cdcec7
MX
1175 return ret;
1176}
1177
e5c304e6 1178int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
96c3f433 1179{
b84acab3 1180 return __btrfs_run_delayed_items(trans, -1);
96c3f433
JB
1181}
1182
e5c304e6 1183int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
96c3f433 1184{
b84acab3 1185 return __btrfs_run_delayed_items(trans, nr);
96c3f433
JB
1186}
1187
16cdcec7 1188int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
5f4b32e9 1189 struct btrfs_inode *inode)
16cdcec7 1190{
5f4b32e9 1191 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
4ea41ce0
MX
1192 struct btrfs_path *path;
1193 struct btrfs_block_rsv *block_rsv;
16cdcec7
MX
1194 int ret;
1195
1196 if (!delayed_node)
1197 return 0;
1198
1199 mutex_lock(&delayed_node->mutex);
1200 if (!delayed_node->count) {
1201 mutex_unlock(&delayed_node->mutex);
1202 btrfs_release_delayed_node(delayed_node);
1203 return 0;
1204 }
1205 mutex_unlock(&delayed_node->mutex);
1206
4ea41ce0 1207 path = btrfs_alloc_path();
3c77bd94
FDBM
1208 if (!path) {
1209 btrfs_release_delayed_node(delayed_node);
4ea41ce0 1210 return -ENOMEM;
3c77bd94 1211 }
4ea41ce0
MX
1212 path->leave_spinning = 1;
1213
1214 block_rsv = trans->block_rsv;
1215 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1216
1217 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1218
16cdcec7 1219 btrfs_release_delayed_node(delayed_node);
4ea41ce0
MX
1220 btrfs_free_path(path);
1221 trans->block_rsv = block_rsv;
1222
16cdcec7
MX
1223 return ret;
1224}
1225
aa79021f 1226int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
0e8c36a9 1227{
aa79021f 1228 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
0e8c36a9 1229 struct btrfs_trans_handle *trans;
aa79021f 1230 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
0e8c36a9
MX
1231 struct btrfs_path *path;
1232 struct btrfs_block_rsv *block_rsv;
1233 int ret;
1234
1235 if (!delayed_node)
1236 return 0;
1237
1238 mutex_lock(&delayed_node->mutex);
7cf35d91 1239 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
0e8c36a9
MX
1240 mutex_unlock(&delayed_node->mutex);
1241 btrfs_release_delayed_node(delayed_node);
1242 return 0;
1243 }
1244 mutex_unlock(&delayed_node->mutex);
1245
1246 trans = btrfs_join_transaction(delayed_node->root);
1247 if (IS_ERR(trans)) {
1248 ret = PTR_ERR(trans);
1249 goto out;
1250 }
1251
1252 path = btrfs_alloc_path();
1253 if (!path) {
1254 ret = -ENOMEM;
1255 goto trans_out;
1256 }
1257 path->leave_spinning = 1;
1258
1259 block_rsv = trans->block_rsv;
2ff7e61e 1260 trans->block_rsv = &fs_info->delayed_block_rsv;
0e8c36a9
MX
1261
1262 mutex_lock(&delayed_node->mutex);
7cf35d91 1263 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
0e8c36a9
MX
1264 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1265 path, delayed_node);
1266 else
1267 ret = 0;
1268 mutex_unlock(&delayed_node->mutex);
1269
1270 btrfs_free_path(path);
1271 trans->block_rsv = block_rsv;
1272trans_out:
3a45bb20 1273 btrfs_end_transaction(trans);
2ff7e61e 1274 btrfs_btree_balance_dirty(fs_info);
0e8c36a9
MX
1275out:
1276 btrfs_release_delayed_node(delayed_node);
1277
1278 return ret;
1279}
1280
f48d1cf5 1281void btrfs_remove_delayed_node(struct btrfs_inode *inode)
16cdcec7
MX
1282{
1283 struct btrfs_delayed_node *delayed_node;
1284
f48d1cf5 1285 delayed_node = READ_ONCE(inode->delayed_node);
16cdcec7
MX
1286 if (!delayed_node)
1287 return;
1288
f48d1cf5 1289 inode->delayed_node = NULL;
16cdcec7
MX
1290 btrfs_release_delayed_node(delayed_node);
1291}
1292
de3cb945
CM
1293struct btrfs_async_delayed_work {
1294 struct btrfs_delayed_root *delayed_root;
1295 int nr;
d458b054 1296 struct btrfs_work work;
16cdcec7
MX
1297};
1298
d458b054 1299static void btrfs_async_run_delayed_root(struct btrfs_work *work)
16cdcec7 1300{
de3cb945
CM
1301 struct btrfs_async_delayed_work *async_work;
1302 struct btrfs_delayed_root *delayed_root;
16cdcec7
MX
1303 struct btrfs_trans_handle *trans;
1304 struct btrfs_path *path;
1305 struct btrfs_delayed_node *delayed_node = NULL;
1306 struct btrfs_root *root;
19fd2949 1307 struct btrfs_block_rsv *block_rsv;
de3cb945 1308 int total_done = 0;
16cdcec7 1309
de3cb945
CM
1310 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1311 delayed_root = async_work->delayed_root;
16cdcec7
MX
1312
1313 path = btrfs_alloc_path();
1314 if (!path)
1315 goto out;
16cdcec7 1316
617c54a8
NB
1317 do {
1318 if (atomic_read(&delayed_root->items) <
1319 BTRFS_DELAYED_BACKGROUND / 2)
1320 break;
de3cb945 1321
617c54a8
NB
1322 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1323 if (!delayed_node)
1324 break;
de3cb945 1325
617c54a8
NB
1326 path->leave_spinning = 1;
1327 root = delayed_node->root;
16cdcec7 1328
617c54a8
NB
1329 trans = btrfs_join_transaction(root);
1330 if (IS_ERR(trans)) {
1331 btrfs_release_path(path);
1332 btrfs_release_prepared_delayed_node(delayed_node);
1333 total_done++;
1334 continue;
1335 }
16cdcec7 1336
617c54a8
NB
1337 block_rsv = trans->block_rsv;
1338 trans->block_rsv = &root->fs_info->delayed_block_rsv;
19fd2949 1339
617c54a8 1340 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
16cdcec7 1341
617c54a8
NB
1342 trans->block_rsv = block_rsv;
1343 btrfs_end_transaction(trans);
1344 btrfs_btree_balance_dirty_nodelay(root->fs_info);
de3cb945 1345
617c54a8
NB
1346 btrfs_release_path(path);
1347 btrfs_release_prepared_delayed_node(delayed_node);
1348 total_done++;
de3cb945 1349
617c54a8
NB
1350 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1351 || total_done < async_work->nr);
de3cb945 1352
16cdcec7
MX
1353 btrfs_free_path(path);
1354out:
de3cb945
CM
1355 wake_up(&delayed_root->wait);
1356 kfree(async_work);
16cdcec7
MX
1357}
1358
de3cb945 1359
16cdcec7 1360static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
a585e948 1361 struct btrfs_fs_info *fs_info, int nr)
16cdcec7 1362{
de3cb945 1363 struct btrfs_async_delayed_work *async_work;
16cdcec7 1364
de3cb945
CM
1365 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1366 if (!async_work)
16cdcec7 1367 return -ENOMEM;
16cdcec7 1368
de3cb945 1369 async_work->delayed_root = delayed_root;
9e0af237
LB
1370 btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
1371 btrfs_async_run_delayed_root, NULL, NULL);
de3cb945 1372 async_work->nr = nr;
16cdcec7 1373
a585e948 1374 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
16cdcec7
MX
1375 return 0;
1376}
1377
ccdf9b30 1378void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
e999376f 1379{
ccdf9b30 1380 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
e999376f
CM
1381}
1382
0353808c 1383static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
de3cb945
CM
1384{
1385 int val = atomic_read(&delayed_root->items_seq);
1386
0353808c 1387 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
de3cb945 1388 return 1;
0353808c
MX
1389
1390 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1391 return 1;
1392
de3cb945
CM
1393 return 0;
1394}
1395
2ff7e61e 1396void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
16cdcec7 1397{
2ff7e61e 1398 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
16cdcec7 1399
8577787f
NB
1400 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1401 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
16cdcec7
MX
1402 return;
1403
1404 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
0353808c 1405 int seq;
16cdcec7 1406 int ret;
0353808c
MX
1407
1408 seq = atomic_read(&delayed_root->items_seq);
de3cb945 1409
a585e948 1410 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
16cdcec7
MX
1411 if (ret)
1412 return;
1413
0353808c
MX
1414 wait_event_interruptible(delayed_root->wait,
1415 could_end_wait(delayed_root, seq));
4dd466d3 1416 return;
16cdcec7
MX
1417 }
1418
a585e948 1419 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
16cdcec7
MX
1420}
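/*
 * Balancing policy above: nothing is done while fewer than
 * BTRFS_DELAYED_BACKGROUND items are queued (or the worker is congested);
 * once BTRFS_DELAYED_WRITEBACK is reached the caller kicks an async flush
 * and waits for the backlog to shrink; in between, a background flush of
 * roughly BTRFS_DELAYED_BATCH nodes is queued without waiting.
 */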
1421
79787eaa 1422/* Will return 0 or -ENOMEM */
16cdcec7 1423int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
2ff7e61e
JM
1424 struct btrfs_fs_info *fs_info,
1425 const char *name, int name_len,
6f45d185 1426 struct btrfs_inode *dir,
16cdcec7
MX
1427 struct btrfs_disk_key *disk_key, u8 type,
1428 u64 index)
1429{
1430 struct btrfs_delayed_node *delayed_node;
1431 struct btrfs_delayed_item *delayed_item;
1432 struct btrfs_dir_item *dir_item;
1433 int ret;
1434
6f45d185 1435 delayed_node = btrfs_get_or_create_delayed_node(dir);
16cdcec7
MX
1436 if (IS_ERR(delayed_node))
1437 return PTR_ERR(delayed_node);
1438
1439 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1440 if (!delayed_item) {
1441 ret = -ENOMEM;
1442 goto release_node;
1443 }
1444
6f45d185 1445 delayed_item->key.objectid = btrfs_ino(dir);
962a298f 1446 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
16cdcec7
MX
1447 delayed_item->key.offset = index;
1448
1449 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1450 dir_item->location = *disk_key;
3cae210f
QW
1451 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1452 btrfs_set_stack_dir_data_len(dir_item, 0);
1453 btrfs_set_stack_dir_name_len(dir_item, name_len);
1454 btrfs_set_stack_dir_type(dir_item, type);
16cdcec7
MX
1455 memcpy((char *)(dir_item + 1), name, name_len);
1456
4f5427cc 1457 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
8c2a3ca2
JB
1458 /*
 1459	 * we have reserved enough space when we started the transaction,
 1460	 * so a metadata reservation failure here is impossible
1461 */
1462 BUG_ON(ret);
1463
1464
16cdcec7
MX
1465 mutex_lock(&delayed_node->mutex);
1466 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1467 if (unlikely(ret)) {
2ff7e61e 1468 btrfs_err(fs_info,
5d163e0e
JM
1469 "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1470 name_len, name, delayed_node->root->objectid,
1471 delayed_node->inode_id, ret);
16cdcec7
MX
1472 BUG();
1473 }
1474 mutex_unlock(&delayed_node->mutex);
1475
1476release_node:
1477 btrfs_release_delayed_node(delayed_node);
1478 return ret;
1479}
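/*
 * Dir index insertion above stages a complete struct btrfs_dir_item plus
 * the name inline in the delayed item's data, keyed as
 * (dir ino, BTRFS_DIR_INDEX_KEY, index), so readdir can serve the entry
 * from memory before it ever reaches a leaf.
 */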
1480
2ff7e61e 1481static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
16cdcec7
MX
1482 struct btrfs_delayed_node *node,
1483 struct btrfs_key *key)
1484{
1485 struct btrfs_delayed_item *item;
1486
1487 mutex_lock(&node->mutex);
1488 item = __btrfs_lookup_delayed_insertion_item(node, key);
1489 if (!item) {
1490 mutex_unlock(&node->mutex);
1491 return 1;
1492 }
1493
4f5427cc 1494 btrfs_delayed_item_release_metadata(node->root, item);
16cdcec7
MX
1495 btrfs_release_delayed_item(item);
1496 mutex_unlock(&node->mutex);
1497 return 0;
1498}
1499
1500int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
2ff7e61e 1501 struct btrfs_fs_info *fs_info,
e67bbbb9 1502 struct btrfs_inode *dir, u64 index)
16cdcec7
MX
1503{
1504 struct btrfs_delayed_node *node;
1505 struct btrfs_delayed_item *item;
1506 struct btrfs_key item_key;
1507 int ret;
1508
e67bbbb9 1509 node = btrfs_get_or_create_delayed_node(dir);
16cdcec7
MX
1510 if (IS_ERR(node))
1511 return PTR_ERR(node);
1512
e67bbbb9 1513 item_key.objectid = btrfs_ino(dir);
962a298f 1514 item_key.type = BTRFS_DIR_INDEX_KEY;
16cdcec7
MX
1515 item_key.offset = index;
1516
2ff7e61e 1517 ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
16cdcec7
MX
1518 if (!ret)
1519 goto end;
1520
1521 item = btrfs_alloc_delayed_item(0);
1522 if (!item) {
1523 ret = -ENOMEM;
1524 goto end;
1525 }
1526
1527 item->key = item_key;
1528
4f5427cc 1529 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
16cdcec7
MX
1530 /*
 1531	 * we have reserved enough space when we started the transaction,
 1532	 * so a metadata reservation failure here is impossible.
1533 */
1534 BUG_ON(ret);
1535
1536 mutex_lock(&node->mutex);
1537 ret = __btrfs_add_delayed_deletion_item(node, item);
1538 if (unlikely(ret)) {
2ff7e61e 1539 btrfs_err(fs_info,
5d163e0e
JM
1540 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1541 index, node->root->objectid, node->inode_id, ret);
16cdcec7
MX
1542 BUG();
1543 }
1544 mutex_unlock(&node->mutex);
1545end:
1546 btrfs_release_delayed_node(node);
1547 return ret;
1548}
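/*
 * Deletion above first tries to cancel a still-pending insertion item with
 * the same key; only when none is queued does it add a separate deletion
 * item, so an index created and removed while its insertion is still
 * delayed never has to touch the on-disk dir index tree.
 */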
1549
f5cc7b80 1550int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
16cdcec7 1551{
f5cc7b80 1552 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
16cdcec7
MX
1553
1554 if (!delayed_node)
1555 return -ENOENT;
1556
1557 /*
 1558	 * Since we hold the i_mutex of this directory, it is impossible for
 1559	 * a new directory index to be added to the delayed node or for
 1560	 * index_cnt to be updated now. So we needn't lock the delayed node.
1561 */
2f7e33d4
MX
1562 if (!delayed_node->index_cnt) {
1563 btrfs_release_delayed_node(delayed_node);
16cdcec7 1564 return -EINVAL;
2f7e33d4 1565 }
16cdcec7 1566
f5cc7b80 1567 inode->index_cnt = delayed_node->index_cnt;
2f7e33d4
MX
1568 btrfs_release_delayed_node(delayed_node);
1569 return 0;
16cdcec7
MX
1570}
1571
02dbfc99
OS
1572bool btrfs_readdir_get_delayed_items(struct inode *inode,
1573 struct list_head *ins_list,
1574 struct list_head *del_list)
16cdcec7
MX
1575{
1576 struct btrfs_delayed_node *delayed_node;
1577 struct btrfs_delayed_item *item;
1578
340c6ca9 1579 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
16cdcec7 1580 if (!delayed_node)
02dbfc99
OS
1581 return false;
1582
1583 /*
1584 * We can only do one readdir with delayed items at a time because of
1585 * item->readdir_list.
1586 */
1587 inode_unlock_shared(inode);
1588 inode_lock(inode);
16cdcec7
MX
1589
1590 mutex_lock(&delayed_node->mutex);
1591 item = __btrfs_first_delayed_insertion_item(delayed_node);
1592 while (item) {
089e77e1 1593 refcount_inc(&item->refs);
16cdcec7
MX
1594 list_add_tail(&item->readdir_list, ins_list);
1595 item = __btrfs_next_delayed_item(item);
1596 }
1597
1598 item = __btrfs_first_delayed_deletion_item(delayed_node);
1599 while (item) {
089e77e1 1600 refcount_inc(&item->refs);
16cdcec7
MX
1601 list_add_tail(&item->readdir_list, del_list);
1602 item = __btrfs_next_delayed_item(item);
1603 }
1604 mutex_unlock(&delayed_node->mutex);
1605 /*
 1606	 * This delayed node is still cached in the btrfs inode, so refs
 1607	 * must be > 1 now, and we needn't check whether it is going to be
 1608	 * freed or not.
 1609	 *
 1610	 * Besides that, this function is used to read the dir; we do not
 1611	 * insert/delete delayed items in this period, so we also needn't
 1612	 * requeue or dequeue this delayed node.
1613 */
6de5f18e 1614 refcount_dec(&delayed_node->refs);
02dbfc99
OS
1615
1616 return true;
16cdcec7
MX
1617}
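/*
 * Locking note for the snapshot above: the shared inode lock taken by the
 * VFS for readdir is upgraded to an exclusive one because each delayed
 * item has a single readdir_list hook, so only one readdir may hold the
 * pending items at a time; btrfs_readdir_put_delayed_items() downgrades
 * back to a read lock before returning to the VFS.
 */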
1618
02dbfc99
OS
1619void btrfs_readdir_put_delayed_items(struct inode *inode,
1620 struct list_head *ins_list,
1621 struct list_head *del_list)
16cdcec7
MX
1622{
1623 struct btrfs_delayed_item *curr, *next;
1624
1625 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1626 list_del(&curr->readdir_list);
089e77e1 1627 if (refcount_dec_and_test(&curr->refs))
16cdcec7
MX
1628 kfree(curr);
1629 }
1630
1631 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1632 list_del(&curr->readdir_list);
089e77e1 1633 if (refcount_dec_and_test(&curr->refs))
16cdcec7
MX
1634 kfree(curr);
1635 }
02dbfc99
OS
1636
1637 /*
1638 * The VFS is going to do up_read(), so we need to downgrade back to a
1639 * read lock.
1640 */
1641 downgrade_write(&inode->i_rwsem);
16cdcec7
MX
1642}
1643
1644int btrfs_should_delete_dir_index(struct list_head *del_list,
1645 u64 index)
1646{
e4fd493c
JB
1647 struct btrfs_delayed_item *curr;
1648 int ret = 0;
16cdcec7 1649
e4fd493c 1650 list_for_each_entry(curr, del_list, readdir_list) {
16cdcec7
MX
1651 if (curr->key.offset > index)
1652 break;
e4fd493c
JB
1653 if (curr->key.offset == index) {
1654 ret = 1;
1655 break;
1656 }
16cdcec7 1657 }
e4fd493c 1658 return ret;
16cdcec7
MX
1659}
1660
1661/*
1662 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1663 *
1664 */
9cdda8d3 1665int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
d2fbb2b5 1666 struct list_head *ins_list)
16cdcec7
MX
1667{
1668 struct btrfs_dir_item *di;
1669 struct btrfs_delayed_item *curr, *next;
1670 struct btrfs_key location;
1671 char *name;
1672 int name_len;
1673 int over = 0;
1674 unsigned char d_type;
1675
1676 if (list_empty(ins_list))
1677 return 0;
1678
1679 /*
 1680	 * Changing the data of the delayed item is impossible, so
 1681	 * we needn't lock them. And we hold the i_mutex of the
 1682	 * directory, so nobody can delete any directory indexes now.
1683 */
1684 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1685 list_del(&curr->readdir_list);
1686
9cdda8d3 1687 if (curr->key.offset < ctx->pos) {
089e77e1 1688 if (refcount_dec_and_test(&curr->refs))
16cdcec7
MX
1689 kfree(curr);
1690 continue;
1691 }
1692
9cdda8d3 1693 ctx->pos = curr->key.offset;
16cdcec7
MX
1694
1695 di = (struct btrfs_dir_item *)curr->data;
1696 name = (char *)(di + 1);
3cae210f 1697 name_len = btrfs_stack_dir_name_len(di);
16cdcec7
MX
1698
1699 d_type = btrfs_filetype_table[di->type];
1700 btrfs_disk_key_to_cpu(&location, &di->location);
1701
9cdda8d3 1702 over = !dir_emit(ctx, name, name_len,
16cdcec7
MX
1703 location.objectid, d_type);
1704
089e77e1 1705 if (refcount_dec_and_test(&curr->refs))
16cdcec7
MX
1706 kfree(curr);
1707
1708 if (over)
1709 return 1;
42e9cc46 1710 ctx->pos++;
16cdcec7
MX
1711 }
1712 return 0;
1713}
1714
16cdcec7
MX
1715static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1716 struct btrfs_inode_item *inode_item,
1717 struct inode *inode)
1718{
2f2f43d3
EB
1719 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1720 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
16cdcec7
MX
1721 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1722 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1723 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1724 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1725 btrfs_set_stack_inode_generation(inode_item,
1726 BTRFS_I(inode)->generation);
c7f88c4e
JL
1727 btrfs_set_stack_inode_sequence(inode_item,
1728 inode_peek_iversion(inode));
16cdcec7
MX
1729 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1730 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1731 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
ff5714cc 1732 btrfs_set_stack_inode_block_group(inode_item, 0);
16cdcec7 1733
a937b979 1734 btrfs_set_stack_timespec_sec(&inode_item->atime,
16cdcec7 1735 inode->i_atime.tv_sec);
a937b979 1736 btrfs_set_stack_timespec_nsec(&inode_item->atime,
16cdcec7
MX
1737 inode->i_atime.tv_nsec);
1738
a937b979 1739 btrfs_set_stack_timespec_sec(&inode_item->mtime,
16cdcec7 1740 inode->i_mtime.tv_sec);
a937b979 1741 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
16cdcec7
MX
1742 inode->i_mtime.tv_nsec);
1743
a937b979 1744 btrfs_set_stack_timespec_sec(&inode_item->ctime,
16cdcec7 1745 inode->i_ctime.tv_sec);
a937b979 1746 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
16cdcec7 1747 inode->i_ctime.tv_nsec);
9cc97d64 1748
1749 btrfs_set_stack_timespec_sec(&inode_item->otime,
1750 BTRFS_I(inode)->i_otime.tv_sec);
1751 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1752 BTRFS_I(inode)->i_otime.tv_nsec);
16cdcec7
MX
1753}
1754
2f7e33d4
MX
1755int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1756{
1757 struct btrfs_delayed_node *delayed_node;
1758 struct btrfs_inode_item *inode_item;
2f7e33d4 1759
340c6ca9 1760 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
2f7e33d4
MX
1761 if (!delayed_node)
1762 return -ENOENT;
1763
1764 mutex_lock(&delayed_node->mutex);
7cf35d91 1765 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2f7e33d4
MX
1766 mutex_unlock(&delayed_node->mutex);
1767 btrfs_release_delayed_node(delayed_node);
1768 return -ENOENT;
1769 }
1770
1771 inode_item = &delayed_node->inode_item;
1772
2f2f43d3
EB
1773 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1774 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
6ef06d27 1775 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
2f7e33d4 1776 inode->i_mode = btrfs_stack_inode_mode(inode_item);
bfe86848 1777 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
2f7e33d4
MX
1778 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1779 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
6e17d30b
YD
1780 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1781
c7f88c4e
JL
1782 inode_set_iversion_queried(inode,
1783 btrfs_stack_inode_sequence(inode_item));
2f7e33d4
MX
1784 inode->i_rdev = 0;
1785 *rdev = btrfs_stack_inode_rdev(inode_item);
1786 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1787
a937b979
DS
1788 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1789 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
2f7e33d4 1790
a937b979
DS
1791 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1792 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
2f7e33d4 1793
a937b979
DS
1794 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1795 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
2f7e33d4 1796
9cc97d64 1797 BTRFS_I(inode)->i_otime.tv_sec =
1798 btrfs_stack_timespec_sec(&inode_item->otime);
1799 BTRFS_I(inode)->i_otime.tv_nsec =
1800 btrfs_stack_timespec_nsec(&inode_item->otime);
1801
2f7e33d4
MX
1802 inode->i_generation = BTRFS_I(inode)->generation;
1803 BTRFS_I(inode)->index_cnt = (u64)-1;
1804
1805 mutex_unlock(&delayed_node->mutex);
1806 btrfs_release_delayed_node(delayed_node);
1807 return 0;
1808}
1809
16cdcec7
MX
1810int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1811 struct btrfs_root *root, struct inode *inode)
1812{
1813 struct btrfs_delayed_node *delayed_node;
aa0467d8 1814 int ret = 0;
16cdcec7 1815
e5517a7b 1816 delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
16cdcec7
MX
1817 if (IS_ERR(delayed_node))
1818 return PTR_ERR(delayed_node);
1819
1820 mutex_lock(&delayed_node->mutex);
7cf35d91 1821 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
16cdcec7
MX
1822 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1823 goto release_node;
1824 }
1825
fcabdd1c 1826 ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
7fd2ae21 1827 delayed_node);
c06a0e12
JB
1828 if (ret)
1829 goto release_node;
16cdcec7
MX
1830
1831 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
7cf35d91 1832 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
16cdcec7
MX
1833 delayed_node->count++;
1834 atomic_inc(&root->fs_info->delayed_root->items);
1835release_node:
1836 mutex_unlock(&delayed_node->mutex);
1837 btrfs_release_delayed_node(delayed_node);
1838 return ret;
1839}
1840
e07222c7 1841int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
67de1176 1842{
e07222c7 1843 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
67de1176
MX
1844 struct btrfs_delayed_node *delayed_node;
1845
6f896054
CM
1846 /*
1847 * we don't do delayed inode updates during log recovery because it
1848 * leads to enospc problems. This means we also can't do
1849 * delayed inode refs
1850 */
0b246afa 1851 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
6f896054
CM
1852 return -EAGAIN;
1853
e07222c7 1854 delayed_node = btrfs_get_or_create_delayed_node(inode);
67de1176
MX
1855 if (IS_ERR(delayed_node))
1856 return PTR_ERR(delayed_node);
1857
1858 /*
 1859	 * We don't reserve space for inode ref deletion because:
 1860	 * - We ONLY do async inode ref deletion for an inode that has only
 1861	 * one link (i_nlink == 1), which means there is only one inode ref.
 1862	 * And in most cases, the inode ref and the inode item are in the
 1863	 * same leaf, and we will deal with them at the same time.
 1864	 * Since we are sure we will reserve the space for the inode item,
 1865	 * it is unnecessary to reserve space for inode ref deletion.
 1866	 * - If the inode ref and the inode item are not in the same leaf,
 1867	 * we also needn't worry about the enospc problem, because we
 1868	 * reserve much more space for the inode update than it needs.
 1869	 * - At the worst, we can steal some space from the global reservation.
 1870	 * It is very rare.
1871 */
1872 mutex_lock(&delayed_node->mutex);
1873 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1874 goto release_node;
1875
1876 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1877 delayed_node->count++;
0b246afa 1878 atomic_inc(&fs_info->delayed_root->items);
67de1176
MX
1879release_node:
1880 mutex_unlock(&delayed_node->mutex);
1881 btrfs_release_delayed_node(delayed_node);
1882 return 0;
1883}
1884
16cdcec7
MX
1885static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1886{
1887 struct btrfs_root *root = delayed_node->root;
2ff7e61e 1888 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7
MX
1889 struct btrfs_delayed_item *curr_item, *prev_item;
1890
1891 mutex_lock(&delayed_node->mutex);
1892 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1893 while (curr_item) {
4f5427cc 1894 btrfs_delayed_item_release_metadata(root, curr_item);
16cdcec7
MX
1895 prev_item = curr_item;
1896 curr_item = __btrfs_next_delayed_item(prev_item);
1897 btrfs_release_delayed_item(prev_item);
1898 }
1899
1900 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1901 while (curr_item) {
4f5427cc 1902 btrfs_delayed_item_release_metadata(root, curr_item);
16cdcec7
MX
1903 prev_item = curr_item;
1904 curr_item = __btrfs_next_delayed_item(prev_item);
1905 btrfs_release_delayed_item(prev_item);
1906 }
1907
67de1176
MX
1908 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1909 btrfs_release_delayed_iref(delayed_node);
1910
7cf35d91 1911 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
4f5427cc 1912 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
16cdcec7
MX
1913 btrfs_release_delayed_inode(delayed_node);
1914 }
1915 mutex_unlock(&delayed_node->mutex);
1916}
1917
4ccb5c72 1918void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
16cdcec7
MX
1919{
1920 struct btrfs_delayed_node *delayed_node;
1921
4ccb5c72 1922 delayed_node = btrfs_get_delayed_node(inode);
16cdcec7
MX
1923 if (!delayed_node)
1924 return;
1925
1926 __btrfs_kill_delayed_node(delayed_node);
1927 btrfs_release_delayed_node(delayed_node);
1928}
1929
1930void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1931{
1932 u64 inode_id = 0;
1933 struct btrfs_delayed_node *delayed_nodes[8];
1934 int i, n;
1935
1936 while (1) {
1937 spin_lock(&root->inode_lock);
1938 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1939 (void **)delayed_nodes, inode_id,
1940 ARRAY_SIZE(delayed_nodes));
1941 if (!n) {
1942 spin_unlock(&root->inode_lock);
1943 break;
1944 }
1945
1946 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1947
1948 for (i = 0; i < n; i++)
6de5f18e 1949 refcount_inc(&delayed_nodes[i]->refs);
16cdcec7
MX
1950 spin_unlock(&root->inode_lock);
1951
1952 for (i = 0; i < n; i++) {
1953 __btrfs_kill_delayed_node(delayed_nodes[i]);
1954 btrfs_release_delayed_node(delayed_nodes[i]);
1955 }
1956 }
1957}
67cde344 1958
ccdf9b30 1959void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
67cde344 1960{
67cde344
MX
1961 struct btrfs_delayed_node *curr_node, *prev_node;
1962
ccdf9b30 1963 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
67cde344
MX
1964 while (curr_node) {
1965 __btrfs_kill_delayed_node(curr_node);
1966
1967 prev_node = curr_node;
1968 curr_node = btrfs_next_delayed_node(curr_node);
1969 btrfs_release_delayed_node(prev_node);
1970 }
1971}
1972