btrfs: move accessor helpers into accessors.h
[linux-2.6-block.git] / fs / btrfs / delayed-inode.c
CommitLineData
c1d7c514 1// SPDX-License-Identifier: GPL-2.0
16cdcec7
MX
2/*
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
16cdcec7
MX
5 */
6
7#include <linux/slab.h>
c7f88c4e 8#include <linux/iversion.h>
ec8eb376
JB
9#include "ctree.h"
10#include "fs.h"
9b569ea0 11#include "messages.h"
602cbe91 12#include "misc.h"
16cdcec7
MX
13#include "delayed-inode.h"
14#include "disk-io.h"
15#include "transaction.h"
4f5427cc 16#include "qgroup.h"
1f95ec01 17#include "locking.h"
26c2c454 18#include "inode-item.h"
f1e5c618 19#include "space-info.h"
07e81dc9 20#include "accessors.h"
16cdcec7 21
de3cb945
CM
22#define BTRFS_DELAYED_WRITEBACK 512
23#define BTRFS_DELAYED_BACKGROUND 128
24#define BTRFS_DELAYED_BATCH 16
16cdcec7
MX
25
26static struct kmem_cache *delayed_node_cache;
27
28int __init btrfs_delayed_inode_init(void)
29{
837e1972 30 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
16cdcec7
MX
31 sizeof(struct btrfs_delayed_node),
32 0,
fba4b697 33 SLAB_MEM_SPREAD,
16cdcec7
MX
34 NULL);
35 if (!delayed_node_cache)
36 return -ENOMEM;
37 return 0;
38}
39
e67c718b 40void __cold btrfs_delayed_inode_exit(void)
16cdcec7 41{
5598e900 42 kmem_cache_destroy(delayed_node_cache);
16cdcec7
MX
43}
44
45static inline void btrfs_init_delayed_node(
46 struct btrfs_delayed_node *delayed_node,
47 struct btrfs_root *root, u64 inode_id)
48{
49 delayed_node->root = root;
50 delayed_node->inode_id = inode_id;
6de5f18e 51 refcount_set(&delayed_node->refs, 0);
03a1d4c8
LB
52 delayed_node->ins_root = RB_ROOT_CACHED;
53 delayed_node->del_root = RB_ROOT_CACHED;
16cdcec7 54 mutex_init(&delayed_node->mutex);
16cdcec7
MX
55 INIT_LIST_HEAD(&delayed_node->n_list);
56 INIT_LIST_HEAD(&delayed_node->p_list);
16cdcec7
MX
57}
58
f85b7379
DS
59static struct btrfs_delayed_node *btrfs_get_delayed_node(
60 struct btrfs_inode *btrfs_inode)
16cdcec7 61{
16cdcec7 62 struct btrfs_root *root = btrfs_inode->root;
4a0cc7ca 63 u64 ino = btrfs_ino(btrfs_inode);
2f7e33d4 64 struct btrfs_delayed_node *node;
16cdcec7 65
20c7bcec 66 node = READ_ONCE(btrfs_inode->delayed_node);
16cdcec7 67 if (node) {
6de5f18e 68 refcount_inc(&node->refs);
16cdcec7
MX
69 return node;
70 }
71
72 spin_lock(&root->inode_lock);
088aea3b 73 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
ec35e48b 74
16cdcec7
MX
75 if (node) {
76 if (btrfs_inode->delayed_node) {
6de5f18e 77 refcount_inc(&node->refs); /* can be accessed */
2f7e33d4 78 BUG_ON(btrfs_inode->delayed_node != node);
16cdcec7 79 spin_unlock(&root->inode_lock);
2f7e33d4 80 return node;
16cdcec7 81 }
ec35e48b
CM
82
83 /*
84 * It's possible that we're racing into the middle of removing
088aea3b 85 * this node from the radix tree. In this case, the refcount
ec35e48b 86 * was zero and it should never go back to one. Just return
088aea3b 87 * NULL like it was never in the radix at all; our release
ec35e48b
CM
88 * function is in the process of removing it.
89 *
90 * Some implementations of refcount_inc refuse to bump the
91 * refcount once it has hit zero. If we don't do this dance
92 * here, refcount_inc() may decide to just WARN_ONCE() instead
93 * of actually bumping the refcount.
94 *
088aea3b 95 * If this node is properly in the radix, we want to bump the
ec35e48b
CM
96 * refcount twice, once for the inode and once for this get
97 * operation.
98 */
99 if (refcount_inc_not_zero(&node->refs)) {
100 refcount_inc(&node->refs);
101 btrfs_inode->delayed_node = node;
102 } else {
103 node = NULL;
104 }
105
16cdcec7
MX
106 spin_unlock(&root->inode_lock);
107 return node;
108 }
109 spin_unlock(&root->inode_lock);
110
2f7e33d4
MX
111 return NULL;
112}
113
79787eaa 114/* Will return either the node or PTR_ERR(-ENOMEM) */
2f7e33d4 115static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
f85b7379 116 struct btrfs_inode *btrfs_inode)
2f7e33d4
MX
117{
118 struct btrfs_delayed_node *node;
2f7e33d4 119 struct btrfs_root *root = btrfs_inode->root;
4a0cc7ca 120 u64 ino = btrfs_ino(btrfs_inode);
2f7e33d4
MX
121 int ret;
122
088aea3b
DS
123again:
124 node = btrfs_get_delayed_node(btrfs_inode);
125 if (node)
126 return node;
2f7e33d4 127
088aea3b
DS
128 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
129 if (!node)
130 return ERR_PTR(-ENOMEM);
131 btrfs_init_delayed_node(node, root, ino);
16cdcec7 132
088aea3b
DS
133 /* cached in the btrfs inode and can be accessed */
134 refcount_set(&node->refs, 2);
16cdcec7 135
088aea3b
DS
136 ret = radix_tree_preload(GFP_NOFS);
137 if (ret) {
138 kmem_cache_free(delayed_node_cache, node);
139 return ERR_PTR(ret);
140 }
141
142 spin_lock(&root->inode_lock);
143 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
144 if (ret == -EEXIST) {
145 spin_unlock(&root->inode_lock);
146 kmem_cache_free(delayed_node_cache, node);
147 radix_tree_preload_end();
148 goto again;
149 }
16cdcec7
MX
150 btrfs_inode->delayed_node = node;
151 spin_unlock(&root->inode_lock);
088aea3b 152 radix_tree_preload_end();
16cdcec7
MX
153
154 return node;
155}
156
157/*
158 * Call it when holding delayed_node->mutex
159 *
160 * If mod = 1, add this node into the prepared list.
161 */
162static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
163 struct btrfs_delayed_node *node,
164 int mod)
165{
166 spin_lock(&root->lock);
7cf35d91 167 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
16cdcec7
MX
168 if (!list_empty(&node->p_list))
169 list_move_tail(&node->p_list, &root->prepare_list);
170 else if (mod)
171 list_add_tail(&node->p_list, &root->prepare_list);
172 } else {
173 list_add_tail(&node->n_list, &root->node_list);
174 list_add_tail(&node->p_list, &root->prepare_list);
6de5f18e 175 refcount_inc(&node->refs); /* inserted into list */
16cdcec7 176 root->nodes++;
7cf35d91 177 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
16cdcec7
MX
178 }
179 spin_unlock(&root->lock);
180}
181
182/* Call it when holding delayed_node->mutex */
183static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
184 struct btrfs_delayed_node *node)
185{
186 spin_lock(&root->lock);
7cf35d91 187 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
16cdcec7 188 root->nodes--;
6de5f18e 189 refcount_dec(&node->refs); /* not in the list */
16cdcec7
MX
190 list_del_init(&node->n_list);
191 if (!list_empty(&node->p_list))
192 list_del_init(&node->p_list);
7cf35d91 193 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
16cdcec7
MX
194 }
195 spin_unlock(&root->lock);
196}
197
48a3b636 198static struct btrfs_delayed_node *btrfs_first_delayed_node(
16cdcec7
MX
199 struct btrfs_delayed_root *delayed_root)
200{
201 struct list_head *p;
202 struct btrfs_delayed_node *node = NULL;
203
204 spin_lock(&delayed_root->lock);
205 if (list_empty(&delayed_root->node_list))
206 goto out;
207
208 p = delayed_root->node_list.next;
209 node = list_entry(p, struct btrfs_delayed_node, n_list);
6de5f18e 210 refcount_inc(&node->refs);
16cdcec7
MX
211out:
212 spin_unlock(&delayed_root->lock);
213
214 return node;
215}
216
48a3b636 217static struct btrfs_delayed_node *btrfs_next_delayed_node(
16cdcec7
MX
218 struct btrfs_delayed_node *node)
219{
220 struct btrfs_delayed_root *delayed_root;
221 struct list_head *p;
222 struct btrfs_delayed_node *next = NULL;
223
224 delayed_root = node->root->fs_info->delayed_root;
225 spin_lock(&delayed_root->lock);
7cf35d91
MX
226 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
227 /* not in the list */
16cdcec7
MX
228 if (list_empty(&delayed_root->node_list))
229 goto out;
230 p = delayed_root->node_list.next;
231 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
232 goto out;
233 else
234 p = node->n_list.next;
235
236 next = list_entry(p, struct btrfs_delayed_node, n_list);
6de5f18e 237 refcount_inc(&next->refs);
16cdcec7
MX
238out:
239 spin_unlock(&delayed_root->lock);
240
241 return next;
242}
243
244static void __btrfs_release_delayed_node(
245 struct btrfs_delayed_node *delayed_node,
246 int mod)
247{
248 struct btrfs_delayed_root *delayed_root;
249
250 if (!delayed_node)
251 return;
252
253 delayed_root = delayed_node->root->fs_info->delayed_root;
254
255 mutex_lock(&delayed_node->mutex);
256 if (delayed_node->count)
257 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
258 else
259 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
260 mutex_unlock(&delayed_node->mutex);
261
6de5f18e 262 if (refcount_dec_and_test(&delayed_node->refs)) {
16cdcec7 263 struct btrfs_root *root = delayed_node->root;
ec35e48b 264
16cdcec7 265 spin_lock(&root->inode_lock);
ec35e48b
CM
266 /*
267 * Once our refcount goes to zero, nobody is allowed to bump it
268 * back up. We can delete it now.
269 */
270 ASSERT(refcount_read(&delayed_node->refs) == 0);
088aea3b
DS
271 radix_tree_delete(&root->delayed_nodes_tree,
272 delayed_node->inode_id);
16cdcec7 273 spin_unlock(&root->inode_lock);
ec35e48b 274 kmem_cache_free(delayed_node_cache, delayed_node);
16cdcec7
MX
275 }
276}
277
278static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
279{
280 __btrfs_release_delayed_node(node, 0);
281}
282
48a3b636 283static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
16cdcec7
MX
284 struct btrfs_delayed_root *delayed_root)
285{
286 struct list_head *p;
287 struct btrfs_delayed_node *node = NULL;
288
289 spin_lock(&delayed_root->lock);
290 if (list_empty(&delayed_root->prepare_list))
291 goto out;
292
293 p = delayed_root->prepare_list.next;
294 list_del_init(p);
295 node = list_entry(p, struct btrfs_delayed_node, p_list);
6de5f18e 296 refcount_inc(&node->refs);
16cdcec7
MX
297out:
298 spin_unlock(&delayed_root->lock);
299
300 return node;
301}
302
303static inline void btrfs_release_prepared_delayed_node(
304 struct btrfs_delayed_node *node)
305{
306 __btrfs_release_delayed_node(node, 1);
307}
308
4c469798
FM
309static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
310 struct btrfs_delayed_node *node,
311 enum btrfs_delayed_item_type type)
16cdcec7
MX
312{
313 struct btrfs_delayed_item *item;
4c469798 314
16cdcec7
MX
315 item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
316 if (item) {
317 item->data_len = data_len;
4c469798 318 item->type = type;
16cdcec7 319 item->bytes_reserved = 0;
96d89923
FM
320 item->delayed_node = node;
321 RB_CLEAR_NODE(&item->rb_node);
30b80f3c
FM
322 INIT_LIST_HEAD(&item->log_list);
323 item->logged = false;
089e77e1 324 refcount_set(&item->refs, 1);
16cdcec7
MX
325 }
326 return item;
327}
328
329/*
330 * __btrfs_lookup_delayed_item - look up the delayed item by key
331 * @delayed_node: pointer to the delayed node
96d89923 332 * @index: the dir index value to lookup (offset of a dir index key)
16cdcec7
MX
333 *
334 * Note: if we don't find the right item, we will return the prev item and
335 * the next item.
336 */
337static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
338 struct rb_root *root,
4cbf37f5 339 u64 index)
16cdcec7 340{
4cbf37f5 341 struct rb_node *node = root->rb_node;
16cdcec7 342 struct btrfs_delayed_item *delayed_item = NULL;
16cdcec7
MX
343
344 while (node) {
345 delayed_item = rb_entry(node, struct btrfs_delayed_item,
346 rb_node);
96d89923 347 if (delayed_item->index < index)
16cdcec7 348 node = node->rb_right;
96d89923 349 else if (delayed_item->index > index)
16cdcec7
MX
350 node = node->rb_left;
351 else
352 return delayed_item;
353 }
354
16cdcec7
MX
355 return NULL;
356}
357
16cdcec7 358static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
c9d02ab4 359 struct btrfs_delayed_item *ins)
16cdcec7
MX
360{
361 struct rb_node **p, *node;
362 struct rb_node *parent_node = NULL;
03a1d4c8 363 struct rb_root_cached *root;
16cdcec7 364 struct btrfs_delayed_item *item;
03a1d4c8 365 bool leftmost = true;
16cdcec7 366
4c469798 367 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
16cdcec7 368 root = &delayed_node->ins_root;
16cdcec7 369 else
4c469798
FM
370 root = &delayed_node->del_root;
371
03a1d4c8 372 p = &root->rb_root.rb_node;
16cdcec7
MX
373 node = &ins->rb_node;
374
375 while (*p) {
376 parent_node = *p;
377 item = rb_entry(parent_node, struct btrfs_delayed_item,
378 rb_node);
379
96d89923 380 if (item->index < ins->index) {
16cdcec7 381 p = &(*p)->rb_right;
03a1d4c8 382 leftmost = false;
96d89923 383 } else if (item->index > ins->index) {
16cdcec7 384 p = &(*p)->rb_left;
03a1d4c8 385 } else {
16cdcec7 386 return -EEXIST;
03a1d4c8 387 }
16cdcec7
MX
388 }
389
390 rb_link_node(node, parent_node, p);
03a1d4c8 391 rb_insert_color_cached(node, root, leftmost);
a176affe 392
4c469798 393 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
96d89923
FM
394 ins->index >= delayed_node->index_cnt)
395 delayed_node->index_cnt = ins->index + 1;
16cdcec7
MX
396
397 delayed_node->count++;
398 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
399 return 0;
400}
401
de3cb945
CM
402static void finish_one_item(struct btrfs_delayed_root *delayed_root)
403{
404 int seq = atomic_inc_return(&delayed_root->items_seq);
ee863954 405
093258e6 406 /* atomic_dec_return implies a barrier */
de3cb945 407 if ((atomic_dec_return(&delayed_root->items) <
093258e6
DS
408 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
409 cond_wake_up_nomb(&delayed_root->wait);
de3cb945
CM
410}
411
16cdcec7
MX
412static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
413{
03a1d4c8 414 struct rb_root_cached *root;
16cdcec7
MX
415 struct btrfs_delayed_root *delayed_root;
416
96d89923
FM
417 /* Not inserted, ignore it. */
418 if (RB_EMPTY_NODE(&delayed_item->rb_node))
933c22a7 419 return;
96d89923 420
16cdcec7
MX
421 delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
422
423 BUG_ON(!delayed_root);
16cdcec7 424
4c469798 425 if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
16cdcec7
MX
426 root = &delayed_item->delayed_node->ins_root;
427 else
428 root = &delayed_item->delayed_node->del_root;
429
03a1d4c8 430 rb_erase_cached(&delayed_item->rb_node, root);
96d89923 431 RB_CLEAR_NODE(&delayed_item->rb_node);
16cdcec7 432 delayed_item->delayed_node->count--;
de3cb945
CM
433
434 finish_one_item(delayed_root);
16cdcec7
MX
435}
436
437static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
438{
439 if (item) {
440 __btrfs_remove_delayed_item(item);
089e77e1 441 if (refcount_dec_and_test(&item->refs))
16cdcec7
MX
442 kfree(item);
443 }
444}
445
48a3b636 446static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
16cdcec7
MX
447 struct btrfs_delayed_node *delayed_node)
448{
449 struct rb_node *p;
450 struct btrfs_delayed_item *item = NULL;
451
03a1d4c8 452 p = rb_first_cached(&delayed_node->ins_root);
16cdcec7
MX
453 if (p)
454 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
455
456 return item;
457}
458
48a3b636 459static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
16cdcec7
MX
460 struct btrfs_delayed_node *delayed_node)
461{
462 struct rb_node *p;
463 struct btrfs_delayed_item *item = NULL;
464
03a1d4c8 465 p = rb_first_cached(&delayed_node->del_root);
16cdcec7
MX
466 if (p)
467 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
468
469 return item;
470}
471
48a3b636 472static struct btrfs_delayed_item *__btrfs_next_delayed_item(
16cdcec7
MX
473 struct btrfs_delayed_item *item)
474{
475 struct rb_node *p;
476 struct btrfs_delayed_item *next = NULL;
477
478 p = rb_next(&item->rb_node);
479 if (p)
480 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
481
482 return next;
483}
484
16cdcec7 485static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
16cdcec7
MX
486 struct btrfs_delayed_item *item)
487{
488 struct btrfs_block_rsv *src_rsv;
489 struct btrfs_block_rsv *dst_rsv;
df492881 490 struct btrfs_fs_info *fs_info = trans->fs_info;
16cdcec7
MX
491 u64 num_bytes;
492 int ret;
493
494 if (!trans->bytes_reserved)
495 return 0;
496
497 src_rsv = trans->block_rsv;
0b246afa 498 dst_rsv = &fs_info->delayed_block_rsv;
16cdcec7 499
2bd36e7b 500 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
f218ea6c
QW
501
502 /*
503 * Here we migrate space rsv from transaction rsv, since have already
504 * reserved space when starting a transaction. So no need to reserve
505 * qgroup space here.
506 */
3a584174 507 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
8c2a3ca2 508 if (!ret) {
0b246afa 509 trace_btrfs_space_reservation(fs_info, "delayed_item",
96d89923 510 item->delayed_node->inode_id,
8c2a3ca2 511 num_bytes, 1);
763748b2
FM
512 /*
513 * For insertions we track reserved metadata space by accounting
514 * for the number of leaves that will be used, based on the delayed
515 * node's index_items_size field.
516 */
4c469798 517 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
763748b2 518 item->bytes_reserved = num_bytes;
8c2a3ca2 519 }
16cdcec7
MX
520
521 return ret;
522}
523
4f5427cc 524static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
16cdcec7
MX
525 struct btrfs_delayed_item *item)
526{
19fd2949 527 struct btrfs_block_rsv *rsv;
4f5427cc 528 struct btrfs_fs_info *fs_info = root->fs_info;
19fd2949 529
16cdcec7
MX
530 if (!item->bytes_reserved)
531 return;
532
0b246afa 533 rsv = &fs_info->delayed_block_rsv;
f218ea6c
QW
534 /*
535 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
536 * to release/reserve qgroup space.
537 */
0b246afa 538 trace_btrfs_space_reservation(fs_info, "delayed_item",
96d89923
FM
539 item->delayed_node->inode_id,
540 item->bytes_reserved, 0);
63f018be 541 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
16cdcec7
MX
542}
543
763748b2
FM
544static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
545 unsigned int num_leaves)
546{
547 struct btrfs_fs_info *fs_info = node->root->fs_info;
548 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
549
550 /* There are no space reservations during log replay, bail out. */
551 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
552 return;
553
554 trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
555 bytes, 0);
556 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
557}
558
16cdcec7
MX
559static int btrfs_delayed_inode_reserve_metadata(
560 struct btrfs_trans_handle *trans,
561 struct btrfs_root *root,
562 struct btrfs_delayed_node *node)
563{
0b246afa 564 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7
MX
565 struct btrfs_block_rsv *src_rsv;
566 struct btrfs_block_rsv *dst_rsv;
567 u64 num_bytes;
568 int ret;
569
16cdcec7 570 src_rsv = trans->block_rsv;
0b246afa 571 dst_rsv = &fs_info->delayed_block_rsv;
16cdcec7 572
bcacf5f3 573 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
c06a0e12
JB
574
575 /*
576 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
577 * which doesn't reserve space for speed. This is a problem since we
578 * still need to reserve space for this update, so try to reserve the
579 * space.
580 *
581 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
69fe2d75 582 * we always reserve enough to update the inode item.
c06a0e12 583 */
e755d9ab 584 if (!src_rsv || (!trans->bytes_reserved &&
66d8f3dd 585 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
4d14c5cd
NB
586 ret = btrfs_qgroup_reserve_meta(root, num_bytes,
587 BTRFS_QGROUP_RSV_META_PREALLOC, true);
f218ea6c
QW
588 if (ret < 0)
589 return ret;
9270501c 590 ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
08e007d2 591 BTRFS_RESERVE_NO_FLUSH);
98686ffc
NB
592 /* NO_FLUSH could only fail with -ENOSPC */
593 ASSERT(ret == 0 || ret == -ENOSPC);
594 if (ret)
0f9c03d8 595 btrfs_qgroup_free_meta_prealloc(root, num_bytes);
98686ffc
NB
596 } else {
597 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
c06a0e12
JB
598 }
599
8c2a3ca2 600 if (!ret) {
0b246afa 601 trace_btrfs_space_reservation(fs_info, "delayed_inode",
8e3c9d3c 602 node->inode_id, num_bytes, 1);
16cdcec7 603 node->bytes_reserved = num_bytes;
8c2a3ca2 604 }
16cdcec7
MX
605
606 return ret;
607}
608
2ff7e61e 609static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
4f5427cc
QW
610 struct btrfs_delayed_node *node,
611 bool qgroup_free)
16cdcec7
MX
612{
613 struct btrfs_block_rsv *rsv;
614
615 if (!node->bytes_reserved)
616 return;
617
0b246afa
JM
618 rsv = &fs_info->delayed_block_rsv;
619 trace_btrfs_space_reservation(fs_info, "delayed_inode",
8c2a3ca2 620 node->inode_id, node->bytes_reserved, 0);
63f018be 621 btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
4f5427cc
QW
622 if (qgroup_free)
623 btrfs_qgroup_free_meta_prealloc(node->root,
624 node->bytes_reserved);
625 else
626 btrfs_qgroup_convert_reserved_meta(node->root,
627 node->bytes_reserved);
16cdcec7
MX
628 node->bytes_reserved = 0;
629}
630
631/*
06ac264f
FM
632 * Insert a single delayed item or a batch of delayed items, as many as possible
633 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
634 * in the rbtree, and if there's a gap between two consecutive dir index items,
635 * then it means at some point we had delayed dir indexes to add but they got
636 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
637 * into the subvolume tree. Dir index keys also have their offsets coming from a
638 * monotonically increasing counter, so we can't get new keys with an offset that
639 * fits within a gap between delayed dir index items.
16cdcec7 640 */
506650dc
FM
641static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
642 struct btrfs_root *root,
643 struct btrfs_path *path,
644 struct btrfs_delayed_item *first_item)
16cdcec7 645{
763748b2
FM
646 struct btrfs_fs_info *fs_info = root->fs_info;
647 struct btrfs_delayed_node *node = first_item->delayed_node;
b7ef5f3a 648 LIST_HEAD(item_list);
506650dc
FM
649 struct btrfs_delayed_item *curr;
650 struct btrfs_delayed_item *next;
763748b2 651 const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
b7ef5f3a 652 struct btrfs_item_batch batch;
96d89923 653 struct btrfs_key first_key;
4c469798 654 const u32 first_data_size = first_item->data_len;
506650dc 655 int total_size;
506650dc 656 char *ins_data = NULL;
506650dc 657 int ret;
71b68e9e 658 bool continuous_keys_only = false;
16cdcec7 659
763748b2
FM
660 lockdep_assert_held(&node->mutex);
661
71b68e9e
JB
662 /*
663 * During normal operation the delayed index offset is continuously
664 * increasing, so we can batch insert all items as there will not be any
665 * overlapping keys in the tree.
666 *
667 * The exception to this is log replay, where we may have interleaved
668 * offsets in the tree, so our batch needs to be continuous keys only in
669 * order to ensure we do not end up with out of order items in our leaf.
670 */
671 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
672 continuous_keys_only = true;
673
763748b2
FM
674 /*
675 * For delayed items to insert, we track reserved metadata bytes based
676 * on the number of leaves that we will use.
677 * See btrfs_insert_delayed_dir_index() and
678 * btrfs_delayed_item_reserve_metadata()).
679 */
680 ASSERT(first_item->bytes_reserved == 0);
681
b7ef5f3a 682 list_add_tail(&first_item->tree_list, &item_list);
4c469798 683 batch.total_data_size = first_data_size;
b7ef5f3a 684 batch.nr = 1;
4c469798 685 total_size = first_data_size + sizeof(struct btrfs_item);
506650dc 686 curr = first_item;
16cdcec7 687
506650dc
FM
688 while (true) {
689 int next_size;
16cdcec7 690
16cdcec7 691 next = __btrfs_next_delayed_item(curr);
06ac264f 692 if (!next)
16cdcec7
MX
693 break;
694
71b68e9e
JB
695 /*
696 * We cannot allow gaps in the key space if we're doing log
697 * replay.
698 */
96d89923 699 if (continuous_keys_only && (next->index != curr->index + 1))
71b68e9e
JB
700 break;
701
763748b2
FM
702 ASSERT(next->bytes_reserved == 0);
703
506650dc
FM
704 next_size = next->data_len + sizeof(struct btrfs_item);
705 if (total_size + next_size > max_size)
16cdcec7 706 break;
16cdcec7 707
b7ef5f3a
FM
708 list_add_tail(&next->tree_list, &item_list);
709 batch.nr++;
506650dc 710 total_size += next_size;
b7ef5f3a 711 batch.total_data_size += next->data_len;
506650dc 712 curr = next;
16cdcec7
MX
713 }
714
b7ef5f3a 715 if (batch.nr == 1) {
96d89923
FM
716 first_key.objectid = node->inode_id;
717 first_key.type = BTRFS_DIR_INDEX_KEY;
718 first_key.offset = first_item->index;
719 batch.keys = &first_key;
4c469798 720 batch.data_sizes = &first_data_size;
506650dc 721 } else {
b7ef5f3a
FM
722 struct btrfs_key *ins_keys;
723 u32 *ins_sizes;
506650dc 724 int i = 0;
16cdcec7 725
b7ef5f3a
FM
726 ins_data = kmalloc(batch.nr * sizeof(u32) +
727 batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
506650dc
FM
728 if (!ins_data) {
729 ret = -ENOMEM;
730 goto out;
731 }
732 ins_sizes = (u32 *)ins_data;
b7ef5f3a
FM
733 ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
734 batch.keys = ins_keys;
735 batch.data_sizes = ins_sizes;
736 list_for_each_entry(curr, &item_list, tree_list) {
96d89923
FM
737 ins_keys[i].objectid = node->inode_id;
738 ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
739 ins_keys[i].offset = curr->index;
506650dc
FM
740 ins_sizes[i] = curr->data_len;
741 i++;
742 }
16cdcec7
MX
743 }
744
b7ef5f3a 745 ret = btrfs_insert_empty_items(trans, root, path, &batch);
506650dc
FM
746 if (ret)
747 goto out;
16cdcec7 748
b7ef5f3a 749 list_for_each_entry(curr, &item_list, tree_list) {
506650dc 750 char *data_ptr;
16cdcec7 751
506650dc
FM
752 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
753 write_extent_buffer(path->nodes[0], &curr->data,
754 (unsigned long)data_ptr, curr->data_len);
755 path->slots[0]++;
756 }
16cdcec7 757
506650dc
FM
758 /*
759 * Now release our path before releasing the delayed items and their
760 * metadata reservations, so that we don't block other tasks for more
761 * time than needed.
762 */
763 btrfs_release_path(path);
16cdcec7 764
763748b2
FM
765 ASSERT(node->index_item_leaves > 0);
766
71b68e9e
JB
767 /*
768 * For normal operations we will batch an entire leaf's worth of delayed
769 * items, so if there are more items to process we can decrement
770 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
771 *
772 * However for log replay we may not have inserted an entire leaf's
773 * worth of items, we may have not had continuous items, so decrementing
774 * here would mess up the index_item_leaves accounting. For this case
775 * only clean up the accounting when there are no items left.
776 */
777 if (next && !continuous_keys_only) {
763748b2
FM
778 /*
779 * We inserted one batch of items into a leaf a there are more
780 * items to flush in a future batch, now release one unit of
781 * metadata space from the delayed block reserve, corresponding
782 * the leaf we just flushed to.
783 */
784 btrfs_delayed_item_release_leaves(node, 1);
785 node->index_item_leaves--;
71b68e9e 786 } else if (!next) {
763748b2
FM
787 /*
788 * There are no more items to insert. We can have a number of
789 * reserved leaves > 1 here - this happens when many dir index
790 * items are added and then removed before they are flushed (file
791 * names with a very short life, never span a transaction). So
792 * release all remaining leaves.
793 */
794 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
795 node->index_item_leaves = 0;
796 }
797
b7ef5f3a 798 list_for_each_entry_safe(curr, next, &item_list, tree_list) {
16cdcec7
MX
799 list_del(&curr->tree_list);
800 btrfs_release_delayed_item(curr);
801 }
16cdcec7 802out:
506650dc 803 kfree(ins_data);
16cdcec7
MX
804 return ret;
805}
806
16cdcec7
MX
807static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
808 struct btrfs_path *path,
809 struct btrfs_root *root,
810 struct btrfs_delayed_node *node)
811{
16cdcec7
MX
812 int ret = 0;
813
506650dc
FM
814 while (ret == 0) {
815 struct btrfs_delayed_item *curr;
16cdcec7 816
506650dc
FM
817 mutex_lock(&node->mutex);
818 curr = __btrfs_first_delayed_insertion_item(node);
819 if (!curr) {
820 mutex_unlock(&node->mutex);
821 break;
822 }
823 ret = btrfs_insert_delayed_item(trans, root, path, curr);
824 mutex_unlock(&node->mutex);
16cdcec7 825 }
16cdcec7 826
16cdcec7
MX
827 return ret;
828}
829
830static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
831 struct btrfs_root *root,
832 struct btrfs_path *path,
833 struct btrfs_delayed_item *item)
834{
96d89923 835 const u64 ino = item->delayed_node->inode_id;
1f4f639f 836 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7 837 struct btrfs_delayed_item *curr, *next;
659192e6 838 struct extent_buffer *leaf = path->nodes[0];
4bd02d90
FM
839 LIST_HEAD(batch_list);
840 int nitems, slot, last_slot;
841 int ret;
1f4f639f 842 u64 total_reserved_size = item->bytes_reserved;
16cdcec7 843
659192e6 844 ASSERT(leaf != NULL);
16cdcec7 845
4bd02d90
FM
846 slot = path->slots[0];
847 last_slot = btrfs_header_nritems(leaf) - 1;
659192e6
FM
848 /*
849 * Our caller always gives us a path pointing to an existing item, so
850 * this can not happen.
851 */
4bd02d90
FM
852 ASSERT(slot <= last_slot);
853 if (WARN_ON(slot > last_slot))
659192e6 854 return -ENOENT;
16cdcec7 855
4bd02d90
FM
856 nitems = 1;
857 curr = item;
858 list_add_tail(&curr->tree_list, &batch_list);
859
16cdcec7 860 /*
4bd02d90
FM
861 * Keep checking if the next delayed item matches the next item in the
862 * leaf - if so, we can add it to the batch of items to delete from the
863 * leaf.
16cdcec7 864 */
4bd02d90
FM
865 while (slot < last_slot) {
866 struct btrfs_key key;
16cdcec7 867
16cdcec7
MX
868 next = __btrfs_next_delayed_item(curr);
869 if (!next)
870 break;
871
4bd02d90
FM
872 slot++;
873 btrfs_item_key_to_cpu(leaf, &key, slot);
96d89923
FM
874 if (key.objectid != ino ||
875 key.type != BTRFS_DIR_INDEX_KEY ||
876 key.offset != next->index)
16cdcec7 877 break;
4bd02d90
FM
878 nitems++;
879 curr = next;
880 list_add_tail(&curr->tree_list, &batch_list);
1f4f639f 881 total_reserved_size += curr->bytes_reserved;
16cdcec7
MX
882 }
883
16cdcec7
MX
884 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
885 if (ret)
4bd02d90 886 return ret;
16cdcec7 887
1f4f639f
NB
888 /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
889 if (total_reserved_size > 0) {
890 /*
891 * Check btrfs_delayed_item_reserve_metadata() to see why we
892 * don't need to release/reserve qgroup space.
893 */
96d89923
FM
894 trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
895 total_reserved_size, 0);
1f4f639f
NB
896 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
897 total_reserved_size, NULL);
898 }
899
4bd02d90 900 list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
16cdcec7
MX
901 list_del(&curr->tree_list);
902 btrfs_release_delayed_item(curr);
903 }
904
4bd02d90 905 return 0;
16cdcec7
MX
906}
907
908static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
909 struct btrfs_path *path,
910 struct btrfs_root *root,
911 struct btrfs_delayed_node *node)
912{
96d89923 913 struct btrfs_key key;
16cdcec7
MX
914 int ret = 0;
915
96d89923
FM
916 key.objectid = node->inode_id;
917 key.type = BTRFS_DIR_INDEX_KEY;
918
36baa2c7
FM
919 while (ret == 0) {
920 struct btrfs_delayed_item *item;
921
922 mutex_lock(&node->mutex);
923 item = __btrfs_first_delayed_deletion_item(node);
924 if (!item) {
925 mutex_unlock(&node->mutex);
926 break;
927 }
928
96d89923
FM
929 key.offset = item->index;
930 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
36baa2c7
FM
931 if (ret > 0) {
932 /*
933 * There's no matching item in the leaf. This means we
934 * have already deleted this item in a past run of the
935 * delayed items. We ignore errors when running delayed
936 * items from an async context, through a work queue job
937 * running btrfs_async_run_delayed_root(), and don't
938 * release delayed items that failed to complete. This
939 * is because we will retry later, and at transaction
940 * commit time we always run delayed items and will
941 * then deal with errors if they fail to run again.
942 *
943 * So just release delayed items for which we can't find
944 * an item in the tree, and move to the next item.
945 */
946 btrfs_release_path(path);
947 btrfs_release_delayed_item(item);
948 ret = 0;
949 } else if (ret == 0) {
950 ret = btrfs_batch_delete_items(trans, root, path, item);
951 btrfs_release_path(path);
952 }
16cdcec7 953
16cdcec7 954 /*
36baa2c7
FM
955 * We unlock and relock on each iteration, this is to prevent
956 * blocking other tasks for too long while we are being run from
957 * the async context (work queue job). Those tasks are typically
958 * running system calls like creat/mkdir/rename/unlink/etc which
959 * need to add delayed items to this delayed node.
16cdcec7 960 */
36baa2c7 961 mutex_unlock(&node->mutex);
16cdcec7
MX
962 }
963
16cdcec7
MX
964 return ret;
965}
966
967static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
968{
969 struct btrfs_delayed_root *delayed_root;
970
7cf35d91
MX
971 if (delayed_node &&
972 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
16cdcec7 973 BUG_ON(!delayed_node->root);
7cf35d91 974 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
16cdcec7
MX
975 delayed_node->count--;
976
977 delayed_root = delayed_node->root->fs_info->delayed_root;
de3cb945 978 finish_one_item(delayed_root);
16cdcec7
MX
979 }
980}
981
67de1176
MX
982static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
983{
67de1176 984
a4cb90dc
JB
985 if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
986 struct btrfs_delayed_root *delayed_root;
67de1176 987
a4cb90dc
JB
988 ASSERT(delayed_node->root);
989 delayed_node->count--;
990
991 delayed_root = delayed_node->root->fs_info->delayed_root;
992 finish_one_item(delayed_root);
993 }
67de1176
MX
994}
995
0e8c36a9
MX
996static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
997 struct btrfs_root *root,
998 struct btrfs_path *path,
999 struct btrfs_delayed_node *node)
16cdcec7 1000{
2ff7e61e 1001 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7
MX
1002 struct btrfs_key key;
1003 struct btrfs_inode_item *inode_item;
1004 struct extent_buffer *leaf;
67de1176 1005 int mod;
16cdcec7
MX
1006 int ret;
1007
16cdcec7 1008 key.objectid = node->inode_id;
962a298f 1009 key.type = BTRFS_INODE_ITEM_KEY;
16cdcec7 1010 key.offset = 0;
0e8c36a9 1011
67de1176
MX
1012 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1013 mod = -1;
1014 else
1015 mod = 1;
1016
1017 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
bb385bed
JB
1018 if (ret > 0)
1019 ret = -ENOENT;
1020 if (ret < 0)
1021 goto out;
16cdcec7 1022
16cdcec7
MX
1023 leaf = path->nodes[0];
1024 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1025 struct btrfs_inode_item);
1026 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1027 sizeof(struct btrfs_inode_item));
1028 btrfs_mark_buffer_dirty(leaf);
16cdcec7 1029
67de1176 1030 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
a4cb90dc 1031 goto out;
67de1176
MX
1032
1033 path->slots[0]++;
1034 if (path->slots[0] >= btrfs_header_nritems(leaf))
1035 goto search;
1036again:
1037 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1038 if (key.objectid != node->inode_id)
1039 goto out;
1040
1041 if (key.type != BTRFS_INODE_REF_KEY &&
1042 key.type != BTRFS_INODE_EXTREF_KEY)
1043 goto out;
1044
1045 /*
1046 * Delayed iref deletion is for the inode who has only one link,
1047 * so there is only one iref. The case that several irefs are
1048 * in the same item doesn't exist.
1049 */
1050 btrfs_del_item(trans, root, path);
1051out:
1052 btrfs_release_delayed_iref(node);
67de1176
MX
1053 btrfs_release_path(path);
1054err_out:
4f5427cc 1055 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
16cdcec7 1056 btrfs_release_delayed_inode(node);
16cdcec7 1057
04587ad9
JB
1058 /*
1059 * If we fail to update the delayed inode we need to abort the
1060 * transaction, because we could leave the inode with the improper
1061 * counts behind.
1062 */
1063 if (ret && ret != -ENOENT)
1064 btrfs_abort_transaction(trans, ret);
1065
67de1176
MX
1066 return ret;
1067
1068search:
1069 btrfs_release_path(path);
1070
962a298f 1071 key.type = BTRFS_INODE_EXTREF_KEY;
67de1176 1072 key.offset = -1;
351cbf6e 1073
67de1176
MX
1074 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1075 if (ret < 0)
1076 goto err_out;
1077 ASSERT(ret);
1078
1079 ret = 0;
1080 leaf = path->nodes[0];
1081 path->slots[0]--;
1082 goto again;
16cdcec7
MX
1083}
1084
0e8c36a9
MX
1085static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1086 struct btrfs_root *root,
1087 struct btrfs_path *path,
1088 struct btrfs_delayed_node *node)
1089{
1090 int ret;
1091
1092 mutex_lock(&node->mutex);
7cf35d91 1093 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
0e8c36a9
MX
1094 mutex_unlock(&node->mutex);
1095 return 0;
1096 }
1097
1098 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1099 mutex_unlock(&node->mutex);
1100 return ret;
1101}
1102
4ea41ce0
MX
1103static inline int
1104__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1105 struct btrfs_path *path,
1106 struct btrfs_delayed_node *node)
1107{
1108 int ret;
1109
1110 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1111 if (ret)
1112 return ret;
1113
1114 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1115 if (ret)
1116 return ret;
1117
1118 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1119 return ret;
1120}
1121
79787eaa
JM
1122/*
1123 * Called when committing the transaction.
1124 * Returns 0 on success.
1125 * Returns < 0 on error and returns with an aborted transaction with any
1126 * outstanding delayed items cleaned up.
1127 */
b84acab3 1128static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
16cdcec7 1129{
b84acab3 1130 struct btrfs_fs_info *fs_info = trans->fs_info;
16cdcec7
MX
1131 struct btrfs_delayed_root *delayed_root;
1132 struct btrfs_delayed_node *curr_node, *prev_node;
1133 struct btrfs_path *path;
19fd2949 1134 struct btrfs_block_rsv *block_rsv;
16cdcec7 1135 int ret = 0;
96c3f433 1136 bool count = (nr > 0);
16cdcec7 1137
bf31f87f 1138 if (TRANS_ABORTED(trans))
79787eaa
JM
1139 return -EIO;
1140
16cdcec7
MX
1141 path = btrfs_alloc_path();
1142 if (!path)
1143 return -ENOMEM;
16cdcec7 1144
19fd2949 1145 block_rsv = trans->block_rsv;
0b246afa 1146 trans->block_rsv = &fs_info->delayed_block_rsv;
19fd2949 1147
ccdf9b30 1148 delayed_root = fs_info->delayed_root;
16cdcec7
MX
1149
1150 curr_node = btrfs_first_delayed_node(delayed_root);
a4559e6f 1151 while (curr_node && (!count || nr--)) {
4ea41ce0
MX
1152 ret = __btrfs_commit_inode_delayed_items(trans, path,
1153 curr_node);
16cdcec7
MX
1154 if (ret) {
1155 btrfs_release_delayed_node(curr_node);
96c3f433 1156 curr_node = NULL;
66642832 1157 btrfs_abort_transaction(trans, ret);
16cdcec7
MX
1158 break;
1159 }
1160
1161 prev_node = curr_node;
1162 curr_node = btrfs_next_delayed_node(curr_node);
1163 btrfs_release_delayed_node(prev_node);
1164 }
1165
96c3f433
JB
1166 if (curr_node)
1167 btrfs_release_delayed_node(curr_node);
16cdcec7 1168 btrfs_free_path(path);
19fd2949 1169 trans->block_rsv = block_rsv;
79787eaa 1170
16cdcec7
MX
1171 return ret;
1172}
1173
e5c304e6 1174int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
96c3f433 1175{
b84acab3 1176 return __btrfs_run_delayed_items(trans, -1);
96c3f433
JB
1177}
1178
e5c304e6 1179int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
96c3f433 1180{
b84acab3 1181 return __btrfs_run_delayed_items(trans, nr);
96c3f433
JB
1182}
1183
16cdcec7 1184int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
5f4b32e9 1185 struct btrfs_inode *inode)
16cdcec7 1186{
5f4b32e9 1187 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
4ea41ce0
MX
1188 struct btrfs_path *path;
1189 struct btrfs_block_rsv *block_rsv;
16cdcec7
MX
1190 int ret;
1191
1192 if (!delayed_node)
1193 return 0;
1194
1195 mutex_lock(&delayed_node->mutex);
1196 if (!delayed_node->count) {
1197 mutex_unlock(&delayed_node->mutex);
1198 btrfs_release_delayed_node(delayed_node);
1199 return 0;
1200 }
1201 mutex_unlock(&delayed_node->mutex);
1202
4ea41ce0 1203 path = btrfs_alloc_path();
3c77bd94
FDBM
1204 if (!path) {
1205 btrfs_release_delayed_node(delayed_node);
4ea41ce0 1206 return -ENOMEM;
3c77bd94 1207 }
4ea41ce0
MX
1208
1209 block_rsv = trans->block_rsv;
1210 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1211
1212 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1213
16cdcec7 1214 btrfs_release_delayed_node(delayed_node);
4ea41ce0
MX
1215 btrfs_free_path(path);
1216 trans->block_rsv = block_rsv;
1217
16cdcec7
MX
1218 return ret;
1219}
1220
aa79021f 1221int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
0e8c36a9 1222{
3ffbd68c 1223 struct btrfs_fs_info *fs_info = inode->root->fs_info;
0e8c36a9 1224 struct btrfs_trans_handle *trans;
aa79021f 1225 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
0e8c36a9
MX
1226 struct btrfs_path *path;
1227 struct btrfs_block_rsv *block_rsv;
1228 int ret;
1229
1230 if (!delayed_node)
1231 return 0;
1232
1233 mutex_lock(&delayed_node->mutex);
7cf35d91 1234 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
0e8c36a9
MX
1235 mutex_unlock(&delayed_node->mutex);
1236 btrfs_release_delayed_node(delayed_node);
1237 return 0;
1238 }
1239 mutex_unlock(&delayed_node->mutex);
1240
1241 trans = btrfs_join_transaction(delayed_node->root);
1242 if (IS_ERR(trans)) {
1243 ret = PTR_ERR(trans);
1244 goto out;
1245 }
1246
1247 path = btrfs_alloc_path();
1248 if (!path) {
1249 ret = -ENOMEM;
1250 goto trans_out;
1251 }
0e8c36a9
MX
1252
1253 block_rsv = trans->block_rsv;
2ff7e61e 1254 trans->block_rsv = &fs_info->delayed_block_rsv;
0e8c36a9
MX
1255
1256 mutex_lock(&delayed_node->mutex);
7cf35d91 1257 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
0e8c36a9
MX
1258 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1259 path, delayed_node);
1260 else
1261 ret = 0;
1262 mutex_unlock(&delayed_node->mutex);
1263
1264 btrfs_free_path(path);
1265 trans->block_rsv = block_rsv;
1266trans_out:
3a45bb20 1267 btrfs_end_transaction(trans);
2ff7e61e 1268 btrfs_btree_balance_dirty(fs_info);
0e8c36a9
MX
1269out:
1270 btrfs_release_delayed_node(delayed_node);
1271
1272 return ret;
1273}
1274
f48d1cf5 1275void btrfs_remove_delayed_node(struct btrfs_inode *inode)
16cdcec7
MX
1276{
1277 struct btrfs_delayed_node *delayed_node;
1278
f48d1cf5 1279 delayed_node = READ_ONCE(inode->delayed_node);
16cdcec7
MX
1280 if (!delayed_node)
1281 return;
1282
f48d1cf5 1283 inode->delayed_node = NULL;
16cdcec7
MX
1284 btrfs_release_delayed_node(delayed_node);
1285}
1286
de3cb945
CM
1287struct btrfs_async_delayed_work {
1288 struct btrfs_delayed_root *delayed_root;
1289 int nr;
d458b054 1290 struct btrfs_work work;
16cdcec7
MX
1291};
1292
d458b054 1293static void btrfs_async_run_delayed_root(struct btrfs_work *work)
16cdcec7 1294{
de3cb945
CM
1295 struct btrfs_async_delayed_work *async_work;
1296 struct btrfs_delayed_root *delayed_root;
16cdcec7
MX
1297 struct btrfs_trans_handle *trans;
1298 struct btrfs_path *path;
1299 struct btrfs_delayed_node *delayed_node = NULL;
1300 struct btrfs_root *root;
19fd2949 1301 struct btrfs_block_rsv *block_rsv;
de3cb945 1302 int total_done = 0;
16cdcec7 1303
de3cb945
CM
1304 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1305 delayed_root = async_work->delayed_root;
16cdcec7
MX
1306
1307 path = btrfs_alloc_path();
1308 if (!path)
1309 goto out;
16cdcec7 1310
617c54a8
NB
1311 do {
1312 if (atomic_read(&delayed_root->items) <
1313 BTRFS_DELAYED_BACKGROUND / 2)
1314 break;
de3cb945 1315
617c54a8
NB
1316 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1317 if (!delayed_node)
1318 break;
de3cb945 1319
617c54a8 1320 root = delayed_node->root;
16cdcec7 1321
617c54a8
NB
1322 trans = btrfs_join_transaction(root);
1323 if (IS_ERR(trans)) {
1324 btrfs_release_path(path);
1325 btrfs_release_prepared_delayed_node(delayed_node);
1326 total_done++;
1327 continue;
1328 }
16cdcec7 1329
617c54a8
NB
1330 block_rsv = trans->block_rsv;
1331 trans->block_rsv = &root->fs_info->delayed_block_rsv;
19fd2949 1332
617c54a8 1333 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
16cdcec7 1334
617c54a8
NB
1335 trans->block_rsv = block_rsv;
1336 btrfs_end_transaction(trans);
1337 btrfs_btree_balance_dirty_nodelay(root->fs_info);
de3cb945 1338
617c54a8
NB
1339 btrfs_release_path(path);
1340 btrfs_release_prepared_delayed_node(delayed_node);
1341 total_done++;
de3cb945 1342
617c54a8
NB
1343 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1344 || total_done < async_work->nr);
de3cb945 1345
16cdcec7
MX
1346 btrfs_free_path(path);
1347out:
de3cb945
CM
1348 wake_up(&delayed_root->wait);
1349 kfree(async_work);
16cdcec7
MX
1350}
1351
de3cb945 1352
16cdcec7 1353static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
a585e948 1354 struct btrfs_fs_info *fs_info, int nr)
16cdcec7 1355{
de3cb945 1356 struct btrfs_async_delayed_work *async_work;
16cdcec7 1357
de3cb945
CM
1358 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1359 if (!async_work)
16cdcec7 1360 return -ENOMEM;
16cdcec7 1361
de3cb945 1362 async_work->delayed_root = delayed_root;
a0cac0ec
OS
1363 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1364 NULL);
de3cb945 1365 async_work->nr = nr;
16cdcec7 1366
a585e948 1367 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
16cdcec7
MX
1368 return 0;
1369}
1370
ccdf9b30 1371void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
e999376f 1372{
ccdf9b30 1373 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
e999376f
CM
1374}
1375
0353808c 1376static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
de3cb945
CM
1377{
1378 int val = atomic_read(&delayed_root->items_seq);
1379
0353808c 1380 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
de3cb945 1381 return 1;
0353808c
MX
1382
1383 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1384 return 1;
1385
de3cb945
CM
1386 return 0;
1387}
1388
2ff7e61e 1389void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
16cdcec7 1390{
2ff7e61e 1391 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
16cdcec7 1392
8577787f
NB
1393 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1394 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
16cdcec7
MX
1395 return;
1396
1397 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
0353808c 1398 int seq;
16cdcec7 1399 int ret;
0353808c
MX
1400
1401 seq = atomic_read(&delayed_root->items_seq);
de3cb945 1402
a585e948 1403 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
16cdcec7
MX
1404 if (ret)
1405 return;
1406
0353808c
MX
1407 wait_event_interruptible(delayed_root->wait,
1408 could_end_wait(delayed_root, seq));
4dd466d3 1409 return;
16cdcec7
MX
1410 }
1411
a585e948 1412 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
16cdcec7
MX
1413}
1414
79787eaa 1415/* Will return 0 or -ENOMEM */
16cdcec7 1416int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
2ff7e61e 1417 const char *name, int name_len,
6f45d185 1418 struct btrfs_inode *dir,
16cdcec7
MX
1419 struct btrfs_disk_key *disk_key, u8 type,
1420 u64 index)
1421{
763748b2
FM
1422 struct btrfs_fs_info *fs_info = trans->fs_info;
1423 const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
16cdcec7
MX
1424 struct btrfs_delayed_node *delayed_node;
1425 struct btrfs_delayed_item *delayed_item;
1426 struct btrfs_dir_item *dir_item;
763748b2
FM
1427 bool reserve_leaf_space;
1428 u32 data_len;
16cdcec7
MX
1429 int ret;
1430
6f45d185 1431 delayed_node = btrfs_get_or_create_delayed_node(dir);
16cdcec7
MX
1432 if (IS_ERR(delayed_node))
1433 return PTR_ERR(delayed_node);
1434
96d89923 1435 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
4c469798
FM
1436 delayed_node,
1437 BTRFS_DELAYED_INSERTION_ITEM);
16cdcec7
MX
1438 if (!delayed_item) {
1439 ret = -ENOMEM;
1440 goto release_node;
1441 }
1442
96d89923 1443 delayed_item->index = index;
16cdcec7
MX
1444
1445 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1446 dir_item->location = *disk_key;
3cae210f
QW
1447 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1448 btrfs_set_stack_dir_data_len(dir_item, 0);
1449 btrfs_set_stack_dir_name_len(dir_item, name_len);
1450 btrfs_set_stack_dir_type(dir_item, type);
16cdcec7
MX
1451 memcpy((char *)(dir_item + 1), name, name_len);
1452
763748b2 1453 data_len = delayed_item->data_len + sizeof(struct btrfs_item);
8c2a3ca2 1454
16cdcec7 1455 mutex_lock(&delayed_node->mutex);
763748b2
FM
1456
1457 if (delayed_node->index_item_leaves == 0 ||
1458 delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1459 delayed_node->curr_index_batch_size = data_len;
1460 reserve_leaf_space = true;
1461 } else {
1462 delayed_node->curr_index_batch_size += data_len;
1463 reserve_leaf_space = false;
1464 }
1465
1466 if (reserve_leaf_space) {
df492881 1467 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
763748b2
FM
1468 /*
1469 * Space was reserved for a dir index item insertion when we
1470 * started the transaction, so getting a failure here should be
1471 * impossible.
1472 */
1473 if (WARN_ON(ret)) {
1474 mutex_unlock(&delayed_node->mutex);
1475 btrfs_release_delayed_item(delayed_item);
1476 goto release_node;
1477 }
1478
1479 delayed_node->index_item_leaves++;
1480 } else if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
1481 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1482
1483 /*
1484 * Adding the new dir index item does not require touching another
1485 * leaf, so we can release 1 unit of metadata that was previously
1486 * reserved when starting the transaction. This applies only to
1487 * the case where we had a transaction start and excludes the
1488 * transaction join case (when replaying log trees).
1489 */
1490 trace_btrfs_space_reservation(fs_info, "transaction",
1491 trans->transid, bytes, 0);
1492 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1493 ASSERT(trans->bytes_reserved >= bytes);
1494 trans->bytes_reserved -= bytes;
1495 }
1496
c9d02ab4 1497 ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
16cdcec7 1498 if (unlikely(ret)) {
4465c8b4 1499 btrfs_err(trans->fs_info,
5d163e0e 1500 "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
4fd786e6 1501 name_len, name, delayed_node->root->root_key.objectid,
5d163e0e 1502 delayed_node->inode_id, ret);
16cdcec7
MX
1503 BUG();
1504 }
1505 mutex_unlock(&delayed_node->mutex);
1506
1507release_node:
1508 btrfs_release_delayed_node(delayed_node);
1509 return ret;
1510}
1511
2ff7e61e 1512static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
16cdcec7 1513 struct btrfs_delayed_node *node,
96d89923 1514 u64 index)
16cdcec7
MX
1515{
1516 struct btrfs_delayed_item *item;
1517
1518 mutex_lock(&node->mutex);
4cbf37f5 1519 item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
16cdcec7
MX
1520 if (!item) {
1521 mutex_unlock(&node->mutex);
1522 return 1;
1523 }
1524
763748b2
FM
1525 /*
1526 * For delayed items to insert, we track reserved metadata bytes based
1527 * on the number of leaves that we will use.
1528 * See btrfs_insert_delayed_dir_index() and
1529 * btrfs_delayed_item_reserve_metadata()).
1530 */
1531 ASSERT(item->bytes_reserved == 0);
1532 ASSERT(node->index_item_leaves > 0);
1533
1534 /*
1535 * If there's only one leaf reserved, we can decrement this item from the
1536 * current batch, otherwise we can not because we don't know which leaf
1537 * it belongs to. With the current limit on delayed items, we rarely
1538 * accumulate enough dir index items to fill more than one leaf (even
1539 * when using a leaf size of 4K).
1540 */
1541 if (node->index_item_leaves == 1) {
1542 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1543
1544 ASSERT(node->curr_index_batch_size >= data_len);
1545 node->curr_index_batch_size -= data_len;
1546 }
1547
16cdcec7 1548 btrfs_release_delayed_item(item);
763748b2
FM
1549
1550 /* If we now have no more dir index items, we can release all leaves. */
1551 if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1552 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1553 node->index_item_leaves = 0;
1554 }
1555
16cdcec7
MX
1556 mutex_unlock(&node->mutex);
1557 return 0;
1558}
1559
1560int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
e67bbbb9 1561 struct btrfs_inode *dir, u64 index)
16cdcec7
MX
1562{
1563 struct btrfs_delayed_node *node;
1564 struct btrfs_delayed_item *item;
16cdcec7
MX
1565 int ret;
1566
e67bbbb9 1567 node = btrfs_get_or_create_delayed_node(dir);
16cdcec7
MX
1568 if (IS_ERR(node))
1569 return PTR_ERR(node);
1570
96d89923 1571 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
16cdcec7
MX
1572 if (!ret)
1573 goto end;
1574
4c469798 1575 item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
16cdcec7
MX
1576 if (!item) {
1577 ret = -ENOMEM;
1578 goto end;
1579 }
1580
96d89923 1581 item->index = index;
16cdcec7 1582
df492881 1583 ret = btrfs_delayed_item_reserve_metadata(trans, item);
16cdcec7
MX
1584 /*
1585 * we have reserved enough space when we start a new transaction,
1586 * so reserving metadata failure is impossible.
1587 */
933c22a7
QW
1588 if (ret < 0) {
1589 btrfs_err(trans->fs_info,
1590"metadata reservation failed for delayed dir item deltiona, should have been reserved");
1591 btrfs_release_delayed_item(item);
1592 goto end;
1593 }
16cdcec7
MX
1594
1595 mutex_lock(&node->mutex);
c9d02ab4 1596 ret = __btrfs_add_delayed_item(node, item);
16cdcec7 1597 if (unlikely(ret)) {
9add2945 1598 btrfs_err(trans->fs_info,
5d163e0e 1599 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
4fd786e6
MT
1600 index, node->root->root_key.objectid,
1601 node->inode_id, ret);
933c22a7
QW
1602 btrfs_delayed_item_release_metadata(dir->root, item);
1603 btrfs_release_delayed_item(item);
16cdcec7
MX
1604 }
1605 mutex_unlock(&node->mutex);
1606end:
1607 btrfs_release_delayed_node(node);
1608 return ret;
1609}
1610
f5cc7b80 1611int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
16cdcec7 1612{
f5cc7b80 1613 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
16cdcec7
MX
1614
1615 if (!delayed_node)
1616 return -ENOENT;
1617
1618 /*
1619 * Since we have held i_mutex of this directory, it is impossible that
1620 * a new directory index is added into the delayed node and index_cnt
1621 * is updated now. So we needn't lock the delayed node.
1622 */
2f7e33d4
MX
1623 if (!delayed_node->index_cnt) {
1624 btrfs_release_delayed_node(delayed_node);
16cdcec7 1625 return -EINVAL;
2f7e33d4 1626 }
16cdcec7 1627
f5cc7b80 1628 inode->index_cnt = delayed_node->index_cnt;
2f7e33d4
MX
1629 btrfs_release_delayed_node(delayed_node);
1630 return 0;
16cdcec7
MX
1631}
1632
02dbfc99
OS
1633bool btrfs_readdir_get_delayed_items(struct inode *inode,
1634 struct list_head *ins_list,
1635 struct list_head *del_list)
16cdcec7
MX
1636{
1637 struct btrfs_delayed_node *delayed_node;
1638 struct btrfs_delayed_item *item;
1639
340c6ca9 1640 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
16cdcec7 1641 if (!delayed_node)
02dbfc99
OS
1642 return false;
1643
1644 /*
1645 * We can only do one readdir with delayed items at a time because of
1646 * item->readdir_list.
1647 */
64708539
JB
1648 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1649 btrfs_inode_lock(inode, 0);
16cdcec7
MX
1650
1651 mutex_lock(&delayed_node->mutex);
1652 item = __btrfs_first_delayed_insertion_item(delayed_node);
1653 while (item) {
089e77e1 1654 refcount_inc(&item->refs);
16cdcec7
MX
1655 list_add_tail(&item->readdir_list, ins_list);
1656 item = __btrfs_next_delayed_item(item);
1657 }
1658
1659 item = __btrfs_first_delayed_deletion_item(delayed_node);
1660 while (item) {
089e77e1 1661 refcount_inc(&item->refs);
16cdcec7
MX
1662 list_add_tail(&item->readdir_list, del_list);
1663 item = __btrfs_next_delayed_item(item);
1664 }
1665 mutex_unlock(&delayed_node->mutex);
1666 /*
1667 * This delayed node is still cached in the btrfs inode, so refs
 1668  * must be > 1 now, and we needn't check whether it is going to be
 1669  * freed or not.
 1670  *
 1671  * Besides that, this function is used for readdir; we do not
 1672  * insert/delete delayed items during that time, so we also needn't
1673 * requeue or dequeue this delayed node.
1674 */
6de5f18e 1675 refcount_dec(&delayed_node->refs);
02dbfc99
OS
1676
1677 return true;
16cdcec7
MX
1678}
1679
02dbfc99
OS
1680void btrfs_readdir_put_delayed_items(struct inode *inode,
1681 struct list_head *ins_list,
1682 struct list_head *del_list)
16cdcec7
MX
1683{
1684 struct btrfs_delayed_item *curr, *next;
1685
1686 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1687 list_del(&curr->readdir_list);
089e77e1 1688 if (refcount_dec_and_test(&curr->refs))
16cdcec7
MX
1689 kfree(curr);
1690 }
1691
1692 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1693 list_del(&curr->readdir_list);
089e77e1 1694 if (refcount_dec_and_test(&curr->refs))
16cdcec7
MX
1695 kfree(curr);
1696 }
02dbfc99
OS
1697
1698 /*
1699 * The VFS is going to do up_read(), so we need to downgrade back to a
1700 * read lock.
1701 */
1702 downgrade_write(&inode->i_rwsem);
16cdcec7
MX
1703}
1704
1705int btrfs_should_delete_dir_index(struct list_head *del_list,
1706 u64 index)
1707{
e4fd493c
JB
1708 struct btrfs_delayed_item *curr;
1709 int ret = 0;
16cdcec7 1710
e4fd493c 1711 list_for_each_entry(curr, del_list, readdir_list) {
96d89923 1712 if (curr->index > index)
16cdcec7 1713 break;
96d89923 1714 if (curr->index == index) {
e4fd493c
JB
1715 ret = 1;
1716 break;
1717 }
16cdcec7 1718 }
e4fd493c 1719 return ret;
16cdcec7
MX
1720}
1721
 1722/*
 1723 * btrfs_readdir_delayed_dir_index - emit the dir index entries that are
 1724 * still stored only in the delayed insertion tree.
 1725 */
9cdda8d3 1726int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
d2fbb2b5 1727 struct list_head *ins_list)
16cdcec7
MX
1728{
1729 struct btrfs_dir_item *di;
1730 struct btrfs_delayed_item *curr, *next;
1731 struct btrfs_key location;
1732 char *name;
1733 int name_len;
1734 int over = 0;
1735 unsigned char d_type;
1736
1737 if (list_empty(ins_list))
1738 return 0;
1739
1740 /*
 1741  * Changing the data of the delayed item is impossible, so
 1742  * we needn't lock them. And since we hold the i_mutex of the
 1743  * directory, nobody can delete any directory index now.
1744 */
1745 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1746 list_del(&curr->readdir_list);
1747
96d89923 1748 if (curr->index < ctx->pos) {
089e77e1 1749 if (refcount_dec_and_test(&curr->refs))
16cdcec7
MX
1750 kfree(curr);
1751 continue;
1752 }
1753
96d89923 1754 ctx->pos = curr->index;
16cdcec7
MX
1755
1756 di = (struct btrfs_dir_item *)curr->data;
1757 name = (char *)(di + 1);
3cae210f 1758 name_len = btrfs_stack_dir_name_len(di);
16cdcec7 1759
7d157c3d 1760 d_type = fs_ftype_to_dtype(di->type);
16cdcec7
MX
1761 btrfs_disk_key_to_cpu(&location, &di->location);
1762
9cdda8d3 1763 over = !dir_emit(ctx, name, name_len,
16cdcec7
MX
1764 location.objectid, d_type);
1765
089e77e1 1766 if (refcount_dec_and_test(&curr->refs))
16cdcec7
MX
1767 kfree(curr);
1768
1769 if (over)
1770 return 1;
42e9cc46 1771 ctx->pos++;
16cdcec7
MX
1772 }
1773 return 0;
1774}
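Taken together, the readdir helpers above are meant to be used in roughly the
pattern below, loosely following btrfs_real_readdir() in fs/btrfs/inode.c
(variable names, the btree walk and error handling are simplified assumptions):

	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);
	bool put;
	int ret;

	/* Snapshot the delayed items; also switches to an exclusive inode lock. */
	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);

	/* ... while walking the on-disk dir index items ... */
	if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
		goto next;	/* deletion is pending in the delayed node, skip it */
	/* ... otherwise emit the on-disk entry with dir_emit() ... */

	/* Emit entries that so far exist only as delayed insertion items. */
	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);

	if (put)
		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);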
1775
16cdcec7
MX
1776static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1777 struct btrfs_inode_item *inode_item,
1778 struct inode *inode)
1779{
77eea05e
BB
1780 u64 flags;
1781
2f2f43d3
EB
1782 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1783 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
16cdcec7
MX
1784 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1785 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1786 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1787 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1788 btrfs_set_stack_inode_generation(inode_item,
1789 BTRFS_I(inode)->generation);
c7f88c4e
JL
1790 btrfs_set_stack_inode_sequence(inode_item,
1791 inode_peek_iversion(inode));
16cdcec7
MX
1792 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1793 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
77eea05e
BB
1794 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1795 BTRFS_I(inode)->ro_flags);
1796 btrfs_set_stack_inode_flags(inode_item, flags);
ff5714cc 1797 btrfs_set_stack_inode_block_group(inode_item, 0);
16cdcec7 1798
a937b979 1799 btrfs_set_stack_timespec_sec(&inode_item->atime,
16cdcec7 1800 inode->i_atime.tv_sec);
a937b979 1801 btrfs_set_stack_timespec_nsec(&inode_item->atime,
16cdcec7
MX
1802 inode->i_atime.tv_nsec);
1803
a937b979 1804 btrfs_set_stack_timespec_sec(&inode_item->mtime,
16cdcec7 1805 inode->i_mtime.tv_sec);
a937b979 1806 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
16cdcec7
MX
1807 inode->i_mtime.tv_nsec);
1808
a937b979 1809 btrfs_set_stack_timespec_sec(&inode_item->ctime,
16cdcec7 1810 inode->i_ctime.tv_sec);
a937b979 1811 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
16cdcec7 1812 inode->i_ctime.tv_nsec);
9cc97d64 1813
1814 btrfs_set_stack_timespec_sec(&inode_item->otime,
1815 BTRFS_I(inode)->i_otime.tv_sec);
1816 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1817 BTRFS_I(inode)->i_otime.tv_nsec);
16cdcec7
MX
1818}
1819
2f7e33d4
MX
1820int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1821{
9ddc959e 1822 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2f7e33d4
MX
1823 struct btrfs_delayed_node *delayed_node;
1824 struct btrfs_inode_item *inode_item;
2f7e33d4 1825
340c6ca9 1826 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
2f7e33d4
MX
1827 if (!delayed_node)
1828 return -ENOENT;
1829
1830 mutex_lock(&delayed_node->mutex);
7cf35d91 1831 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2f7e33d4
MX
1832 mutex_unlock(&delayed_node->mutex);
1833 btrfs_release_delayed_node(delayed_node);
1834 return -ENOENT;
1835 }
1836
1837 inode_item = &delayed_node->inode_item;
1838
2f2f43d3
EB
1839 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1840 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
6ef06d27 1841 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
9ddc959e
JB
1842 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1843 round_up(i_size_read(inode), fs_info->sectorsize));
2f7e33d4 1844 inode->i_mode = btrfs_stack_inode_mode(inode_item);
bfe86848 1845 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
2f7e33d4
MX
1846 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1847 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
6e17d30b
YD
1848 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1849
c7f88c4e
JL
1850 inode_set_iversion_queried(inode,
1851 btrfs_stack_inode_sequence(inode_item));
2f7e33d4
MX
1852 inode->i_rdev = 0;
1853 *rdev = btrfs_stack_inode_rdev(inode_item);
77eea05e
BB
1854 btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1855 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
2f7e33d4 1856
a937b979
DS
1857 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1858 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
2f7e33d4 1859
a937b979
DS
1860 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1861 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
2f7e33d4 1862
a937b979
DS
1863 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1864 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
2f7e33d4 1865
9cc97d64 1866 BTRFS_I(inode)->i_otime.tv_sec =
1867 btrfs_stack_timespec_sec(&inode_item->otime);
1868 BTRFS_I(inode)->i_otime.tv_nsec =
1869 btrfs_stack_timespec_nsec(&inode_item->otime);
1870
2f7e33d4
MX
1871 inode->i_generation = BTRFS_I(inode)->generation;
1872 BTRFS_I(inode)->index_cnt = (u64)-1;
1873
1874 mutex_unlock(&delayed_node->mutex);
1875 btrfs_release_delayed_node(delayed_node);
1876 return 0;
1877}
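A rough sketch of the intended consumer, modeled on btrfs_read_locked_inode() in
fs/btrfs/inode.c: if the delayed node still holds a dirty copy of the inode item,
that in-memory copy is newer than what is in the subvolume tree, so copying the
item out of the leaf can be skipped (locking and the btree lookup are omitted,
and the cache_index label is assumed from that function):

	u32 rdev;
	int ret;
	bool filled = false;

	/* Prefer the inode item cached in the delayed node, if there is one. */
	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	/* ... look up the INODE_ITEM in the subvolume tree ... */

	if (filled)
		goto cache_index;	/* skip re-reading fields from the leaf */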
1878
16cdcec7 1879int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
f3fbcaef
NB
1880 struct btrfs_root *root,
1881 struct btrfs_inode *inode)
16cdcec7
MX
1882{
1883 struct btrfs_delayed_node *delayed_node;
aa0467d8 1884 int ret = 0;
16cdcec7 1885
f3fbcaef 1886 delayed_node = btrfs_get_or_create_delayed_node(inode);
16cdcec7
MX
1887 if (IS_ERR(delayed_node))
1888 return PTR_ERR(delayed_node);
1889
1890 mutex_lock(&delayed_node->mutex);
7cf35d91 1891 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
f3fbcaef
NB
1892 fill_stack_inode_item(trans, &delayed_node->inode_item,
1893 &inode->vfs_inode);
16cdcec7
MX
1894 goto release_node;
1895 }
1896
8e3c9d3c 1897 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
c06a0e12
JB
1898 if (ret)
1899 goto release_node;
16cdcec7 1900
f3fbcaef 1901 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
7cf35d91 1902 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
16cdcec7
MX
1903 delayed_node->count++;
1904 atomic_inc(&root->fs_info->delayed_root->items);
1905release_node:
1906 mutex_unlock(&delayed_node->mutex);
1907 btrfs_release_delayed_node(delayed_node);
1908 return ret;
1909}
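A condensed sketch of how the update is expected to be routed, loosely based on
btrfs_update_inode() in fs/btrfs/inode.c; the direct-update fallback
btrfs_update_inode_item() is a static helper assumed from that file, and the
exact set of conditions guarding the delayed path is simplified:

	/* Queue the inode update in the delayed node when it is safe to do so. */
	if (!btrfs_is_free_space_inode(inode) &&
	    !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	/* Otherwise update the inode item in the subvolume tree right away. */
	return btrfs_update_inode_item(trans, root, inode);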
1910
e07222c7 1911int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
67de1176 1912{
3ffbd68c 1913 struct btrfs_fs_info *fs_info = inode->root->fs_info;
67de1176
MX
1914 struct btrfs_delayed_node *delayed_node;
1915
6f896054
CM
1916 /*
 1917  * We don't do delayed inode updates during log recovery because it
 1918  * leads to ENOSPC problems. This means we also can't do
 1919  * delayed inode refs.
1920 */
0b246afa 1921 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
6f896054
CM
1922 return -EAGAIN;
1923
e07222c7 1924 delayed_node = btrfs_get_or_create_delayed_node(inode);
67de1176
MX
1925 if (IS_ERR(delayed_node))
1926 return PTR_ERR(delayed_node);
1927
1928 /*
 1929  * We don't reserve space for the inode ref deletion because:
 1930  * - We ONLY do async inode ref deletion for an inode that has only
 1931  *   one link (i_nlink == 1), which means there is only one inode ref.
 1932  *   And in most cases, the inode ref and the inode item are in the
 1933  *   same leaf, so we will deal with them at the same time.
 1934  *   Since we are sure we will reserve the space for the inode item,
 1935  *   it is unnecessary to reserve space for the inode ref deletion.
 1936  * - If the inode ref and the inode item are not in the same leaf,
 1937  *   we also needn't worry about ENOSPC, because we reserve much more
 1938  *   space for the inode update than it needs.
 1939  * - At the worst, we can steal some space from the global reservation.
 1940  *   That is very rare.
1941 */
1942 mutex_lock(&delayed_node->mutex);
1943 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1944 goto release_node;
1945
1946 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1947 delayed_node->count++;
0b246afa 1948 atomic_inc(&fs_info->delayed_root->items);
67de1176
MX
1949release_node:
1950 mutex_unlock(&delayed_node->mutex);
1951 btrfs_release_delayed_node(delayed_node);
1952 return 0;
1953}
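A minimal sketch of the caller side, loosely based on __btrfs_unlink_inode() in
fs/btrfs/inode.c: when the delayed path refuses (for example the -EAGAIN above
during log recovery), the unlink falls back to deleting the INODE_REF item
directly; the prototype of btrfs_del_inode_ref() from fs/btrfs/inode-item.c is
quoted from memory and should be treated as an assumption:

	ret = btrfs_delayed_delete_inode_ref(inode);
	if (ret) {
		/* Delayed deletion not possible, remove the inode ref in place. */
		ret = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, dir_ino, &index);
	}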
1954
16cdcec7
MX
1955static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1956{
1957 struct btrfs_root *root = delayed_node->root;
2ff7e61e 1958 struct btrfs_fs_info *fs_info = root->fs_info;
16cdcec7
MX
1959 struct btrfs_delayed_item *curr_item, *prev_item;
1960
1961 mutex_lock(&delayed_node->mutex);
1962 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1963 while (curr_item) {
16cdcec7
MX
1964 prev_item = curr_item;
1965 curr_item = __btrfs_next_delayed_item(prev_item);
1966 btrfs_release_delayed_item(prev_item);
1967 }
1968
763748b2
FM
1969 if (delayed_node->index_item_leaves > 0) {
1970 btrfs_delayed_item_release_leaves(delayed_node,
1971 delayed_node->index_item_leaves);
1972 delayed_node->index_item_leaves = 0;
1973 }
1974
16cdcec7
MX
1975 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1976 while (curr_item) {
4f5427cc 1977 btrfs_delayed_item_release_metadata(root, curr_item);
16cdcec7
MX
1978 prev_item = curr_item;
1979 curr_item = __btrfs_next_delayed_item(prev_item);
1980 btrfs_release_delayed_item(prev_item);
1981 }
1982
a4cb90dc 1983 btrfs_release_delayed_iref(delayed_node);
67de1176 1984
7cf35d91 1985 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
4f5427cc 1986 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
16cdcec7
MX
1987 btrfs_release_delayed_inode(delayed_node);
1988 }
1989 mutex_unlock(&delayed_node->mutex);
1990}
1991
4ccb5c72 1992void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
16cdcec7
MX
1993{
1994 struct btrfs_delayed_node *delayed_node;
1995
4ccb5c72 1996 delayed_node = btrfs_get_delayed_node(inode);
16cdcec7
MX
1997 if (!delayed_node)
1998 return;
1999
2000 __btrfs_kill_delayed_node(delayed_node);
2001 btrfs_release_delayed_node(delayed_node);
2002}
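This is the eviction hook; a one-line sketch of the expected call site, loosely
based on btrfs_evict_inode() in fs/btrfs/inode.c:

	/* Drop any delayed items still queued for the inode being evicted. */
	btrfs_kill_delayed_inode_items(BTRFS_I(inode));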
2003
2004void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2005{
088aea3b 2006 u64 inode_id = 0;
16cdcec7 2007 struct btrfs_delayed_node *delayed_nodes[8];
088aea3b 2008 int i, n;
16cdcec7
MX
2009
2010 while (1) {
2011 spin_lock(&root->inode_lock);
088aea3b
DS
2012 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
2013 (void **)delayed_nodes, inode_id,
2014 ARRAY_SIZE(delayed_nodes));
2015 if (!n) {
16cdcec7 2016 spin_unlock(&root->inode_lock);
088aea3b 2017 break;
16cdcec7
MX
2018 }
2019
088aea3b
DS
2020 inode_id = delayed_nodes[n - 1]->inode_id + 1;
2021 for (i = 0; i < n; i++) {
baf320b9
JB
2022 /*
2023 * Don't increase refs in case the node is dead and
2024 * about to be removed from the tree in the loop below
2025 */
088aea3b
DS
2026 if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
2027 delayed_nodes[i] = NULL;
baf320b9 2028 }
16cdcec7
MX
2029 spin_unlock(&root->inode_lock);
2030
088aea3b
DS
2031 for (i = 0; i < n; i++) {
2032 if (!delayed_nodes[i])
2033 continue;
16cdcec7
MX
2034 __btrfs_kill_delayed_node(delayed_nodes[i]);
2035 btrfs_release_delayed_node(delayed_nodes[i]);
2036 }
2037 }
2038}
67cde344 2039
ccdf9b30 2040void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
67cde344 2041{
67cde344
MX
2042 struct btrfs_delayed_node *curr_node, *prev_node;
2043
ccdf9b30 2044 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
67cde344
MX
2045 while (curr_node) {
2046 __btrfs_kill_delayed_node(curr_node);
2047
2048 prev_node = curr_node;
2049 curr_node = btrfs_next_delayed_node(curr_node);
2050 btrfs_release_delayed_node(prev_node);
2051 }
2052}
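A short sketch of the expected use during transaction-abort cleanup, loosely
based on the error-handling path in fs/btrfs/disk-io.c (the exact placement in
that path is an assumption):

	/* Throw away every queued delayed inode, then check nothing is left. */
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);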
2053
30b80f3c
FM
2054void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2055 struct list_head *ins_list,
2056 struct list_head *del_list)
2057{
2058 struct btrfs_delayed_node *node;
2059 struct btrfs_delayed_item *item;
2060
2061 node = btrfs_get_delayed_node(inode);
2062 if (!node)
2063 return;
2064
2065 mutex_lock(&node->mutex);
2066 item = __btrfs_first_delayed_insertion_item(node);
2067 while (item) {
2068 /*
2069 * It's possible that the item is already in a log list. This
2070 * can happen in case two tasks are trying to log the same
2071 * directory. For example if we have tasks A and task B:
2072 *
2073 * Task A collected the delayed items into a log list while
2074 * under the inode's log_mutex (at btrfs_log_inode()), but it
2075 * only releases the items after logging the inodes they point
2076 * to (if they are new inodes), which happens after unlocking
2077 * the log mutex;
2078 *
2079 * Task B enters btrfs_log_inode() and acquires the log_mutex
2080 * of the same directory inode, before task B releases the
2081 * delayed items. This can happen for example when logging some
2082 * inode we need to trigger logging of its parent directory, so
2083 * logging two files that have the same parent directory can
2084 * lead to this.
2085 *
2086 * If this happens, just ignore delayed items already in a log
2087 * list. All the tasks logging the directory are under a log
2088 * transaction and whichever finishes first can not sync the log
2089 * before the other completes and leaves the log transaction.
2090 */
2091 if (!item->logged && list_empty(&item->log_list)) {
2092 refcount_inc(&item->refs);
2093 list_add_tail(&item->log_list, ins_list);
2094 }
2095 item = __btrfs_next_delayed_item(item);
2096 }
2097
2098 item = __btrfs_first_delayed_deletion_item(node);
2099 while (item) {
2100 /* It may be non-empty, for the same reason mentioned above. */
2101 if (!item->logged && list_empty(&item->log_list)) {
2102 refcount_inc(&item->refs);
2103 list_add_tail(&item->log_list, del_list);
2104 }
2105 item = __btrfs_next_delayed_item(item);
2106 }
2107 mutex_unlock(&node->mutex);
2108
2109 /*
2110 * We are called during inode logging, which means the inode is in use
 2111  * and cannot be evicted before we finish logging the inode. So we never
 2112  * have the last reference on the delayed inode.
 2113  * Also, we don't use btrfs_release_delayed_node() because that would
 2114  * requeue the delayed inode (change its order in the list of prepared
 2115  * nodes) and we don't want to make such a change because we don't create or
2116 * delete delayed items.
2117 */
2118 ASSERT(refcount_read(&node->refs) > 1);
2119 refcount_dec(&node->refs);
2120}
2121
2122void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2123 struct list_head *ins_list,
2124 struct list_head *del_list)
2125{
2126 struct btrfs_delayed_node *node;
2127 struct btrfs_delayed_item *item;
2128 struct btrfs_delayed_item *next;
2129
2130 node = btrfs_get_delayed_node(inode);
2131 if (!node)
2132 return;
2133
2134 mutex_lock(&node->mutex);
2135
2136 list_for_each_entry_safe(item, next, ins_list, log_list) {
2137 item->logged = true;
2138 list_del_init(&item->log_list);
2139 if (refcount_dec_and_test(&item->refs))
2140 kfree(item);
2141 }
2142
2143 list_for_each_entry_safe(item, next, del_list, log_list) {
2144 item->logged = true;
2145 list_del_init(&item->log_list);
2146 if (refcount_dec_and_test(&item->refs))
2147 kfree(item);
2148 }
2149
2150 mutex_unlock(&node->mutex);
2151
2152 /*
2153 * We are called during inode logging, which means the inode is in use
 2154  * and cannot be evicted before we finish logging the inode. So we never
 2155  * have the last reference on the delayed inode.
 2156  * Also, we don't use btrfs_release_delayed_node() because that would
 2157  * requeue the delayed inode (change its order in the list of prepared
 2158  * nodes) and we don't want to make such a change because we don't create or
2159 * delete delayed items.
2160 */
2161 ASSERT(refcount_read(&node->refs) > 1);
2162 refcount_dec(&node->refs);
2163}
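A condensed sketch of how the two logging helpers above pair up, loosely
following btrfs_log_inode() in fs/btrfs/tree-log.c; log_delayed_insertion_items()
and log_delayed_deletion_items() are static helpers assumed from that file, and
the guarding conditions (full_dir_logging, ctx, path) are simplified assumptions:

	LIST_HEAD(delayed_ins_list);
	LIST_HEAD(delayed_del_list);

	/* Collect the delayed dir items so they can be copied into the log tree. */
	if (full_dir_logging)
		btrfs_log_get_delayed_items(inode, &delayed_ins_list,
					    &delayed_del_list);

	/* ... copy delayed insertions/deletions into the log tree ... */
	ret = log_delayed_insertion_items(trans, inode, path,
					  &delayed_ins_list, ctx);
	if (!ret)
		ret = log_delayed_deletion_items(trans, inode, path,
						 &delayed_del_list, ctx);

	/* Mark the collected items as logged and drop our references. */
	if (full_dir_logging)
		btrfs_log_put_delayed_items(inode, &delayed_ins_list,
					    &delayed_del_list);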