fs/f2fs/extent_cache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * block_age-based extent cache added by:
 * Copyright (c) 2022 xiaomi Co., Ltd.
 * http://www.xiaomi.com/
 */
14
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
21
/*
 * Validate the cached largest read extent of @inode.  Returns true when
 * the cache is consistent (or was safely dropped), false when on-disk
 * corruption was detected and fsck is required.
 */
bool sanity_check_extent_cache(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct extent_tree *et = fi->extent_tree[EX_READ];
	struct extent_info *ei;

	/* no read extent tree was ever attached: nothing to check */
	if (!et)
		return true;

	ei = &et->largest;
	if (!ei->len)
		return true;

	/* Let's drop, if checkpoint got corrupted. */
	if (is_set_ckpt_flags(sbi, CP_ERROR_FLAG)) {
		ei->len = 0;
		et->largest_updated = true;
		return true;
	}

	/* both ends of the extent must be valid block addresses */
	if (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE) ||
	    !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
					DATA_GENERIC_ENHANCE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
			  __func__, inode->i_ino,
			  ei->blk, ei->fofs, ei->len);
		return false;
	}
	return true;
}
54
/*
 * Fill @ei for an extent of @len pages starting at file offset @fofs.
 * For EX_READ, @blk is the start block address and @keep_clen preserves
 * an already-set compressed-cluster length (c_len) instead of clearing
 * it.  For EX_BLOCK_AGE, @age and @last_blocks are recorded instead.
 */
static void __set_extent_info(struct extent_info *ei,
				unsigned int fofs, unsigned int len,
				block_t blk, bool keep_clen,
				unsigned long age, unsigned long last_blocks,
				enum extent_type type)
{
	ei->fofs = fofs;
	ei->len = len;

	if (type == EX_READ) {
		ei->blk = blk;
		if (keep_clen)
			return;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		ei->c_len = 0;
#endif
	} else if (type == EX_BLOCK_AGE) {
		ei->age = age;
		ei->last_blocks = last_blocks;
	}
}
76
77static bool __may_read_extent_tree(struct inode *inode)
78{
79 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
80
81 if (!test_opt(sbi, READ_EXTENT_CACHE))
82 return false;
83 if (is_inode_flag_set(inode, FI_NO_EXTENT))
84 return false;
85 if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
86 !f2fs_sb_has_readonly(sbi))
87 return false;
88 return S_ISREG(inode->i_mode);
3bac20a8
JK
89}
90
71644dff
JK
91static bool __may_age_extent_tree(struct inode *inode)
92{
93 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
94
95 if (!test_opt(sbi, AGE_EXTENT_CACHE))
96 return false;
71644dff
JK
97 if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
98 return false;
99 if (file_is_cold(inode))
100 return false;
101
102 return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
103}
104
72840ccc 105static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
3bac20a8 106{
72840ccc
JK
107 if (type == EX_READ)
108 return __may_read_extent_tree(inode);
71644dff
JK
109 else if (type == EX_BLOCK_AGE)
110 return __may_age_extent_tree(inode);
72840ccc
JK
111 return false;
112}
3bac20a8 113
/*
 * Runtime variant of the policy check: additionally refuses extent use
 * when the sb is not on the shrinker list (see comment below).
 */
static bool __may_extent_tree(struct inode *inode, enum extent_type type)
{
	/*
	 * for recovered files during mount do not create extents
	 * if shrinker is not registered.
	 */
	if (list_empty(&F2FS_I_SB(inode)->s_list))
		return false;

	return __init_may_extent_tree(inode, type);
}
125
/*
 * Record @en as the tree's largest extent when it beats the current
 * one.  Only EX_READ trees track a largest extent; other types return
 * immediately.
 */
static void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (et->type != EX_READ)
		return;
	if (en->ei.len <= et->largest.len)
		return;

	et->largest = en->ei;
	et->largest_updated = true;
}
137
/*
 * Return true if @back and @front describe logically adjacent ranges
 * that may be merged into one extent.  EX_READ additionally requires
 * physical adjacency; EX_BLOCK_AGE requires the ages to be close
 * enough (within SAME_AGE_REGION).
 */
static bool __is_extent_mergeable(struct extent_info *back,
		struct extent_info *front, enum extent_type type)
{
	if (type == EX_READ) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		/* partially mapped compressed clusters must not be merged */
		if (back->c_len && back->len != back->c_len)
			return false;
		if (front->c_len && front->len != front->c_len)
			return false;
#endif
		return (back->fofs + back->len == front->fofs &&
				back->blk + back->len == front->blk);
	} else if (type == EX_BLOCK_AGE) {
		return (back->fofs + back->len == front->fofs &&
			abs(back->age - front->age) <= SAME_AGE_REGION &&
			abs(back->last_blocks - front->last_blocks) <=
			SAME_AGE_REGION);
	}
	return false;
}
158
/* Can @back be merged onto the tail of @cur's predecessor? */
static bool __is_back_mergeable(struct extent_info *cur,
		struct extent_info *back, enum extent_type type)
{
	return __is_extent_mergeable(back, cur, type);
}
164
/* Can @cur be merged with the extent that follows it (@front)? */
static bool __is_front_mergeable(struct extent_info *cur,
		struct extent_info *front, enum extent_type type)
{
	return __is_extent_mergeable(cur, front, type);
}
170
/*
 * Find the extent node covering page offset @fofs: check the cached
 * node first, then binary-search the rb-tree.  Returns NULL when no
 * extent covers @fofs.  Caller holds et->lock.
 */
static struct extent_node *__lookup_extent_node(struct rb_root_cached *root,
			struct extent_node *cached_en, unsigned int fofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct extent_node *en;

	/* check a cached entry */
	if (cached_en && cached_en->ei.fofs <= fofs &&
			cached_en->ei.fofs + cached_en->ei.len > fofs)
		return cached_en;

	/* check rb_tree */
	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			node = node->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			node = node->rb_right;
		else
			return en;
	}
	return NULL;
}
195
/*
 * lookup rb entry in position of @fofs in rb-tree,
 * if hit, return the entry, otherwise, return NULL
 * @prev_entry: extent before fofs
 * @next_entry: extent after fofs
 * @insert_p: insert point for new extent at fofs
 * in order to simplify the insertion after.
 * tree must stay unchanged between lookup and insertion.
 */
static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root,
				struct extent_node *cached_en,
				unsigned int fofs,
				struct extent_node **prev_entry,
				struct extent_node **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct extent_node *en = cached_en;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	/* fast path: the cached node already covers @fofs */
	if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs)
		goto lookup_neighbors;

	*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		en = rb_entry(*pnode, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			pnode = &(*pnode)->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			pnode = &(*pnode)->rb_right;
			*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	/* miss: remember where a new node for @fofs would be linked */
	*insert_p = pnode;
	*insert_parent = parent;

	/* en is only dereferenced below when parent != NULL */
	en = rb_entry(parent, struct extent_node, rb_node);
	tmp_node = parent;
	if (parent && fofs > en->ei.fofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);

	tmp_node = parent;
	if (parent && fofs < en->ei.fofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
	return NULL;

lookup_neighbors:
	if (fofs == en->ei.fofs) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&en->rb_node);
		*prev_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	if (fofs == en->ei.fofs + en->ei.len - 1) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&en->rb_node);
		*next_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	return en;
}
275
a28ef1f5
CY
276static struct kmem_cache *extent_tree_slab;
277static struct kmem_cache *extent_node_slab;
278
/*
 * Allocate an extent node for @ei and link it into @et's rb-tree at the
 * given insert position (@parent/@p).  Returns NULL on allocation
 * failure.  Callers hold et->lock for writing.
 */
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en;

	/* GFP_ATOMIC: may be called under spinlock/rwlock */
	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&eti->total_ext_node);
	return en;
}
301
/*
 * Unlink @en from @et's rb-tree, drop the counters, invalidate the
 * cached-node pointer if needed, and free the node.  The node must
 * already be off the global LRU list.
 */
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&eti->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}
315
/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
		struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	/* a live node must always be on the global LRU list */
	spin_lock(&eti->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&eti->extent_lock);

	__detach_extent_node(sbi, et, en);
}
334
/*
 * Return the extent tree of @type for @inode, creating and registering
 * a fresh one in the per-sb radix tree when none exists.  A tree found
 * on the zombie list is revived instead.  The result is also cached in
 * the inode (extent_tree[type]) and lives until evict_inode.
 */
static struct extent_tree *__grab_extent_tree(struct inode *inode,
						enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&eti->extent_tree_lock);
	et = radix_tree_lookup(&eti->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->type = type;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&eti->total_ext_tree);
	} else {
		/* tree was a zombie (inode previously evicted): revive it */
		atomic_dec(&eti->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&eti->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree[type] = et;

	return et;
}
369
/*
 * Release every extent node in @et and return how many were freed.
 * Callers hold et->lock for writing.
 */
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
	while (node) {
		/* grab the successor before the node is erased */
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}
387
/*
 * Invalidate the cached largest extent if it overlaps the page range
 * [fofs, fofs + len).
 */
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}
397
/*
 * Set up the read extent tree for @inode from the on-disk i_ext stored
 * in its inode page @ipage.  If the inode may not use a read extent
 * cache, any on-disk largest extent is cleared instead and the inode is
 * marked FI_NO_EXTENT.
 */
void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!__may_extent_tree(inode, EX_READ)) {
		/* drop largest read extent */
		if (i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		goto out;
	}

	et = __grab_extent_tree(inode, EX_READ);

	if (!i_ext || !i_ext->len)
		goto out;

	get_read_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	/* tree already populated (e.g. raced with another initializer) */
	if (atomic_read(&et->node_cnt))
		goto unlock_out;

	en = __attach_extent_node(sbi, et, &ei, NULL,
				&et->root.rb_root.rb_node, true);
	if (en) {
		et->largest = en->ei;
		et->cached_en = en;

		spin_lock(&eti->extent_lock);
		list_add_tail(&en->list, &eti->extent_list);
		spin_unlock(&eti->extent_lock);
	}
unlock_out:
	write_unlock(&et->lock);
out:
	if (!F2FS_I(inode)->extent_tree[EX_READ])
		set_inode_flag(inode, FI_NO_EXTENT);
}
444
71644dff
JK
445void f2fs_init_age_extent_tree(struct inode *inode)
446{
447 if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
448 return;
449 __grab_extent_tree(inode, EX_BLOCK_AGE);
450}
451
/*
 * Attach all extent trees (read cache and block-age cache) that policy
 * permits for a newly initialized @inode.
 */
void f2fs_init_extent_tree(struct inode *inode)
{
	/* initialize read cache */
	if (__init_may_extent_tree(inode, EX_READ))
		__grab_extent_tree(inode, EX_READ);

	/* initialize block age cache */
	if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
		__grab_extent_tree(inode, EX_BLOCK_AGE);
}
462
/*
 * Look up the extent covering @pgofs in @inode's tree of @type and copy
 * it into @ei.  For EX_READ the cached largest extent is tried first.
 * A hit refreshes the node's position on the global LRU list.  Returns
 * true on hit, false on miss.
 */
static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en;
	bool ret = false;

	if (!et)
		return false;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);

	read_lock(&et->lock);

	/* fast path: the largest read extent covers @pgofs */
	if (type == EX_READ &&
			et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = __lookup_extent_node(&et->root, et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi, type);
	else
		stat_inc_rbtree_node_hit(sbi, type);

	*ei = en->ei;
	spin_lock(&eti->extent_lock);
	/* an empty list means the node is being shrunk: don't touch it */
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi, type);
	read_unlock(&et->lock);

	if (type == EX_READ)
		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
	return ret;
}
515
/*
 * Try to merge @ei into its neighbors @prev_ex and/or @next_ex.  When
 * both merges apply, @prev_ex is folded into @next_ex and released.
 * Returns the surviving node, or NULL when no merge was possible.
 * Caller holds et->lock for writing.
 */
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.len += ei->len;
		if (et->type == EX_READ)
			next_ex->ei.blk = ei->blk;
		/* prev was absorbed into next: drop the prev node */
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	return en;
}
554
/*
 * Insert a new node for @ei into @et.  A precomputed insert position
 * (@insert_p/@insert_parent, from __lookup_extent_node_ret) is used
 * when available; otherwise the position is found by walking the tree.
 * Returns the new node, or NULL on allocation failure.  Caller holds
 * et->lock for writing.
 */
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	/* look up extent_node in the rb tree */
	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* overlapping insert must never happen */
			f2fs_bug_on(sbi, 1);
		}
	}

do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&eti->extent_lock);
	list_add_tail(&en->list, &eti->extent_list);
	et->cached_en = en;
	spin_unlock(&eti->extent_lock);
	return en;
}
603
/*
 * Update @inode's extent tree of @type for the range described by @tei
 * ([tei->fofs, tei->fofs + tei->len)).  Existing extents overlapping
 * the range are trimmed or split, then the new extent is merged with
 * its neighbors or inserted.  For EX_READ an over-fragmented tree is
 * dropped entirely and the inode is marked FI_NO_EXTENT.
 */
static void __update_extent_tree_range(struct inode *inode,
			struct extent_info *tei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int fofs = tei->fofs, len = tei->len;
	unsigned int end = fofs + len;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	if (type == EX_READ)
		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
						tei->blk, 0);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
						tei->age, tei->last_blocks);

	write_lock(&et->lock);

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
			write_unlock(&et->lock);
			return;
		}

		prev = et->largest;
		dei.len = 0;

		/*
		 * drop largest extent before lookup, in case it's already
		 * been shrunk from extent tree
		 */
		__drop_largest_extent(et, fofs, len);
	}

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, fofs >= org_end);

		/* keep a left remainder (EX_READ only if big enough) */
		if (fofs > dei.fofs && (type != EX_READ ||
				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
			en->ei.len = fofs - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		/* keep a right remainder (EX_READ only if big enough) */
		if (end < org_end && (type != EX_READ ||
				org_end - end >= F2FS_MIN_EXTENT_LEN)) {
			if (parts) {
				/* both sides survive: insert a new node for the right part */
				__set_extent_info(&ei,
					end, org_end - end,
					end - dei.fofs + dei.blk, false,
					dei.age, dei.last_blocks,
					type);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				/* only right side survives: shrink node in place */
				__set_extent_info(&en->ei,
					end, en->ei.len - (end - dei.fofs),
					en->ei.blk + (end - dei.fofs), true,
					dei.age, dei.last_blocks,
					type);
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if original extent is split into zero or two parts, extent
		 * tree has been altered by deletion or insertion, therefore
		 * invalidate pointers regard to tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	if (type == EX_BLOCK_AGE)
		goto update_age_extent_cache;

	/* 3. update extent in read extent cache */
	BUG_ON(type != EX_READ);

	if (tei->blk) {
		__set_extent_info(&ei, fofs, len, tei->blk, false,
				  0, 0, EX_READ);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}
	goto out_read_extent_cache;
update_age_extent_cache:
	if (!tei->last_blocks)
		goto out_read_extent_cache;

	__set_extent_info(&ei, fofs, len, 0, false,
			tei->age, tei->last_blocks, EX_BLOCK_AGE);
	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);
out_read_extent_cache:
	write_unlock(&et->lock);

	/* sync the inode only after dropping et->lock */
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
765
#ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * Record a compressed cluster in the read extent cache: logical length
 * @llen starting at @fofs mapped to @blkaddr, with physical (compressed)
 * length @c_len.  Existing coverage of @fofs is left untouched.
 */
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
	struct extent_node *en = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
						blkaddr, c_len);

	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return;

	write_lock(&et->lock);

	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	/* an extent already covers @fofs: nothing to do */
	if (en)
		goto unlock_out;

	__set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
	ei.c_len = c_len;

	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
				insert_p, insert_parent, leftmost);
unlock_out:
	write_unlock(&et->lock);
}
#endif
806
/*
 * Weighted average of a new and an old age sample:
 *   result = new * (100 - last_age_weight)% + old * last_age_weight%
 * computed with div_u64_rem so 64-bit division is safe on 32-bit hosts
 * and the remainders' contribution is not lost.
 */
static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
						unsigned long long new,
						unsigned long long old)
{
	unsigned int rem_old, rem_new;
	unsigned long long res;
	unsigned int weight = sbi->last_age_weight;

	res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
		+ div_u64_rem(old, 100, &rem_old) * weight;

	if (rem_new)
		res += rem_new * (100 - weight) / 100;
	if (rem_old)
		res += rem_old * weight / 100;

	return res;
}
825
/* This returns a new age and allocated blocks in ei */
static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
						block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t f_size = i_size_read(inode);
	unsigned long long cur_blocks =
				atomic64_read(&sbi->allocated_data_blocks);
	struct extent_info tei = *ei;	/* only fofs and len are valid */

	/*
	 * When I/O is not aligned to a PAGE_SIZE, update will happen to the last
	 * file block even in seq write. So don't record age for newly last file
	 * block here.
	 */
	if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
			blkaddr == NEW_ADDR)
		return -EINVAL;

	/* previous age known: blend it with the age since last allocation */
	if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
		unsigned long long cur_age;

		if (cur_blocks >= tei.last_blocks)
			cur_age = cur_blocks - tei.last_blocks;
		else
			/* allocated_data_blocks overflow */
			cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;

		if (tei.age)
			ei->age = __calculate_block_age(sbi, cur_age, tei.age);
		else
			ei->age = cur_age;
		ei->last_blocks = cur_blocks;
		WARN_ON(ei->age > cur_blocks);
		return 0;
	}

	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);

	/* the data block was allocated for the first time */
	if (blkaddr == NEW_ADDR)
		goto out;

	if (__is_valid_data_blkaddr(blkaddr) &&
	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		f2fs_bug_on(sbi, 1);
		return -EINVAL;
	}
out:
	/*
	 * init block age with zero, this can happen when the block age extent
	 * was reclaimed due to memory constraint or system reboot
	 */
	ei->age = 0;
	ei->last_blocks = cur_blocks;
	return 0;
}
883
/*
 * Update the extent cache of @type for the single block addressed by
 * @dn (the dnode's current offset).  No-op when policy forbids an
 * extent tree, or when a block age cannot be derived.
 */
static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
{
	struct extent_info ei = {};

	if (!__may_extent_tree(dn->inode, type))
		return;

	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	ei.len = 1;

	if (type == EX_READ) {
		/* a not-yet-written block carries no physical address */
		if (dn->data_blkaddr == NEW_ADDR)
			ei.blk = NULL_ADDR;
		else
			ei.blk = dn->data_blkaddr;
	} else if (type == EX_BLOCK_AGE) {
		if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
			return;
	}
	__update_extent_tree_range(dn->inode, &ei, type);
}
906
/*
 * Reclaim up to @nr_shrink extent-cache objects of @type: first free
 * zombie trees (trees whose inode was evicted), then evict the oldest
 * extent nodes from the global LRU list.  Uses trylocks throughout so
 * the shrinker never blocks on cache users.  Returns the number of
 * objects (nodes + trees) reclaimed.
 */
static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
				enum extent_type type)
{
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!atomic_read(&eti->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&eti->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&eti->total_ext_tree);
		atomic_dec(&eti->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&eti->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&eti->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&eti->extent_list))
			break;
		en = list_first_entry(&eti->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &eti->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&eti->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&eti->extent_lock);
	}
	spin_unlock(&eti->extent_lock);

unlock_out:
	mutex_unlock(&eti->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);

	return node_cnt + tree_cnt;
}
981
e7547dac
JK
982/* read extent cache operations */
983bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
984 struct extent_info *ei)
985{
986 if (!__may_extent_tree(inode, EX_READ))
987 return false;
988
989 return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
990}
991
04a91ab0
CH
992bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
993 block_t *blkaddr)
994{
995 struct extent_info ei = {};
996
997 if (!f2fs_lookup_read_extent_cache(inode, index, &ei))
998 return false;
999 *blkaddr = ei.blk + index - ei.fofs;
1000 return true;
1001}
1002
e7547dac
JK
1003void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
1004{
1005 return __update_extent_cache(dn, EX_READ);
1006}
1007
1008void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
1009 pgoff_t fofs, block_t blkaddr, unsigned int len)
1010{
1011 struct extent_info ei = {
1012 .fofs = fofs,
1013 .len = len,
1014 .blk = blkaddr,
1015 };
1016
1017 if (!__may_extent_tree(dn->inode, EX_READ))
1018 return;
1019
1020 __update_extent_tree_range(dn->inode, &ei, EX_READ);
1021}
1022
1023unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
1024{
1025 if (!test_opt(sbi, READ_EXTENT_CACHE))
1026 return 0;
1027
1028 return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
1029}
1030
71644dff
JK
1031/* block age extent cache operations */
1032bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
1033 struct extent_info *ei)
1034{
1035 if (!__may_extent_tree(inode, EX_BLOCK_AGE))
1036 return false;
1037
1038 return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
1039}
1040
1041void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
1042{
1043 return __update_extent_cache(dn, EX_BLOCK_AGE);
1044}
1045
1046void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
1047 pgoff_t fofs, unsigned int len)
1048{
1049 struct extent_info ei = {
1050 .fofs = fofs,
1051 .len = len,
1052 };
1053
1054 if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
1055 return;
1056
1057 __update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
1058}
1059
1060unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
1061{
1062 if (!test_opt(sbi, AGE_EXTENT_CACHE))
1063 return 0;
1064
1065 return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
1066}
1067
e7547dac
JK
/*
 * Free every extent node held by the inode's @type extent tree, leaving
 * the (now empty) tree itself in place.
 *
 * Returns the number of extent nodes freed; 0 if the inode has no tree
 * of this type or the tree is already empty.
 */
static unsigned int __destroy_extent_node(struct inode *inode,
					enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	/* Nothing to do without a tree or without any nodes in it. */
	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	/* Exclusive tree lock while tearing down all nodes. */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}
1084
e7547dac
JK
1085void f2fs_destroy_extent_node(struct inode *inode)
1086{
1087 __destroy_extent_node(inode, EX_READ);
71644dff 1088 __destroy_extent_node(inode, EX_BLOCK_AGE);
e7547dac
JK
1089}
1090
/*
 * Drop all cached extents of @type for @inode, keeping the tree itself.
 *
 * For the read cache this also sets FI_NO_EXTENT (stop caching for this
 * inode) and clears the cached "largest" extent; if the largest extent
 * was non-empty the inode is marked dirty afterwards so the on-disk
 * copy of that extent is invalidated too.
 */
static void __drop_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	bool updated = false;

	if (!__may_extent_tree(inode, type))
		return;

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	if (type == EX_READ) {
		set_inode_flag(inode, FI_NO_EXTENT);
		if (et->largest.len) {
			et->largest.len = 0;
			updated = true;
		}
	}
	write_unlock(&et->lock);
	/* Dirty-mark outside the tree lock to keep the critical section short. */
	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}
1113
e7547dac
JK
1114void f2fs_drop_extent_tree(struct inode *inode)
1115{
1116 __drop_extent_tree(inode, EX_READ);
71644dff 1117 __drop_extent_tree(inode, EX_BLOCK_AGE);
e7547dac
JK
1118}
1119
/*
 * Tear down the inode's @type extent tree completely.
 *
 * If the inode is still live (linked, not bad) but the tree holds nodes,
 * the tree is parked on the per-type zombie list instead of being freed
 * immediately; the shrinker reaps zombie trees later. Otherwise all
 * nodes are freed, the tree is removed from the per-type radix tree and
 * released, and the inode's pointer is cleared.
 */
static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	if (!et)
		return;

	/* Live inode with cached nodes: defer teardown via the zombie list. */
	if (inode->i_nlink && !is_bad_inode(inode) &&
				atomic_read(&et->node_cnt)) {
		mutex_lock(&eti->extent_tree_lock);
		list_add_tail(&et->list, &eti->zombie_list);
		atomic_inc(&eti->total_zombie_tree);
		mutex_unlock(&eti->extent_tree_lock);
		return;
	}

	/* free all extent info belong to this extent tree */
	node_cnt = __destroy_extent_node(inode, type);

	/* delete extent tree entry in radix tree */
	mutex_lock(&eti->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&eti->total_ext_tree);
	mutex_unlock(&eti->extent_tree_lock);

	F2FS_I(inode)->extent_tree[type] = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
}
1154
e7547dac 1155void f2fs_destroy_extent_tree(struct inode *inode)
a28ef1f5 1156{
e7547dac 1157 __destroy_extent_tree(inode, EX_READ);
71644dff 1158 __destroy_extent_tree(inode, EX_BLOCK_AGE);
19b2c30d
CY
1159}
1160
e7547dac 1161static void __init_extent_tree_info(struct extent_tree_info *eti)
19b2c30d 1162{
e7547dac
JK
1163 INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
1164 mutex_init(&eti->extent_tree_lock);
1165 INIT_LIST_HEAD(&eti->extent_list);
1166 spin_lock_init(&eti->extent_lock);
1167 atomic_set(&eti->total_ext_tree, 0);
1168 INIT_LIST_HEAD(&eti->zombie_list);
1169 atomic_set(&eti->total_zombie_tree, 0);
1170 atomic_set(&eti->total_ext_node, 0);
a28ef1f5
CY
1171}
1172
4d57b86d 1173void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
a28ef1f5 1174{
e7547dac 1175 __init_extent_tree_info(&sbi->extent_tree[EX_READ]);
71644dff
JK
1176 __init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);
1177
1178 /* initialize for block age extents */
1179 atomic64_set(&sbi->allocated_data_blocks, 0);
1180 sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
1181 sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
d23be468 1182 sbi->last_age_weight = LAST_AGE_WEIGHT;
a28ef1f5
CY
1183}
1184
4d57b86d 1185int __init f2fs_create_extent_cache(void)
a28ef1f5
CY
1186{
1187 extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
1188 sizeof(struct extent_tree));
1189 if (!extent_tree_slab)
1190 return -ENOMEM;
1191 extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
1192 sizeof(struct extent_node));
1193 if (!extent_node_slab) {
1194 kmem_cache_destroy(extent_tree_slab);
1195 return -ENOMEM;
1196 }
1197 return 0;
1198}
1199
4d57b86d 1200void f2fs_destroy_extent_cache(void)
a28ef1f5
CY
1201{
1202 kmem_cache_destroy(extent_node_slab);
1203 kmem_cache_destroy(extent_tree_slab);
1204}