// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * block_age-based extent cache added by:
 * Copyright (c) 2022 xiaomi Co., Ltd.
 *             http://www.xiaomi.com/
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

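/*
 * Sanity check the on-disk read extent of an inode: the cached block
 * range must be valid, and for a device-aliasing inode it must match
 * exactly one device that is neither the meta device nor zoned.
 * Returns false (i.e. fsck is needed) otherwise.
 */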
bool sanity_check_extent_cache(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
	struct extent_info ei;
	int devi;

	get_read_extent_info(&ei, i_ext);

	if (!ei.len)
		return true;

	if (!f2fs_is_valid_blkaddr(sbi, ei.blk, DATA_GENERIC_ENHANCE) ||
	    !f2fs_is_valid_blkaddr(sbi, ei.blk + ei.len - 1,
					DATA_GENERIC_ENHANCE)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
			  __func__, inode->i_ino,
			  ei.blk, ei.fofs, ei.len);
		return false;
	}

	if (!IS_DEVICE_ALIASING(inode))
		return true;

	for (devi = 0; devi < sbi->s_ndevs; devi++) {
		if (FDEV(devi).start_blk != ei.blk ||
		    FDEV(devi).end_blk != ei.blk + ei.len - 1)
			continue;

		if (devi == 0) {
			f2fs_warn(sbi,
				  "%s: inode (ino=%lx) is an alias of meta device",
				  __func__, inode->i_ino);
			return false;
		}

		if (bdev_is_zoned(FDEV(devi).bdev)) {
			f2fs_warn(sbi,
				  "%s: device alias inode (ino=%lx)'s extent info "
				  "[%u, %u, %u] maps to zoned block device",
				  __func__, inode->i_ino, ei.blk, ei.fofs, ei.len);
			return false;
		}
		return true;
	}

	f2fs_warn(sbi, "%s: device alias inode (ino=%lx)'s extent info "
		  "[%u, %u, %u] is inconsistent w/ any devices",
		  __func__, inode->i_ino, ei.blk, ei.fofs, ei.len);
	return false;
}

static void __set_extent_info(struct extent_info *ei,
				unsigned int fofs, unsigned int len,
				block_t blk, bool keep_clen,
				unsigned long age, unsigned long last_blocks,
				enum extent_type type)
{
	ei->fofs = fofs;
	ei->len = len;

	if (type == EX_READ) {
		ei->blk = blk;
		if (keep_clen)
			return;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		ei->c_len = 0;
#endif
	} else if (type == EX_BLOCK_AGE) {
		ei->age = age;
		ei->last_blocks = last_blocks;
	}
}

static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (type == EX_READ)
		return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
			S_ISREG(inode->i_mode);
	if (type == EX_BLOCK_AGE)
		return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
	return false;
}

static bool __may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (IS_DEVICE_ALIASING(inode) && type == EX_READ)
		return true;

	/*
	 * For files recovered during mount, don't create extents
	 * if the shrinker is not yet registered.
	 */
	if (list_empty(&F2FS_I_SB(inode)->s_list))
		return false;

	if (!__init_may_extent_tree(inode, type))
		return false;

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT))
			return false;
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
		    !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
			return false;
	} else if (type == EX_BLOCK_AGE) {
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
			return false;
		if (file_is_cold(inode))
			return false;
	}
	return true;
}

static void __try_update_largest_extent(struct extent_tree *et,
					struct extent_node *en)
{
	if (et->type != EX_READ)
		return;
	if (en->ei.len <= et->largest.len)
		return;

	et->largest = en->ei;
	et->largest_updated = true;
}

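/*
 * Extents merge only if they are logically contiguous and, for EX_READ,
 * physically contiguous too: e.g. back [fofs=0, len=4, blk=100] and
 * front [fofs=4, len=2, blk=104] merge into [fofs=0, len=6, blk=100].
 * A compressed extent with a partial cluster (c_len set but len != c_len)
 * never merges. For EX_BLOCK_AGE, both age and last_blocks must lie
 * within SAME_AGE_REGION of each other.
 */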
static bool __is_extent_mergeable(struct extent_info *back,
				struct extent_info *front, enum extent_type type)
{
	if (type == EX_READ) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (back->c_len && back->len != back->c_len)
			return false;
		if (front->c_len && front->len != front->c_len)
			return false;
#endif
		return (back->fofs + back->len == front->fofs &&
				back->blk + back->len == front->blk);
	} else if (type == EX_BLOCK_AGE) {
		return (back->fofs + back->len == front->fofs &&
			abs(back->age - front->age) <= SAME_AGE_REGION &&
			abs(back->last_blocks - front->last_blocks) <=
							SAME_AGE_REGION);
	}
	return false;
}

static bool __is_back_mergeable(struct extent_info *cur,
				struct extent_info *back, enum extent_type type)
{
	return __is_extent_mergeable(back, cur, type);
}

static bool __is_front_mergeable(struct extent_info *cur,
				struct extent_info *front, enum extent_type type)
{
	return __is_extent_mergeable(cur, front, type);
}

static struct extent_node *__lookup_extent_node(struct rb_root_cached *root,
			struct extent_node *cached_en, unsigned int fofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct extent_node *en;

	/* check a cached entry */
	if (cached_en && cached_en->ei.fofs <= fofs &&
			cached_en->ei.fofs + cached_en->ei.len > fofs)
		return cached_en;

	/* check rb_tree */
	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			node = node->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			node = node->rb_right;
		else
			return en;
	}
	return NULL;
}

/*
 * Look up the rb entry covering @fofs in the rb-tree.
 * If hit, return the entry; otherwise, return NULL.
 * @prev_entry: extent before fofs
 * @next_entry: extent after fofs
 * @insert_p: insert point for a new extent at fofs,
 *            to simplify the later insertion.
 * The tree must stay unchanged between lookup and insertion.
 */
static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root,
				struct extent_node *cached_en,
				unsigned int fofs,
				struct extent_node **prev_entry,
				struct extent_node **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct extent_node *en = cached_en;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs)
		goto lookup_neighbors;

	*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		en = rb_entry(*pnode, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			pnode = &(*pnode)->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			pnode = &(*pnode)->rb_right;
			*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	en = rb_entry(parent, struct extent_node, rb_node);
	tmp_node = parent;
	if (parent && fofs > en->ei.fofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);

	tmp_node = parent;
	if (parent && fofs < en->ei.fofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
	return NULL;

lookup_neighbors:
	if (fofs == en->ei.fofs) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&en->rb_node);
		*prev_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	if (fofs == en->ei.fofs + en->ei.len - 1) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&en->rb_node);
		*next_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	return en;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en;

	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&eti->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&eti->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	spin_lock(&eti->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&eti->extent_lock);

	__detach_extent_node(sbi, et, en);
}

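/*
 * Look up the per-inode extent tree in the radix tree, allocating it on
 * first use; a tree that is still cached from a previous inode lifetime
 * is taken off the zombie list and reused. The result is remembered in
 * F2FS_I(inode)->extent_tree[type].
 */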
static struct extent_tree *__grab_extent_tree(struct inode *inode,
						enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&eti->extent_tree_lock);
	et = radix_tree_lookup(&eti->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->type = type;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&eti->total_ext_tree);
	} else {
		atomic_dec(&eti->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&eti->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree[type] = et;

	return et;
}

static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, unsigned int nr_shrink)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count;

	node = rb_first_cached(&et->root);

	for (count = 0; node && count < nr_shrink; count++) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count;
}

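/* Invalidate the cached largest extent if it overlaps [fofs, fofs + len). */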
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}

void f2fs_init_read_extent_tree(struct inode *inode, struct folio *ifolio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
	struct f2fs_extent *i_ext = &F2FS_INODE(&ifolio->page)->i_ext;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!__may_extent_tree(inode, EX_READ)) {
		/* drop largest read extent */
		if (i_ext->len) {
			f2fs_folio_wait_writeback(ifolio, NODE, true, true);
			i_ext->len = 0;
			folio_mark_dirty(ifolio);
		}
		set_inode_flag(inode, FI_NO_EXTENT);
		return;
	}

	et = __grab_extent_tree(inode, EX_READ);

	get_read_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt) || !ei.len)
		goto skip;

	if (IS_DEVICE_ALIASING(inode)) {
		et->largest = ei;
		goto skip;
	}

	en = __attach_extent_node(sbi, et, &ei, NULL,
				&et->root.rb_root.rb_node, true);
	if (en) {
		et->largest = en->ei;
		et->cached_en = en;

		spin_lock(&eti->extent_lock);
		list_add_tail(&en->list, &eti->extent_list);
		spin_unlock(&eti->extent_lock);
	}
skip:
	/* Drop the largest extent if the checkpoint got corrupted. */
	if (f2fs_cp_error(sbi)) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
	write_unlock(&et->lock);
}

| 461 | |
| 462 | void f2fs_init_age_extent_tree(struct inode *inode) |
| 463 | { |
| 464 | if (!__init_may_extent_tree(inode, EX_BLOCK_AGE)) |
| 465 | return; |
| 466 | __grab_extent_tree(inode, EX_BLOCK_AGE); |
| 467 | } |
| 468 | |
| 469 | void f2fs_init_extent_tree(struct inode *inode) |
| 470 | { |
| 471 | /* initialize read cache */ |
| 472 | if (__init_may_extent_tree(inode, EX_READ)) |
| 473 | __grab_extent_tree(inode, EX_READ); |
| 474 | |
| 475 | /* initialize block age cache */ |
| 476 | if (__init_may_extent_tree(inode, EX_BLOCK_AGE)) |
| 477 | __grab_extent_tree(inode, EX_BLOCK_AGE); |
| 478 | } |
| 479 | |
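/*
 * Core lookup helper: for EX_READ, the cached largest extent is tried
 * first; otherwise fall back to the rbtree. On a hit, the node is moved
 * to the tail of the global LRU list so that the shrinker reclaims the
 * coldest entries first.
 */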
static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en;
	bool ret = false;

	if (!et)
		return false;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);

	read_lock(&et->lock);

	if (type == EX_READ &&
			et->largest.fofs <= pgofs &&
			(pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	if (IS_DEVICE_ALIASING(inode)) {
		ret = false;
		goto out;
	}

	en = __lookup_extent_node(&et->root, et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi, type);
	else
		stat_inc_rbtree_node_hit(sbi, type);

	*ei = en->ei;
	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi, type);
	read_unlock(&et->lock);

	if (type == EX_READ)
		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
	return ret;
}

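/*
 * Try to merge @ei into its neighbors: extend @prev_ex forward and/or
 * @next_ex backward. If both sides merge, @prev_ex is released and
 * @next_ex ends up covering the whole range. Returns the merged node,
 * or NULL if nothing was mergeable.
 */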
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.len += ei->len;
		if (et->type == EX_READ)
			next_ex->ei.blk = ei->blk;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	return en;
}

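/*
 * Insert @ei as a new node, reusing the insert position remembered by
 * __lookup_extent_node_ret() when available to avoid a second tree walk.
 * The new node becomes the cached node and is added at the LRU tail.
 */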
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	/* look up extent_node in the rb tree */
	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&eti->extent_lock);
	list_add_tail(&en->list, &eti->extent_list);
	et->cached_en = en;
	spin_unlock(&eti->extent_lock);
	return en;
}

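/*
 * Free every node of the inode's extent tree, in batches of nr_shrink
 * nodes per write_lock section to bound lock hold time.
 */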
static unsigned int __destroy_extent_node(struct inode *inode,
					enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int nr_shrink = type == EX_READ ?
				READ_EXTENT_CACHE_SHRINK_NUMBER :
				AGE_EXTENT_CACHE_SHRINK_NUMBER;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	while (atomic_read(&et->node_cnt)) {
		write_lock(&et->lock);
		node_cnt += __free_extent_tree(sbi, et, nr_shrink);
		write_unlock(&et->lock);
	}

	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));

	return node_cnt;
}

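/*
 * Update the tree for [fofs, fofs + len): (1) find the first overlapping
 * node, (2) shrink, split or remove each overlapping node, and (3) insert
 * or merge the new extent. Read extents shorter than F2FS_MIN_EXTENT_LEN
 * are not kept when splitting.
 */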
static void __update_extent_tree_range(struct inode *inode,
			struct extent_info *tei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int fofs = tei->fofs, len = tei->len;
	unsigned int end = fofs + len;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	if (type == EX_READ)
		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
						tei->blk, 0);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
						tei->age, tei->last_blocks);

	write_lock(&et->lock);

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
			write_unlock(&et->lock);
			return;
		}

		prev = et->largest;
		dei.len = 0;

		/*
		 * drop largest extent before lookup, in case it's already
		 * been shrunk from extent tree
		 */
		__drop_largest_extent(et, fofs, len);
	}

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, fofs >= org_end);

		if (fofs > dei.fofs && (type != EX_READ ||
				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
			en->ei.len = fofs - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && (type != EX_READ ||
			(org_end - end >= F2FS_MIN_EXTENT_LEN &&
			atomic_read(&et->node_cnt) <
					sbi->max_read_extent_count))) {
			if (parts) {
				__set_extent_info(&ei,
					end, org_end - end,
					end - dei.fofs + dei.blk, false,
					dei.age, dei.last_blocks,
					type);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				__set_extent_info(&en->ei,
					end, en->ei.len - (end - dei.fofs),
					en->ei.blk + (end - dei.fofs), true,
					dei.age, dei.last_blocks,
					type);
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * If the original extent was split into zero or two parts,
		 * the tree has been altered by deletion or insertion, so
		 * invalidate the cached insert position.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	if (type == EX_BLOCK_AGE)
		goto update_age_extent_cache;

	/* 3. update extent in read extent cache */
	BUG_ON(type != EX_READ);

	if (tei->blk) {
		__set_extent_info(&ei, fofs, len, tei->blk, false,
				  0, 0, EX_READ);
		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
			__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			et->largest_updated = true;
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (et->largest_updated) {
		et->largest_updated = false;
		updated = true;
	}
	goto out_read_extent_cache;
update_age_extent_cache:
	if (!tei->last_blocks)
		goto out_read_extent_cache;

	__set_extent_info(&ei, fofs, len, 0, false,
			tei->age, tei->last_blocks, EX_BLOCK_AGE);
	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);
out_read_extent_cache:
	write_unlock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__destroy_extent_node(inode, EX_READ);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int llen,
				unsigned int c_len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
	struct extent_node *en = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	bool leftmost = false;

	trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
						blkaddr, c_len);

	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return;

	write_lock(&et->lock);

	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (en)
		goto unlock_out;

	__set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
	ei.c_len = c_len;

	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
		__insert_extent_tree(sbi, et, &ei,
					insert_p, insert_parent, leftmost);
unlock_out:
	write_unlock(&et->lock);
}
#endif

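/*
 * Weighted average of the new and old age:
 *	res = new * (100 - weight)% + old * weight%
 * computed with div_u64_rem() since plain 64-bit division is not
 * available on 32-bit hosts. E.g. weight = 30, new = 200, old = 100:
 *	2 * 70 + 1 * 30 = 170 == (200 * 70 + 100 * 30) / 100.
 */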
static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
						unsigned long long new,
						unsigned long long old)
{
	unsigned int rem_old, rem_new;
	unsigned long long res;
	unsigned int weight = sbi->last_age_weight;

	res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
		+ div_u64_rem(old, 100, &rem_old) * weight;

	if (rem_new)
		res += rem_new * (100 - weight) / 100;
	if (rem_old)
		res += rem_old * weight / 100;

	return res;
}

/* This returns a new age and allocated blocks in ei */
static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
						block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t f_size = i_size_read(inode);
	unsigned long long cur_blocks =
				atomic64_read(&sbi->allocated_data_blocks);
	struct extent_info tei = *ei;	/* only fofs and len are valid */

	/*
	 * When I/O is not aligned to PAGE_SIZE, updates hit the last file
	 * block even in sequential writes, so don't record an age for the
	 * new last file block here.
	 */
	if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
			blkaddr == NEW_ADDR)
		return -EINVAL;

	if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
		unsigned long long cur_age;

		if (cur_blocks >= tei.last_blocks)
			cur_age = cur_blocks - tei.last_blocks;
		else
			/* allocated_data_blocks overflow */
			cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;

		if (tei.age)
			ei->age = __calculate_block_age(sbi, cur_age, tei.age);
		else
			ei->age = cur_age;
		ei->last_blocks = cur_blocks;
		WARN_ON(ei->age > cur_blocks);
		return 0;
	}

	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);

	/* the data block was allocated for the first time */
	if (blkaddr == NEW_ADDR)
		goto out;

	if (__is_valid_data_blkaddr(blkaddr) &&
	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EINVAL;
out:
	/*
	 * Init the block age with zero; this can happen when the block-age
	 * extent was reclaimed due to memory pressure or a system reboot.
	 */
	ei->age = 0;
	ei->last_blocks = cur_blocks;
	return 0;
}

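/* Cache a single block (len == 1) at the dnode's current block address. */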
static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
{
	struct extent_info ei = {};

	if (!__may_extent_tree(dn->inode, type))
		return;

	ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(&dn->node_folio->page), dn->inode) +
								dn->ofs_in_node;
	ei.len = 1;

	if (type == EX_READ) {
		if (dn->data_blkaddr == NEW_ADDR)
			ei.blk = NULL_ADDR;
		else
			ei.blk = dn->data_blkaddr;
	} else if (type == EX_BLOCK_AGE) {
		if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
			return;
	}
	__update_extent_tree_range(dn->inode, &ei, type);
}

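/*
 * Shrinker entry point: first tear down zombie trees (trees whose inode
 * has been evicted), then reclaim the coldest nodes from the head of the
 * global LRU list, up to nr_shrink entries in total.
 */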
static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
				enum extent_type type)
{
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!atomic_read(&eti->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et,
					nr_shrink - node_cnt - tree_cnt);
			write_unlock(&et->lock);
		}

		if (atomic_read(&et->node_cnt))
			goto unlock_out;

		list_del_init(&et->list);
		radix_tree_delete(&eti->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&eti->total_ext_tree);
		atomic_dec(&eti->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&eti->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&eti->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&eti->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&eti->extent_list))
			break;
		en = list_first_entry(&eti->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &eti->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&eti->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&eti->extent_lock);
	}
	spin_unlock(&eti->extent_lock);

unlock_out:
	mutex_unlock(&eti->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);

	return node_cnt + tree_cnt;
}

/* read extent cache operations */
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_READ))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
}

bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
				block_t *blkaddr)
{
	struct extent_info ei = {};

	if (!f2fs_lookup_read_extent_cache(inode, index, &ei))
		return false;
	*blkaddr = ei.blk + index - ei.fofs;
	return true;
}

void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_READ);
}

void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
		.blk = blkaddr,
	};

	if (!__may_extent_tree(dn->inode, EX_READ))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_READ);
}

unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, READ_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
}

/* block age extent cache operations */
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
				struct extent_info *ei)
{
	if (!__may_extent_tree(inode, EX_BLOCK_AGE))
		return false;

	return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
}

void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
{
	return __update_extent_cache(dn, EX_BLOCK_AGE);
}

void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, unsigned int len)
{
	struct extent_info ei = {
		.fofs = fofs,
		.len = len,
	};

	if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
		return;

	__update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
}

unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	if (!test_opt(sbi, AGE_EXTENT_CACHE))
		return 0;

	return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
}

void f2fs_destroy_extent_node(struct inode *inode)
{
	__destroy_extent_node(inode, EX_READ);
	__destroy_extent_node(inode, EX_BLOCK_AGE);
}

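/*
 * Drop all cached extents of one type. For EX_READ this also sets
 * FI_NO_EXTENT so no new read extents are cached for this inode.
 */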
static void __drop_extent_tree(struct inode *inode, enum extent_type type)
{
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	bool updated = false;

	if (!__may_extent_tree(inode, type))
		return;

	write_lock(&et->lock);
	if (type == EX_READ) {
		set_inode_flag(inode, FI_NO_EXTENT);
		if (et->largest.len) {
			et->largest.len = 0;
			updated = true;
		}
	}
	write_unlock(&et->lock);

	__destroy_extent_node(inode, type);

	if (updated)
		f2fs_mark_inode_dirty_sync(inode, true);
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	__drop_extent_tree(inode, EX_READ);
	__drop_extent_tree(inode, EX_BLOCK_AGE);
}

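/*
 * Called at inode eviction. A still-linked inode whose tree has nodes
 * only parks the tree on the zombie list for the shrinker; otherwise
 * the tree is freed and deleted from the radix tree immediately.
 */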
static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&eti->extent_tree_lock);
		list_add_tail(&et->list, &eti->zombie_list);
		atomic_inc(&eti->total_zombie_tree);
		mutex_unlock(&eti->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = __destroy_extent_node(inode, type);

	/* delete extent tree entry in radix tree */
	mutex_lock(&eti->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&eti->total_ext_tree);
	mutex_unlock(&eti->extent_tree_lock);

	F2FS_I(inode)->extent_tree[type] = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	__destroy_extent_tree(inode, EX_READ);
	__destroy_extent_tree(inode, EX_BLOCK_AGE);
}

static void __init_extent_tree_info(struct extent_tree_info *eti)
{
	INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
	mutex_init(&eti->extent_tree_lock);
	INIT_LIST_HEAD(&eti->extent_list);
	spin_lock_init(&eti->extent_lock);
	atomic_set(&eti->total_ext_tree, 0);
	INIT_LIST_HEAD(&eti->zombie_list);
	atomic_set(&eti->total_zombie_tree, 0);
	atomic_set(&eti->total_ext_node, 0);
}

void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
	__init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);

	/* initialize for block age extents */
	atomic64_set(&sbi->allocated_data_blocks, 0);
	sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
	sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
	sbi->last_age_weight = LAST_AGE_WEIGHT;
	sbi->max_read_extent_count = DEF_MAX_READ_EXTENT_COUNT;
}

int __init f2fs_create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}