/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/version.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

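/*
 * returns the first byte past the end of 'entry' in the file,
 * saturating at (u64)-1 if file_offset + len would overflow
 */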
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}

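/*
 * insert 'node' into the tree of ordered extents, keyed by file_offset.
 * returns an existing node if the new extent would overlap one already
 * in the tree, or NULL once the insert succeeds.
 */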
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

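/*
 * search for the node covering file_offset.  If there is no exact
 * match, NULL is returned and *prev_ret is set to the nearest
 * neighboring extent, which callers use as a starting point.
 */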
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

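        /* walk forward from the last node visited while it still ends
         * at or before file_offset
         */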
        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
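        /* then walk backward past any extents that end after file_offset */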
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

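/* returns 1 if file_offset falls inside 'entry', 0 otherwise */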
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

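/*
 * look up the ordered extent covering file_offset, falling back to the
 * nearest neighbor.  The result is cached in tree->last so that repeated
 * searches inside the same extent stay cheap.
 */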
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * This also sets the EXTENT_ORDERED bit on the range in the inode.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kzalloc(sizeof(*entry), GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        mutex_lock(&tree->mutex);
        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        /* one ref for the tree */
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);

        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                atomic_inc(&entry->refs);
        }
        set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
                           entry_end(entry) - 1, GFP_NOFS);

        mutex_unlock(&tree->mutex);
        BUG_ON(node);
        return 0;
}

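/*
 * A rough sketch of the intended lifecycle, assuming the caller sits in
 * the writeback submission and completion paths (all names below are
 * functions from this file):
 *
 *	btrfs_add_ordered_extent(inode, file_offset, start, len);
 *	...IO completes...
 *	if (btrfs_dec_test_ordered_pending(inode, file_offset, len)) {
 *		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
 *		btrfs_remove_ordered_extent(inode, ordered);
 *		btrfs_put_ordered_extent(ordered);   (drops the lookup ref)
 *		btrfs_put_ordered_extent(ordered);   (drops the tree's ref)
 *	}
 */
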
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it must be split across them.
 */
int btrfs_add_ordered_sum(struct inode *inode,
                          struct btrfs_ordered_extent *entry,
                          struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        list_add_tail(&sum->list, &entry->list);
        mutex_unlock(&tree->mutex);
        return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   u64 file_offset, u64 io_size)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
                             GFP_NOFS);
        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        ret = test_range_bit(io_tree, entry->file_offset,
                             entry->file_offset + entry->len - 1,
                             EXTENT_ORDERED, 0);
        if (ret == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
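        /*
         * ret is 0 exactly once here: the first time the whole extent is
         * clear of EXTENT_ORDERED bits and we win the test_and_set race
         */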
out:
        mutex_unlock(&tree->mutex);
        return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        if (atomic_dec_and_test(&entry->refs)) {
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kfree(entry);
        }
        return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but anyone waiting on this extent is woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
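        /* the cached search hint may point at this node, drop it */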
        tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        mutex_unlock(&tree->mutex);
        wake_up(&entry->wait);
        return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is set, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry,
                                int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for pdflush to find them
         */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
        /* no do_sync_mapping_range() before 2.6.22, and there is no
         * struct file here to hand to do_sync_file_range(), so kick
         * writeback on the mapping directly instead
         */
        filemap_fdatawrite_range(inode->i_mapping, start, end);
#else
        do_sync_mapping_range(inode->i_mapping, start, end,
                              SYNC_FILE_RANGE_WRITE);
#endif
        if (wait)
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        u64 end;
        u64 orig_end;
        u64 wait_end;
        struct btrfs_ordered_extent *ordered;
        u64 mask = BTRFS_I(inode)->root->sectorsize - 1;

        if (start + len < start) {
                wait_end = (inode->i_size + mask) & ~mask;
                orig_end = (u64)-1;
        } else {
                orig_end = start + len - 1;
                wait_end = orig_end;
        }
again:
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
        /* see btrfs_start_ordered_extent(): no mapping-range helper and
         * no struct file available before 2.6.22
         */
        filemap_fdatawrite_range(inode->i_mapping, start, wait_end);
#else
        do_sync_mapping_range(inode->i_mapping, start, wait_end,
                              SYNC_FILE_RANGE_WRITE);
#endif
        end = orig_end;
        wait_on_extent_writeback(&BTRFS_I(inode)->io_tree, start, orig_end);

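        /* walk from the end of the range back toward start, waiting on
         * each ordered extent we find along the way
         */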
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len < start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
        if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
                           EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
                printk(KERN_ERR "btrfs: inode %lu still ordered or delalloc "
                       "after wait %llu %llu\n", inode->i_ino,
                       (unsigned long long)start,
                       (unsigned long long)orig_end);
                goto again;
        }
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                atomic_inc(&entry->refs);
out:
        mutex_unlock(&tree->mutex);
        return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        mutex_lock(&tree->mutex);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
out:
        mutex_unlock(&tree->mutex);
        return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size_test;
        struct rb_node *node;
        struct btrfs_ordered_extent *test;

        mutex_lock(&tree->mutex);
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
        if (disk_i_size >= inode->i_size ||
            ordered->file_offset + ordered->len <= disk_i_size)
                goto out;

        /*
         * we can't update the disk_i_size if there are delalloc bytes
         * between disk_i_size and this ordered extent
         */
        if (test_range_bit(io_tree, disk_i_size,
                           ordered->file_offset + ordered->len - 1,
                           EXTENT_DELALLOC, 0))
                goto out;

        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
        node = &ordered->rb_node;
        while (1) {
                node = rb_prev(node);
                if (!node)
                        break;
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= inode->i_size)
                        break;
                if (test->file_offset >= disk_i_size)
                        goto out;
        }
        new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

        /*
         * at this point, we know we can safely update i_size to at least
         * the offset from this ordered extent.  But, we need to
         * walk forward and see if ios from higher up in the file have
         * finished.
         */
        node = rb_next(&ordered->rb_node);
        i_size_test = 0;
        if (node) {
                /*
                 * do we have an area where IO might have finished
                 * between our ordered extent and the next one?
                 */
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (test->file_offset > entry_end(ordered))
                        i_size_test = test->file_offset - 1;
        } else {
                i_size_test = i_size_read(inode);
        }

        /*
         * i_size_test is the end of a region after this ordered
         * extent where there are no ordered extents.  As long as there
         * are no delalloc bytes in this area, it is safe to update
         * disk_i_size to the end of the region.
         */
        if (i_size_test > entry_end(ordered) &&
            !test_range_bit(io_tree, entry_end(ordered), i_size_test,
                            EXTENT_DELALLOC, 0))
                new_i_size = min_t(u64, i_size_test, i_size_read(inode));

        BTRFS_I(inode)->disk_i_size = new_i_size;
out:
        mutex_unlock(&tree->mutex);
        return 0;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum)
{
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_sector_sum *sector_sums;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        struct list_head *cur;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
        int ret = 1;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 1;

        mutex_lock(&tree->mutex);
        list_for_each_prev(cur, &ordered->list) {
                ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
                if (offset >= ordered_sum->file_offset) {
                        num_sectors = ordered_sum->len / sectorsize;
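                        /* the per-sector sums appear to be laid out as
                         * an array starting at the 'sums' member of the
                         * struct, one entry per sectorsize block
                         */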
                        sector_sums = &ordered_sum->sums;
                        for (i = 0; i < num_sectors; i++) {
                                if (sector_sums[i].offset == offset) {
                                        *sum = sector_sums[i].sum;
                                        ret = 0;
                                        goto out;
                                }
                        }
                }
        }
out:
        mutex_unlock(&tree->mutex);
        return ret;
}