start = found_end + 1;
}
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
return ret;
}
}
btrfs_clear_extent_dirty(unpin, start, end, &cached_state);
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
btrfs_error_unpin_extent_range(fs_info, start, end);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
* (see wait_extent_bit()).
*/
ASSERT(!waitqueue_active(&state->wq));
- free_extent_state(state);
+ btrfs_free_extent_state(state);
cond_resched_lock(&tree->lock);
}
/*
return prealloc;
}
-void free_extent_state(struct extent_state *state)
+void btrfs_free_extent_state(struct extent_state *state)
{
if (!state)
return;
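
The NULL check at the top of btrfs_free_extent_state() is what makes the mechanical rename at every call site below safe: callers may pass a cached state pointer that was never populated. A minimal sketch of that caller pattern, with everything except the renamed function being illustrative:

	struct extent_state *cached_state = NULL;

	/* ... operations that may or may not populate cached_state ... */

	/* Safe even if cached_state is still NULL: the free returns early. */
	btrfs_free_extent_state(cached_state);
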
state->start = prev->start;
rb_erase(&prev->rb_node, &tree->state);
RB_CLEAR_NODE(&prev->rb_node);
- free_extent_state(prev);
+ btrfs_free_extent_state(prev);
}
}
state->end = next->end;
rb_erase(&next->rb_node, &tree->state);
RB_CLEAR_NODE(&next->rb_node);
- free_extent_state(next);
+ btrfs_free_extent_state(next);
}
}
} else if (prealloc->end > entry->end) {
node = &(*node)->rb_right;
} else {
- free_extent_state(prealloc);
+ btrfs_free_extent_state(prealloc);
return -EEXIST;
}
}
if (extent_state_in_tree(state)) {
rb_erase(&state->rb_node, &tree->state);
RB_CLEAR_NODE(&state->rb_node);
- free_extent_state(state);
+ btrfs_free_extent_state(state);
} else {
WARN_ON(1);
}
goto hit_next;
}
if (clear)
- free_extent_state(cached);
+ btrfs_free_extent_state(cached);
}
/* This search will find the extents that end after our range starts. */
out:
spin_unlock(&tree->lock);
if (prealloc)
- free_extent_state(prealloc);
+ btrfs_free_extent_state(prealloc);
return 0;
schedule();
spin_lock(&tree->lock);
finish_wait(&state->wq, &wait);
- free_extent_state(state);
+ btrfs_free_extent_state(state);
goto again;
}
start = state->end + 1;
if (cached_state && *cached_state) {
state = *cached_state;
*cached_state = NULL;
- free_extent_state(state);
+ btrfs_free_extent_state(state);
}
spin_unlock(&tree->lock);
}
* again. If we haven't found any, clear as well since
* it's now useless.
*/
- free_extent_state(*cached_state);
+ btrfs_free_extent_state(*cached_state);
*cached_state = NULL;
if (state)
goto got_it;
goto out;
}
- free_extent_state(*cached_state);
+ btrfs_free_extent_state(*cached_state);
*cached_state = NULL;
}
out:
spin_unlock(&tree->lock);
if (prealloc)
- free_extent_state(prealloc);
+ btrfs_free_extent_state(prealloc);
return ret;
out:
spin_unlock(&tree->lock);
if (prealloc)
- free_extent_state(prealloc);
+ btrfs_free_extent_state(prealloc);
return ret;
}
}
if (cached_state) {
- free_extent_state(*cached_state);
+ btrfs_free_extent_state(*cached_state);
*cached_state = state;
if (state)
refcount_inc(&state->refs);
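
The hunk above follows the cache-replacement discipline used throughout the tree: drop the reference held by the old cached entry before overwriting the pointer, then take a reference on the new state so it stays valid after the tree lock is released. A hedged sketch of that discipline (the helper name and shape are illustrative, not from the patch):

	static void cache_state_sketch(struct extent_state *state,
				       struct extent_state **cached_state)
	{
		if (!cached_state)
			return;
		/* Release the previous cached entry's reference, if any. */
		btrfs_free_extent_state(*cached_state);
		*cached_state = state;
		if (state)
			refcount_inc(&state->refs);
	}
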
u64 max_bytes, u32 bits, int contig,
struct extent_state **cached_state);
-void free_extent_state(struct extent_state *state);
+void btrfs_free_extent_state(struct extent_state *state);
bool btrfs_test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
struct extent_state *cached_state);
bool btrfs_test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
ret = unpin_extent_range(fs_info, start, end, true);
BUG_ON(ret);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
cond_resched();
}
/* @delalloc_end can be -1, never go beyond @orig_end */
*end = min(delalloc_end, orig_end);
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
return false;
}
/* some of the folios are gone, let's avoid looping by
 * shortening the size of the delalloc range we're searching
 */
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
cached_state = NULL;
if (!loops) {
max_bytes = PAGE_SIZE;
if (ret2 == 0)
ret = true;
out:
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
return ret;
}
ret = emit_last_fiemap_cache(fieinfo, &cache);
out:
- free_extent_state(delalloc_cached_state);
+ btrfs_free_extent_state(delalloc_cached_state);
kfree(cache.entries);
btrfs_free_backref_share_ctx(backref_ctx);
return ret;
btrfs_unlock_extent(&inode->io_tree, lockstart, lockend,
&cached_state);
else
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
btrfs_delalloc_release_extents(inode, reserved_len);
release_space(inode, *data_reserved, reserved_start, reserved_len,
only_release_metadata);
if (extents_locked)
btrfs_unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
else
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
btrfs_delalloc_release_extents(inode, reserved_len);
if (ret) {
if (private) {
kfree(private->filldir_buf);
- free_extent_state(private->llseek_cached_state);
+ btrfs_free_extent_state(private->llseek_cached_state);
kfree(private);
filp->private_data = NULL;
}
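
Unlike the transient cached states elsewhere in this patch, the readdir hunk above frees a cached state that lives as long as the open file itself. For orientation, the owning struct looks roughly like this (fields abridged to the two referenced here):

	struct btrfs_file_private {
		void *filldir_buf;
		/*
		 * Cached extent state reused across llseek calls; its
		 * reference must be dropped with btrfs_free_extent_state()
		 * when the file is released.
		 */
		struct extent_state *llseek_cached_state;
	};
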
ret = filemap_fdatawrite_range(mapping, start, end);
if (!ret && wait_writeback)
ret = filemap_fdatawait_range(mapping, start, end);
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
if (ret)
break;
cached_state = NULL;
ret = 0;
if (!ret)
ret = filemap_fdatawait_range(mapping, start, end);
- free_extent_state(cached_state);
+ btrfs_free_extent_state(cached_state);
if (ret)
break;
cached_state = NULL;
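
In both flush loops above, note the order after the free: cached_state is reset to NULL before the next iteration, so the following lock or lookup starts with a clean cache slot rather than a reference that was already given up. A hedged sketch of that loop shape (the surrounding logic is illustrative):

	while (start <= isize) {
		/* ... lock the range, possibly filling cached_state ... */
		ret = filemap_fdatawrite_range(mapping, start, end);
		if (!ret)
			ret = filemap_fdatawait_range(mapping, start, end);
		btrfs_free_extent_state(cached_state);
		if (ret)
			break;
		/* The reference was dropped above; clear the pointer before
		 * the cache slot is reused for the next range.
		 */
		cached_state = NULL;
		start = end + 1;
	}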