/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/sort.h>
#include "ctree.h"
#include "ref-cache.h"
#include "transaction.h"

/*
 * leaf refs cache which extents a given leaf holds references on.  This
 * lets btrfs_drop_snapshot process that leaf without reading it back
 * from disk.
 */
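
/*
 * Rough life cycle of a cached leaf ref.  This is only a sketch: it
 * assumes the btrfs_leaf_ref fields declared in ref-cache.h (bytenr,
 * root_gen, nritems and the trailing extents[] array) and a caller that
 * already knows how many extents the leaf references:
 *
 *	ref = btrfs_alloc_leaf_ref(root, nr_extents);
 *	if (!ref)
 *		return -ENOMEM;
 *	ref->bytenr = leaf_start;
 *	ref->root_gen = root_generation;
 *	ref->nritems = nr_extents;
 *	... fill in ref->extents[0] through ref->extents[nr_extents - 1] ...
 *	ret = btrfs_add_leaf_ref(root, ref, shared);
 *	btrfs_free_leaf_ref(root, ref);
 *
 * The final btrfs_free_leaf_ref() only drops the caller's usage
 * reference; the tree took its own in btrfs_add_leaf_ref() and keeps the
 * struct alive until the ref is removed from the cache again.
 */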

/*
 * kmalloc a leaf reference struct and update the counters for the
 * total ref cache size
 */
struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
					    int nr_extents)
{
	struct btrfs_leaf_ref *ref;
	size_t size = btrfs_leaf_ref_size(nr_extents);

	ref = kmalloc(size, GFP_NOFS);
	if (ref) {
		spin_lock(&root->fs_info->ref_cache_lock);
		root->fs_info->total_ref_cache_size += size;
		spin_unlock(&root->fs_info->ref_cache_lock);

		/*
		 * only the struct itself is zeroed here; the room for
		 * nr_extents extent records that follows it is left for
		 * the caller to fill in
		 */
		memset(ref, 0, sizeof(*ref));
		atomic_set(&ref->usage, 1);
		INIT_LIST_HEAD(&ref->list);
	}
	return ref;
}

/*
 * drop a usage reference on a leaf ref.  Once the last reference is
 * gone, the struct is freed and the counter for the total ref cache
 * size is updated
 */
void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
	if (!ref)
		return;
	WARN_ON(atomic_read(&ref->usage) == 0);
	if (atomic_dec_and_test(&ref->usage)) {
		size_t size = btrfs_leaf_ref_size(ref->nritems);

		BUG_ON(ref->in_tree);
		kfree(ref);

		spin_lock(&root->fs_info->ref_cache_lock);
		root->fs_info->total_ref_cache_size -= size;
		spin_unlock(&root->fs_info->ref_cache_lock);
	}
}

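/*
 * insert a ref into the rb tree, keyed by bytenr.  Returns the existing
 * node if a ref for that bytenr is already present, or NULL once the new
 * node has been linked in.
 */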
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_leaf_ref *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct btrfs_leaf_ref, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

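/*
 * walk the rb tree looking for the ref cached at a given bytenr.
 * Returns the matching rb_node, or NULL if nothing is cached there.
 */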
static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_leaf_ref *entry;

	while (n) {
		entry = rb_entry(n, struct btrfs_leaf_ref, rb_node);
		WARN_ON(!entry->in_tree);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}

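/*
 * remove all cached leaf refs with a root_gen at or below max_root_gen.
 * The list is walked in insertion order and we stop at the first ref
 * that is too new.  With shared set, the fs-wide shared ref tree is
 * pruned instead of this root's private tree.
 */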
int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
			   int shared)
{
	struct btrfs_leaf_ref *ref = NULL;
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;

	if (shared)
		tree = &root->fs_info->shared_ref_tree;
	if (!tree)
		return 0;

	spin_lock(&tree->lock);
	while (!list_empty(&tree->list)) {
		ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
		BUG_ON(ref->tree != tree);
		if (ref->root_gen > max_root_gen)
			break;
		/*
		 * btrfs_remove_leaf_ref() can clear in_tree without
		 * holding tree->lock.  If it got there first, let it take
		 * the lock and finish unlinking the ref, then look at the
		 * list head again.
		 */
		if (!xchg(&ref->in_tree, 0)) {
			cond_resched_lock(&tree->lock);
			continue;
		}

		rb_erase(&ref->rb_node, &tree->root);
		list_del_init(&ref->list);

		/*
		 * drop the tree lock before freeing; the final put takes
		 * fs_info->ref_cache_lock and we cannot cond_resched()
		 * while holding a spinlock
		 */
		spin_unlock(&tree->lock);
		btrfs_free_leaf_ref(root, ref);
		cond_resched();
		spin_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
	return 0;
}

/*
 * find the leaf ref for a given extent.  If nothing is cached in this
 * root's private tree, the shared ref tree is checked as well.  The ref
 * is returned with its usage count incremented; the caller must drop it
 * with btrfs_free_leaf_ref()
 */
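/*
 * A sketch of the expected get/put pairing (field names such as nritems
 * and extents[] come from ref-cache.h and are assumed here):
 *
 *	ref = btrfs_lookup_leaf_ref(root, leaf_start);
 *	if (ref) {
 *		... walk ref->extents[0] through ref->extents[ref->nritems - 1] ...
 *		btrfs_free_leaf_ref(root, ref);
 *	}
 */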
struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
					     u64 bytenr)
{
	struct rb_node *rb;
	struct btrfs_leaf_ref *ref = NULL;
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;
again:
	if (tree) {
		spin_lock(&tree->lock);
		rb = tree_search(&tree->root, bytenr);
		if (rb)
			ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
		if (ref)
			atomic_inc(&ref->usage);
		spin_unlock(&tree->lock);
		if (ref)
			return ref;
	}
	/*
	 * nothing in the per-root tree (or the root has none); fall back
	 * to the shared ref tree once
	 */
	if (tree != &root->fs_info->shared_ref_tree) {
		tree = &root->fs_info->shared_ref_tree;
		goto again;
	}
	return NULL;
}

/*
 * add a fully filled in leaf ref struct to the cache.  The tree takes
 * its own usage reference on the ref; -EEXIST is returned if a ref for
 * the same bytenr is already cached
 */
int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
		       int shared)
{
	int ret = 0;
	struct rb_node *rb;
	struct btrfs_leaf_ref_tree *tree = root->ref_tree;

	if (shared)
		tree = &root->fs_info->shared_ref_tree;

	spin_lock(&tree->lock);
	rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
	if (rb) {
		ret = -EEXIST;
	} else {
		/* the tree holds its own usage reference on the ref */
		atomic_inc(&ref->usage);
		ref->tree = tree;
		ref->in_tree = 1;
		list_add_tail(&ref->list, &tree->list);
	}
	spin_unlock(&tree->lock);
	return ret;
}

/*
 * remove a single leaf ref from its tree.  Only the usage reference held
 * by the tree is dropped; references held by other users stay valid
 */
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
	struct btrfs_leaf_ref_tree *tree;

	if (!xchg(&ref->in_tree, 0))
		return 0;

	tree = ref->tree;
	spin_lock(&tree->lock);

	rb_erase(&ref->rb_node, &tree->root);
	list_del_init(&ref->list);

	spin_unlock(&tree->lock);

	btrfs_free_leaf_ref(root, ref);
	return 0;
}