#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
        extent_map_cache = kmem_cache_create("btrfs_extent_map",
                        sizeof(struct extent_map), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_map_cache)
                return -ENOMEM;
        return 0;
}

void extent_map_exit(void)
{
        if (extent_map_cache)
                kmem_cache_destroy(extent_map_cache);
}

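/*
 * Editor's note (not part of the original file): this init/exit pair is
 * driven from the filesystem module's own load/unload path; in btrfs
 * that is init_btrfs_fs()/exit_btrfs_fs() in super.c, which fails module
 * load with -ENOMEM when the slab cache cannot be created.
 */
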
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:       tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
        tree->map = RB_ROOT;
        INIT_LIST_HEAD(&tree->modified_extents);
        rwlock_init(&tree->lock);
}

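/*
 * Editor's sketch (not part of the original file): a minimal example of
 * embedding one tree per inode, the way btrfs does from its inode
 * allocation path.  struct example_inode and example_inode_init() are
 * hypothetical names used only for illustration.
 */
struct example_inode {
        struct extent_map_tree extent_tree;
};

static void example_inode_init(struct example_inode *inode)
{
        /* tree->lock is an rwlock; lookups take read_lock(&tree->lock) */
        extent_map_tree_init(&inode->extent_tree);
}
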
/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(void)
{
        struct extent_map *em;
        em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
        if (!em)
                return NULL;
        em->in_tree = 0;
        em->flags = 0;
        em->compress_type = BTRFS_COMPRESS_NONE;
        em->generation = 0;
        atomic_set(&em->refs, 1);
        INIT_LIST_HEAD(&em->list);
        return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:         extent map being released
 *
 * Drops the reference on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        WARN_ON(atomic_read(&em->refs) == 0);
        if (atomic_dec_and_test(&em->refs)) {
                WARN_ON(em->in_tree);
                WARN_ON(!list_empty(&em->list));
                kmem_cache_free(extent_map_cache, em);
        }
}

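/*
 * Editor's sketch (not part of the original file): the reference
 * lifecycle.  alloc_extent_map() hands back one reference, inserting
 * into a tree takes another, and every free_extent_map() drops one; the
 * slab object is only returned once the count reaches zero.
 */
static void refcount_example(void)
{
        struct extent_map *em;

        em = alloc_extent_map();        /* refs == 1 */
        if (!em)
                return;                 /* GFP_NOFS allocation failed */
        free_extent_map(em);            /* refs == 0: freed to the cache */
}
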
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct extent_map *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct extent_map, rb_node);

                WARN_ON(!entry->in_tree);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset >= extent_map_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct extent_map, rb_node);
        entry->in_tree = 1;
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

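/*
 * Editor's note (not part of the original file): tree_insert() returns
 * NULL once the node is linked and rebalanced, and returns the existing
 * rb_node when @offset falls inside an extent already in the tree;
 * add_extent_mapping() below turns that conflict into -EEXIST.
 */
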
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct extent_map *entry;
        struct extent_map *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct extent_map, rb_node);
                prev = n;
                prev_entry = entry;

                WARN_ON(!entry->in_tree);

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset >= extent_map_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset >= extent_map_end(prev_entry)) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct extent_map, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

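/*
 * Editor's sketch (not part of the original file): the caller-side
 * pattern for the neighbor fallback, mirroring __lookup_extent_mapping()
 * below.  On a miss, *prev_ret and *next_ret let the caller settle for
 * the closest extent after or before @offset.  Caller holds tree->lock.
 */
static struct extent_map *neighbor_search_example(struct extent_map_tree *tree,
                                                  u64 offset)
{
        struct rb_node *prev = NULL;
        struct rb_node *next = NULL;
        struct rb_node *n;

        n = __tree_search(&tree->map, offset, &prev, &next);
        if (!n)
                n = prev ? prev : next; /* no exact hit: take a neighbor */
        return n ? rb_entry(n, struct extent_map, rb_node) : NULL;
}
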
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
                return 0;

        /*
         * don't merge compressed extents, we need to know their
         * actual size
         */
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;

        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->bdev == next->bdev &&
            ((next->block_start == EXTENT_MAP_HOLE &&
              prev->block_start == EXTENT_MAP_HOLE) ||
             (next->block_start == EXTENT_MAP_INLINE &&
              prev->block_start == EXTENT_MAP_INLINE) ||
             (next->block_start == EXTENT_MAP_DELALLOC &&
              prev->block_start == EXTENT_MAP_DELALLOC) ||
             (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
              next->block_start == extent_map_block_end(prev)))) {
                return 1;
        }
        return 0;
}

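/*
 * Editor's sketch (not part of the original file): two 4KiB extents that
 * are contiguous both in the file and on disk, with matching flags and
 * bdev, are mergable; pinned or compressed extents never are.  The disk
 * offsets below are made up for the example.
 */
static int mergable_example(void)
{
        struct extent_map *a = alloc_extent_map();
        struct extent_map *b = alloc_extent_map();
        int mergable = 0;

        if (!a || !b)
                goto out;
        a->start = 0;
        a->len = 4096;
        a->block_start = 1024 * 1024;   /* hypothetical disk byte offset */
        a->block_len = 4096;
        b->start = 4096;                /* starts exactly where a ends */
        b->len = 4096;
        b->block_start = a->block_start + a->block_len;
        b->block_len = 4096;
        a->bdev = NULL;
        b->bdev = NULL;
        mergable = mergable_maps(a, b); /* 1: adjacent and uncompressed */
out:
        free_extent_map(a);
        free_extent_map(b);
        return mergable;
}
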
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
        struct extent_map *merge = NULL;
        struct rb_node *rb;

        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        merge = rb_entry(rb, struct extent_map, rb_node);
                if (rb && mergable_maps(merge, em)) {
                        em->start = merge->start;
                        em->len += merge->len;
                        em->block_len += merge->block_len;
                        em->block_start = merge->block_start;
                        merge->in_tree = 0;
                        if (merge->generation > em->generation) {
                                em->mod_start = em->start;
                                em->mod_len = em->len;
                                em->generation = merge->generation;
                                list_move(&em->list, &tree->modified_extents);
                        }

                        list_del_init(&merge->list);
                        rb_erase(&merge->rb_node, &tree->map);
                        free_extent_map(merge);
                }
        }

        rb = rb_next(&em->rb_node);
        if (rb)
                merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(em, merge)) {
                em->len += merge->len;
                em->block_len += merge->len;
                rb_erase(&merge->rb_node, &tree->map);
                merge->in_tree = 0;
                if (merge->generation > em->generation) {
                        em->mod_len = em->len;
                        em->generation = merge->generation;
                        list_move(&em->list, &tree->modified_extents);
                }
                list_del_init(&merge->list);
                free_extent_map(merge);
        }
}

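/*
 * Editor's note (not part of the original file): when try_merge_map()
 * absorbs a neighbor carrying a newer generation, the merged map
 * inherits that generation and is moved onto tree->modified_extents, so
 * a later fsync still treats the absorbed range as needing to be logged.
 */
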
/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:       tree to unpin the extent in
 * @start:      logical offset in the file
 * @len:        length of the extent
 * @gen:        generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the
 * generation to the generation that actually added the file item to the
 * inode so we know we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
                       u64 gen)
{
        int ret = 0;
        struct extent_map *em;
        bool prealloc = false;

        write_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);

        WARN_ON(!em || em->start != start);

        if (!em)
                goto out;

        list_move(&em->list, &tree->modified_extents);
        em->generation = gen;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        em->mod_start = em->start;
        em->mod_len = em->len;

        if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
                prealloc = true;
                clear_bit(EXTENT_FLAG_PREALLOC, &em->flags);
        }

        try_merge_map(tree, em);

        if (prealloc) {
                em->mod_start = em->start;
                em->mod_len = em->len;
        }

        free_extent_map(em);
out:
        write_unlock(&tree->lock);
        return ret;
}

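/*
 * Editor's sketch (not part of the original file): unpinning from an
 * ordered-I/O completion path once [start, start + len) is safely on
 * disk.  In btrfs this is done from btrfs_finish_ordered_io(); the
 * function name below is hypothetical.
 */
static void writeback_done_example(struct extent_map_tree *tree,
                                   u64 start, u64 len, u64 transid)
{
        /* clears EXTENT_FLAG_PINNED and records the owning generation */
        unpin_extent_cache(tree, start, len, transid);
}
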
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:       tree to insert new map in
 * @em:         map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em)
{
        int ret = 0;
        struct rb_node *rb;
        struct extent_map *exist;

        exist = lookup_extent_mapping(tree, em->start, em->len);
        if (exist) {
                free_extent_map(exist);
                ret = -EEXIST;
                goto out;
        }
        rb = tree_insert(&tree->map, em->start, &em->rb_node);
        if (rb) {
                ret = -EEXIST;
                goto out;
        }
        atomic_inc(&em->refs);

        em->mod_start = em->start;
        em->mod_len = em->len;

        try_merge_map(tree, em);
out:
        return ret;
}

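/*
 * Editor's sketch (not part of the original file): the caller must hold
 * tree->lock for writing.  On success the tree takes its own reference,
 * so the caller drops the allocation reference afterwards; on -EEXIST
 * the mapping was never inserted and the same free releases it fully.
 */
static int insert_mapping_example(struct extent_map_tree *tree,
                                  struct extent_map *em)
{
        int ret;

        write_lock(&tree->lock);
        ret = add_extent_mapping(tree, em);
        write_unlock(&tree->lock);
        free_extent_map(em);    /* drop the caller's reference */
        return ret;
}
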
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
        if (start + len < start)
                return (u64)-1;
        return start + len;
}

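/*
 * Editor's note (not part of the original file): range_end() saturates
 * instead of wrapping; e.g. range_end((u64)-4096, 8192) would wrap to
 * 4096, so it returns (u64)-1 instead and keeps searches near the top of
 * the offset space well-formed.
 */
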
struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
                                           u64 start, u64 len, int strict)
{
        struct extent_map *em;
        struct rb_node *rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *next = NULL;
        u64 end = range_end(start, len);

        rb_node = __tree_search(&tree->map, start, &prev, &next);
        if (!rb_node) {
                if (prev)
                        rb_node = prev;
                else if (next)
                        rb_node = next;
                else
                        return NULL;
        }

        em = rb_entry(rb_node, struct extent_map, rb_node);

        if (strict && !(end > em->start && start < extent_map_end(em)))
                return NULL;

        atomic_inc(&em->refs);
        return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:       tree to lookup in
 * @start:      byte offset to start the search
 * @len:        length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        return __lookup_extent_mapping(tree, start, len, 1);
}

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:       tree to lookup in
 * @start:      byte offset to start the search
 * @len:        length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        return __lookup_extent_mapping(tree, start, len, 0);
}

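/*
 * Editor's sketch (not part of the original file): strict vs. relaxed
 * lookup under the read lock.  lookup_extent_mapping() returns NULL
 * unless the result truly overlaps [start, start + len), while
 * search_extent_mapping() may return a nearby neighbor instead.
 */
static struct extent_map *lookup_example(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        struct extent_map *em;

        read_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);
        if (!em)
                em = search_extent_mapping(tree, start, len);
        read_unlock(&tree->lock);
        return em;      /* non-NULL result must be free_extent_map()ed */
}
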
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:       extent tree to remove from
 * @em:         extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        int ret = 0;

        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
        rb_erase(&em->rb_node, &tree->map);
        if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
                list_del_init(&em->list);
        em->in_tree = 0;
        return ret;
}
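
/*
 * Editor's sketch (not part of the original file): evicting one mapping.
 * remove_extent_mapping() leaves the reference counting to the caller,
 * so the reference the tree held is dropped explicitly afterwards.
 */
static void remove_mapping_example(struct extent_map_tree *tree,
                                   struct extent_map *em)
{
        write_lock(&tree->lock);
        remove_extent_mapping(tree, em);
        write_unlock(&tree->lock);
        free_extent_map(em);    /* drop the reference the tree held */
}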