Btrfs: deal with errors in write_dev_supers
[linux-2.6-block.git] / fs / btrfs / inode.c
CommitLineData
6cbd5570
CM
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
8f18cf13 19#include <linux/kernel.h>
065631f6 20#include <linux/bio.h>
39279cc3 21#include <linux/buffer_head.h>
f2eb0a24 22#include <linux/file.h>
39279cc3
CM
23#include <linux/fs.h>
24#include <linux/pagemap.h>
25#include <linux/highmem.h>
26#include <linux/time.h>
27#include <linux/init.h>
28#include <linux/string.h>
39279cc3
CM
29#include <linux/backing-dev.h>
30#include <linux/mpage.h>
31#include <linux/swap.h>
32#include <linux/writeback.h>
33#include <linux/statfs.h>
34#include <linux/compat.h>
9ebefb18 35#include <linux/bit_spinlock.h>
5103e947 36#include <linux/xattr.h>
33268eaf 37#include <linux/posix_acl.h>
d899e052 38#include <linux/falloc.h>
5a0e3ad6 39#include <linux/slab.h>
7a36ddec 40#include <linux/ratelimit.h>
22c44fe6 41#include <linux/mount.h>
55e301fd 42#include <linux/btrfs.h>
53b381b3 43#include <linux/blkdev.h>
4b4e25f2 44#include "compat.h"
39279cc3
CM
45#include "ctree.h"
46#include "disk-io.h"
47#include "transaction.h"
48#include "btrfs_inode.h"
39279cc3 49#include "print-tree.h"
e6dcd2dc 50#include "ordered-data.h"
95819c05 51#include "xattr.h"
e02119d5 52#include "tree-log.h"
4a54c8c1 53#include "volumes.h"
c8b97818 54#include "compression.h"
b4ce94de 55#include "locking.h"
dc89e982 56#include "free-space-cache.h"
581bb050 57#include "inode-map.h"
38c227d8 58#include "backref.h"
39279cc3
CM
59
60struct btrfs_iget_args {
61 u64 ino;
62 struct btrfs_root *root;
63};
64
6e1d5dcc
AD
65static const struct inode_operations btrfs_dir_inode_operations;
66static const struct inode_operations btrfs_symlink_inode_operations;
67static const struct inode_operations btrfs_dir_ro_inode_operations;
68static const struct inode_operations btrfs_special_inode_operations;
69static const struct inode_operations btrfs_file_inode_operations;
7f09410b
AD
70static const struct address_space_operations btrfs_aops;
71static const struct address_space_operations btrfs_symlink_aops;
828c0950 72static const struct file_operations btrfs_dir_file_operations;
d1310b2e 73static struct extent_io_ops btrfs_extent_io_ops;
39279cc3
CM
74
75static struct kmem_cache *btrfs_inode_cachep;
8ccf6f19 76static struct kmem_cache *btrfs_delalloc_work_cachep;
39279cc3
CM
77struct kmem_cache *btrfs_trans_handle_cachep;
78struct kmem_cache *btrfs_transaction_cachep;
39279cc3 79struct kmem_cache *btrfs_path_cachep;
dc89e982 80struct kmem_cache *btrfs_free_space_cachep;
39279cc3
CM
81
82#define S_SHIFT 12
83static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
84 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
85 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
86 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
87 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
88 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
89 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
90 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
91};
92
3972f260 93static int btrfs_setsize(struct inode *inode, struct iattr *attr);
a41ad394 94static int btrfs_truncate(struct inode *inode);
5fd02043 95static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
771ed689
CM
96static noinline int cow_file_range(struct inode *inode,
97 struct page *locked_page,
98 u64 start, u64 end, int *page_started,
99 unsigned long *nr_written, int unlock);
70c8a91c
JB
100static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
101 u64 len, u64 orig_start,
102 u64 block_start, u64 block_len,
cc95bef6
JB
103 u64 orig_block_len, u64 ram_bytes,
104 int type);
7b128766 105
f34f57a3 106static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
2a7dba39
EP
107 struct inode *inode, struct inode *dir,
108 const struct qstr *qstr)
0279b4cd
JO
109{
110 int err;
111
f34f57a3 112 err = btrfs_init_acl(trans, inode, dir);
0279b4cd 113 if (!err)
2a7dba39 114 err = btrfs_xattr_security_init(trans, inode, dir, qstr);
0279b4cd
JO
115 return err;
116}
117
c8b97818
CM
118/*
119 * this does all the hard work for inserting an inline extent into
120 * the btree. The caller should have done a btrfs_drop_extents so that
121 * no overlapping inline items exist in the btree
122 */
d397712b 123static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
c8b97818
CM
124 struct btrfs_root *root, struct inode *inode,
125 u64 start, size_t size, size_t compressed_size,
fe3f566c 126 int compress_type,
c8b97818
CM
127 struct page **compressed_pages)
128{
129 struct btrfs_key key;
130 struct btrfs_path *path;
131 struct extent_buffer *leaf;
132 struct page *page = NULL;
133 char *kaddr;
134 unsigned long ptr;
135 struct btrfs_file_extent_item *ei;
136 int err = 0;
137 int ret;
138 size_t cur_size = size;
139 size_t datasize;
140 unsigned long offset;
c8b97818 141
fe3f566c 142 if (compressed_size && compressed_pages)
c8b97818 143 cur_size = compressed_size;
c8b97818 144
d397712b
CM
145 path = btrfs_alloc_path();
146 if (!path)
c8b97818
CM
147 return -ENOMEM;
148
b9473439 149 path->leave_spinning = 1;
c8b97818 150
33345d01 151 key.objectid = btrfs_ino(inode);
c8b97818
CM
152 key.offset = start;
153 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
c8b97818
CM
154 datasize = btrfs_file_extent_calc_inline_size(cur_size);
155
156 inode_add_bytes(inode, size);
157 ret = btrfs_insert_empty_item(trans, root, path, &key,
158 datasize);
c8b97818
CM
159 if (ret) {
160 err = ret;
c8b97818
CM
161 goto fail;
162 }
163 leaf = path->nodes[0];
164 ei = btrfs_item_ptr(leaf, path->slots[0],
165 struct btrfs_file_extent_item);
166 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
167 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
168 btrfs_set_file_extent_encryption(leaf, ei, 0);
169 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
170 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
171 ptr = btrfs_file_extent_inline_start(ei);
172
261507a0 173 if (compress_type != BTRFS_COMPRESS_NONE) {
c8b97818
CM
174 struct page *cpage;
175 int i = 0;
d397712b 176 while (compressed_size > 0) {
c8b97818 177 cpage = compressed_pages[i];
5b050f04 178 cur_size = min_t(unsigned long, compressed_size,
c8b97818
CM
179 PAGE_CACHE_SIZE);
180
7ac687d9 181 kaddr = kmap_atomic(cpage);
c8b97818 182 write_extent_buffer(leaf, kaddr, ptr, cur_size);
7ac687d9 183 kunmap_atomic(kaddr);
c8b97818
CM
184
185 i++;
186 ptr += cur_size;
187 compressed_size -= cur_size;
188 }
189 btrfs_set_file_extent_compression(leaf, ei,
261507a0 190 compress_type);
c8b97818
CM
191 } else {
192 page = find_get_page(inode->i_mapping,
193 start >> PAGE_CACHE_SHIFT);
194 btrfs_set_file_extent_compression(leaf, ei, 0);
7ac687d9 195 kaddr = kmap_atomic(page);
c8b97818
CM
196 offset = start & (PAGE_CACHE_SIZE - 1);
197 write_extent_buffer(leaf, kaddr + offset, ptr, size);
7ac687d9 198 kunmap_atomic(kaddr);
c8b97818
CM
199 page_cache_release(page);
200 }
201 btrfs_mark_buffer_dirty(leaf);
202 btrfs_free_path(path);
203
c2167754
YZ
204 /*
205 * we're an inline extent, so nobody can
206 * extend the file past i_size without locking
207 * a page we already have locked.
208 *
209 * We must do any isize and inode updates
210 * before we unlock the pages. Otherwise we
211 * could end up racing with unlink.
212 */
c8b97818 213 BTRFS_I(inode)->disk_i_size = inode->i_size;
79787eaa 214 ret = btrfs_update_inode(trans, root, inode);
c2167754 215
79787eaa 216 return ret;
c8b97818
CM
217fail:
218 btrfs_free_path(path);
219 return err;
220}
221
222
223/*
224 * conditionally insert an inline extent into the file. This
225 * does the checks required to make sure the data is small enough
226 * to fit as an inline extent.
227 */
7f366cfe 228static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
c8b97818
CM
229 struct btrfs_root *root,
230 struct inode *inode, u64 start, u64 end,
fe3f566c 231 size_t compressed_size, int compress_type,
c8b97818
CM
232 struct page **compressed_pages)
233{
234 u64 isize = i_size_read(inode);
235 u64 actual_end = min(end + 1, isize);
236 u64 inline_len = actual_end - start;
fda2832f 237 u64 aligned_end = ALIGN(end, root->sectorsize);
c8b97818
CM
238 u64 data_len = inline_len;
239 int ret;
240
241 if (compressed_size)
242 data_len = compressed_size;
243
244 if (start > 0 ||
70b99e69 245 actual_end >= PAGE_CACHE_SIZE ||
c8b97818
CM
246 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
247 (!compressed_size &&
248 (actual_end & (root->sectorsize - 1)) == 0) ||
249 end + 1 < isize ||
250 data_len > root->fs_info->max_inline) {
251 return 1;
252 }
253
2671485d 254 ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
79787eaa
JM
255 if (ret)
256 return ret;
c8b97818
CM
257
258 if (isize > actual_end)
259 inline_len = min_t(u64, isize, actual_end);
260 ret = insert_inline_extent(trans, root, inode, start,
261 inline_len, compressed_size,
fe3f566c 262 compress_type, compressed_pages);
2adcac1a 263 if (ret && ret != -ENOSPC) {
79787eaa
JM
264 btrfs_abort_transaction(trans, root, ret);
265 return ret;
2adcac1a
JB
266 } else if (ret == -ENOSPC) {
267 return 1;
79787eaa 268 }
2adcac1a 269
bdc20e67 270 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
0ca1f7ce 271 btrfs_delalloc_release_metadata(inode, end + 1 - start);
a1ed835e 272 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
c8b97818
CM
273 return 0;
274}
275
771ed689
CM
276struct async_extent {
277 u64 start;
278 u64 ram_size;
279 u64 compressed_size;
280 struct page **pages;
281 unsigned long nr_pages;
261507a0 282 int compress_type;
771ed689
CM
283 struct list_head list;
284};
285
286struct async_cow {
287 struct inode *inode;
288 struct btrfs_root *root;
289 struct page *locked_page;
290 u64 start;
291 u64 end;
292 struct list_head extents;
293 struct btrfs_work work;
294};
295
296static noinline int add_async_extent(struct async_cow *cow,
297 u64 start, u64 ram_size,
298 u64 compressed_size,
299 struct page **pages,
261507a0
LZ
300 unsigned long nr_pages,
301 int compress_type)
771ed689
CM
302{
303 struct async_extent *async_extent;
304
305 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
79787eaa 306 BUG_ON(!async_extent); /* -ENOMEM */
771ed689
CM
307 async_extent->start = start;
308 async_extent->ram_size = ram_size;
309 async_extent->compressed_size = compressed_size;
310 async_extent->pages = pages;
311 async_extent->nr_pages = nr_pages;
261507a0 312 async_extent->compress_type = compress_type;
771ed689
CM
313 list_add_tail(&async_extent->list, &cow->extents);
314 return 0;
315}
316
d352ac68 317/*
771ed689
CM
318 * we create compressed extents in two phases. The first
319 * phase compresses a range of pages that have already been
320 * locked (both pages and state bits are locked).
c8b97818 321 *
771ed689
CM
322 * This is done inside an ordered work queue, and the compression
323 * is spread across many cpus. The actual IO submission is step
324 * two, and the ordered work queue takes care of making sure that
325 * happens in the same order things were put onto the queue by
326 * writepages and friends.
c8b97818 327 *
771ed689
CM
328 * If this code finds it can't get good compression, it puts an
329 * entry onto the work queue to write the uncompressed bytes. This
330 * makes sure that both compressed inodes and uncompressed inodes
b2570314
AB
331 * are written in the same order that the flusher thread sent them
332 * down.
d352ac68 333 */
771ed689
CM
334static noinline int compress_file_range(struct inode *inode,
335 struct page *locked_page,
336 u64 start, u64 end,
337 struct async_cow *async_cow,
338 int *num_added)
b888db2b
CM
339{
340 struct btrfs_root *root = BTRFS_I(inode)->root;
341 struct btrfs_trans_handle *trans;
db94535d 342 u64 num_bytes;
db94535d 343 u64 blocksize = root->sectorsize;
c8b97818 344 u64 actual_end;
42dc7bab 345 u64 isize = i_size_read(inode);
e6dcd2dc 346 int ret = 0;
c8b97818
CM
347 struct page **pages = NULL;
348 unsigned long nr_pages;
349 unsigned long nr_pages_ret = 0;
350 unsigned long total_compressed = 0;
351 unsigned long total_in = 0;
352 unsigned long max_compressed = 128 * 1024;
771ed689 353 unsigned long max_uncompressed = 128 * 1024;
c8b97818
CM
354 int i;
355 int will_compress;
261507a0 356 int compress_type = root->fs_info->compress_type;
4adaa611 357 int redirty = 0;
b888db2b 358
4cb13e5d
LB
359 /* if this is a small write inside eof, kick off a defrag */
360 if ((end - start + 1) < 16 * 1024 &&
361 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
4cb5300b
CM
362 btrfs_add_inode_defrag(NULL, inode);
363
42dc7bab 364 actual_end = min_t(u64, isize, end + 1);
c8b97818
CM
365again:
366 will_compress = 0;
367 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
368 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
be20aa9d 369
f03d9301
CM
370 /*
371 * we don't want to send crud past the end of i_size through
372 * compression, that's just a waste of CPU time. So, if the
373 * end of the file is before the start of our current
374 * requested range of bytes, we bail out to the uncompressed
375 * cleanup code that can deal with all of this.
376 *
377 * It isn't really the fastest way to fix things, but this is a
378 * very uncommon corner.
379 */
380 if (actual_end <= start)
381 goto cleanup_and_bail_uncompressed;
382
c8b97818
CM
383 total_compressed = actual_end - start;
384
385 /* we want to make sure that amount of ram required to uncompress
386 * an extent is reasonable, so we limit the total size in ram
771ed689
CM
387 * of a compressed extent to 128k. This is a crucial number
388 * because it also controls how easily we can spread reads across
389 * cpus for decompression.
390 *
391 * We also want to make sure the amount of IO required to do
392 * a random read is reasonably small, so we limit the size of
393 * a compressed extent to 128k.
c8b97818
CM
394 */
395 total_compressed = min(total_compressed, max_uncompressed);
fda2832f 396 num_bytes = ALIGN(end - start + 1, blocksize);
be20aa9d 397 num_bytes = max(blocksize, num_bytes);
c8b97818
CM
398 total_in = 0;
399 ret = 0;
db94535d 400
771ed689
CM
401 /*
402 * we do compression for mount -o compress and when the
403 * inode has not been flagged as nocompress. This flag can
404 * change at any time if we discover bad compression ratios.
c8b97818 405 */
6cbff00f 406 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
1e701a32 407 (btrfs_test_opt(root, COMPRESS) ||
75e7cb7f
LB
408 (BTRFS_I(inode)->force_compress) ||
409 (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
c8b97818 410 WARN_ON(pages);
cfbc246e 411 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
560f7d75
LZ
412 if (!pages) {
413 /* just bail out to the uncompressed code */
414 goto cont;
415 }
c8b97818 416
261507a0
LZ
417 if (BTRFS_I(inode)->force_compress)
418 compress_type = BTRFS_I(inode)->force_compress;
419
4adaa611
CM
420 /*
421 * we need to call clear_page_dirty_for_io on each
422 * page in the range. Otherwise applications with the file
423 * mmap'd can wander in and change the page contents while
424 * we are compressing them.
425 *
426 * If the compression fails for any reason, we set the pages
427 * dirty again later on.
428 */
429 extent_range_clear_dirty_for_io(inode, start, end);
430 redirty = 1;
261507a0
LZ
431 ret = btrfs_compress_pages(compress_type,
432 inode->i_mapping, start,
433 total_compressed, pages,
434 nr_pages, &nr_pages_ret,
435 &total_in,
436 &total_compressed,
437 max_compressed);
c8b97818
CM
438
439 if (!ret) {
440 unsigned long offset = total_compressed &
441 (PAGE_CACHE_SIZE - 1);
442 struct page *page = pages[nr_pages_ret - 1];
443 char *kaddr;
444
445 /* zero the tail end of the last page, we might be
446 * sending it down to disk
447 */
448 if (offset) {
7ac687d9 449 kaddr = kmap_atomic(page);
c8b97818
CM
450 memset(kaddr + offset, 0,
451 PAGE_CACHE_SIZE - offset);
7ac687d9 452 kunmap_atomic(kaddr);
c8b97818
CM
453 }
454 will_compress = 1;
455 }
456 }
560f7d75 457cont:
c8b97818 458 if (start == 0) {
7a7eaa40 459 trans = btrfs_join_transaction(root);
79787eaa
JM
460 if (IS_ERR(trans)) {
461 ret = PTR_ERR(trans);
462 trans = NULL;
463 goto cleanup_and_out;
464 }
0ca1f7ce 465 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
771ed689 466
c8b97818 467 /* lets try to make an inline extent */
771ed689 468 if (ret || total_in < (actual_end - start)) {
c8b97818 469 /* we didn't compress the entire range, try
771ed689 470 * to make an uncompressed inline extent.
c8b97818
CM
471 */
472 ret = cow_file_range_inline(trans, root, inode,
fe3f566c 473 start, end, 0, 0, NULL);
c8b97818 474 } else {
771ed689 475 /* try making a compressed inline extent */
c8b97818
CM
476 ret = cow_file_range_inline(trans, root, inode,
477 start, end,
fe3f566c
LZ
478 total_compressed,
479 compress_type, pages);
c8b97818 480 }
79787eaa 481 if (ret <= 0) {
771ed689 482 /*
79787eaa
JM
483 * inline extent creation worked or returned error,
484 * we don't need to create any more async work items.
485 * Unlock and free up our temp pages.
771ed689 486 */
c8b97818 487 extent_clear_unlock_delalloc(inode,
a791e35e
CM
488 &BTRFS_I(inode)->io_tree,
489 start, end, NULL,
490 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
a3429ab7 491 EXTENT_CLEAR_DELALLOC |
a791e35e 492 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
c2167754
YZ
493
494 btrfs_end_transaction(trans, root);
c8b97818
CM
495 goto free_pages_out;
496 }
c2167754 497 btrfs_end_transaction(trans, root);
c8b97818
CM
498 }
499
500 if (will_compress) {
501 /*
502 * we aren't doing an inline extent round the compressed size
503 * up to a block size boundary so the allocator does sane
504 * things
505 */
fda2832f 506 total_compressed = ALIGN(total_compressed, blocksize);
c8b97818
CM
507
508 /*
509 * one last check to make sure the compression is really a
510 * win, compare the page count read with the blocks on disk
511 */
fda2832f 512 total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
c8b97818
CM
513 if (total_compressed >= total_in) {
514 will_compress = 0;
515 } else {
c8b97818
CM
516 num_bytes = total_in;
517 }
518 }
519 if (!will_compress && pages) {
520 /*
521 * the compression code ran but failed to make things smaller,
522 * free any pages it allocated and our page pointer array
523 */
524 for (i = 0; i < nr_pages_ret; i++) {
70b99e69 525 WARN_ON(pages[i]->mapping);
c8b97818
CM
526 page_cache_release(pages[i]);
527 }
528 kfree(pages);
529 pages = NULL;
530 total_compressed = 0;
531 nr_pages_ret = 0;
532
533 /* flag the file so we don't compress in the future */
1e701a32
CM
534 if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
535 !(BTRFS_I(inode)->force_compress)) {
a555f810 536 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
1e701a32 537 }
c8b97818 538 }
771ed689
CM
539 if (will_compress) {
540 *num_added += 1;
c8b97818 541
771ed689
CM
542 /* the async work queues will take care of doing actual
543 * allocation on disk for these compressed pages,
544 * and will submit them to the elevator.
545 */
546 add_async_extent(async_cow, start, num_bytes,
261507a0
LZ
547 total_compressed, pages, nr_pages_ret,
548 compress_type);
179e29e4 549
24ae6365 550 if (start + num_bytes < end) {
771ed689
CM
551 start += num_bytes;
552 pages = NULL;
553 cond_resched();
554 goto again;
555 }
556 } else {
f03d9301 557cleanup_and_bail_uncompressed:
771ed689
CM
558 /*
559 * No compression, but we still need to write the pages in
560 * the file we've been given so far. redirty the locked
561 * page if it corresponds to our extent and set things up
562 * for the async work queue to run cow_file_range to do
563 * the normal delalloc dance
564 */
565 if (page_offset(locked_page) >= start &&
566 page_offset(locked_page) <= end) {
567 __set_page_dirty_nobuffers(locked_page);
568 /* unlocked later on in the async handlers */
569 }
4adaa611
CM
570 if (redirty)
571 extent_range_redirty_for_io(inode, start, end);
261507a0
LZ
572 add_async_extent(async_cow, start, end - start + 1,
573 0, NULL, 0, BTRFS_COMPRESS_NONE);
771ed689
CM
574 *num_added += 1;
575 }
3b951516 576
771ed689 577out:
79787eaa 578 return ret;
771ed689
CM
579
580free_pages_out:
581 for (i = 0; i < nr_pages_ret; i++) {
582 WARN_ON(pages[i]->mapping);
583 page_cache_release(pages[i]);
584 }
d397712b 585 kfree(pages);
771ed689
CM
586
587 goto out;
79787eaa
JM
588
589cleanup_and_out:
590 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
591 start, end, NULL,
592 EXTENT_CLEAR_UNLOCK_PAGE |
593 EXTENT_CLEAR_DIRTY |
594 EXTENT_CLEAR_DELALLOC |
595 EXTENT_SET_WRITEBACK |
596 EXTENT_END_WRITEBACK);
597 if (!trans || IS_ERR(trans))
598 btrfs_error(root->fs_info, ret, "Failed to join transaction");
599 else
600 btrfs_abort_transaction(trans, root, ret);
601 goto free_pages_out;
771ed689
CM
602}
603
604/*
605 * phase two of compressed writeback. This is the ordered portion
606 * of the code, which only gets called in the order the work was
607 * queued. We walk all the async extents created by compress_file_range
608 * and send them down to the disk.
609 */
610static noinline int submit_compressed_extents(struct inode *inode,
611 struct async_cow *async_cow)
612{
613 struct async_extent *async_extent;
614 u64 alloc_hint = 0;
615 struct btrfs_trans_handle *trans;
616 struct btrfs_key ins;
617 struct extent_map *em;
618 struct btrfs_root *root = BTRFS_I(inode)->root;
619 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
620 struct extent_io_tree *io_tree;
f5a84ee3 621 int ret = 0;
771ed689
CM
622
623 if (list_empty(&async_cow->extents))
624 return 0;
625
3e04e7f1 626again:
d397712b 627 while (!list_empty(&async_cow->extents)) {
771ed689
CM
628 async_extent = list_entry(async_cow->extents.next,
629 struct async_extent, list);
630 list_del(&async_extent->list);
c8b97818 631
771ed689
CM
632 io_tree = &BTRFS_I(inode)->io_tree;
633
f5a84ee3 634retry:
771ed689
CM
635 /* did the compression code fall back to uncompressed IO? */
636 if (!async_extent->pages) {
637 int page_started = 0;
638 unsigned long nr_written = 0;
639
640 lock_extent(io_tree, async_extent->start,
2ac55d41 641 async_extent->start +
d0082371 642 async_extent->ram_size - 1);
771ed689
CM
643
644 /* allocate blocks */
f5a84ee3
JB
645 ret = cow_file_range(inode, async_cow->locked_page,
646 async_extent->start,
647 async_extent->start +
648 async_extent->ram_size - 1,
649 &page_started, &nr_written, 0);
771ed689 650
79787eaa
JM
651 /* JDM XXX */
652
771ed689
CM
653 /*
654 * if page_started, cow_file_range inserted an
655 * inline extent and took care of all the unlocking
656 * and IO for us. Otherwise, we need to submit
657 * all those pages down to the drive.
658 */
f5a84ee3 659 if (!page_started && !ret)
771ed689
CM
660 extent_write_locked_range(io_tree,
661 inode, async_extent->start,
d397712b 662 async_extent->start +
771ed689
CM
663 async_extent->ram_size - 1,
664 btrfs_get_extent,
665 WB_SYNC_ALL);
3e04e7f1
JB
666 else if (ret)
667 unlock_page(async_cow->locked_page);
771ed689
CM
668 kfree(async_extent);
669 cond_resched();
670 continue;
671 }
672
673 lock_extent(io_tree, async_extent->start,
d0082371 674 async_extent->start + async_extent->ram_size - 1);
771ed689 675
7a7eaa40 676 trans = btrfs_join_transaction(root);
79787eaa
JM
677 if (IS_ERR(trans)) {
678 ret = PTR_ERR(trans);
679 } else {
680 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
681 ret = btrfs_reserve_extent(trans, root,
771ed689
CM
682 async_extent->compressed_size,
683 async_extent->compressed_size,
81c9ad23 684 0, alloc_hint, &ins, 1);
962197ba 685 if (ret && ret != -ENOSPC)
79787eaa
JM
686 btrfs_abort_transaction(trans, root, ret);
687 btrfs_end_transaction(trans, root);
688 }
c2167754 689
f5a84ee3
JB
690 if (ret) {
691 int i;
3e04e7f1 692
f5a84ee3
JB
693 for (i = 0; i < async_extent->nr_pages; i++) {
694 WARN_ON(async_extent->pages[i]->mapping);
695 page_cache_release(async_extent->pages[i]);
696 }
697 kfree(async_extent->pages);
698 async_extent->nr_pages = 0;
699 async_extent->pages = NULL;
3e04e7f1 700
79787eaa
JM
701 if (ret == -ENOSPC)
702 goto retry;
3e04e7f1 703 goto out_free;
f5a84ee3
JB
704 }
705
c2167754
YZ
706 /*
707 * here we're doing allocation and writeback of the
708 * compressed pages
709 */
710 btrfs_drop_extent_cache(inode, async_extent->start,
711 async_extent->start +
712 async_extent->ram_size - 1, 0);
713
172ddd60 714 em = alloc_extent_map();
3e04e7f1
JB
715 if (!em)
716 goto out_free_reserve;
771ed689
CM
717 em->start = async_extent->start;
718 em->len = async_extent->ram_size;
445a6944 719 em->orig_start = em->start;
2ab28f32
JB
720 em->mod_start = em->start;
721 em->mod_len = em->len;
c8b97818 722
771ed689
CM
723 em->block_start = ins.objectid;
724 em->block_len = ins.offset;
b4939680 725 em->orig_block_len = ins.offset;
cc95bef6 726 em->ram_bytes = async_extent->ram_size;
771ed689 727 em->bdev = root->fs_info->fs_devices->latest_bdev;
261507a0 728 em->compress_type = async_extent->compress_type;
771ed689
CM
729 set_bit(EXTENT_FLAG_PINNED, &em->flags);
730 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
70c8a91c 731 em->generation = -1;
771ed689 732
d397712b 733 while (1) {
890871be 734 write_lock(&em_tree->lock);
09a2a8f9 735 ret = add_extent_mapping(em_tree, em, 1);
890871be 736 write_unlock(&em_tree->lock);
771ed689
CM
737 if (ret != -EEXIST) {
738 free_extent_map(em);
739 break;
740 }
741 btrfs_drop_extent_cache(inode, async_extent->start,
742 async_extent->start +
743 async_extent->ram_size - 1, 0);
744 }
745
3e04e7f1
JB
746 if (ret)
747 goto out_free_reserve;
748
261507a0
LZ
749 ret = btrfs_add_ordered_extent_compress(inode,
750 async_extent->start,
751 ins.objectid,
752 async_extent->ram_size,
753 ins.offset,
754 BTRFS_ORDERED_COMPRESSED,
755 async_extent->compress_type);
3e04e7f1
JB
756 if (ret)
757 goto out_free_reserve;
771ed689 758
771ed689
CM
759 /*
760 * clear dirty, set writeback and unlock the pages.
761 */
762 extent_clear_unlock_delalloc(inode,
a791e35e
CM
763 &BTRFS_I(inode)->io_tree,
764 async_extent->start,
765 async_extent->start +
766 async_extent->ram_size - 1,
767 NULL, EXTENT_CLEAR_UNLOCK_PAGE |
768 EXTENT_CLEAR_UNLOCK |
a3429ab7 769 EXTENT_CLEAR_DELALLOC |
a791e35e 770 EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
771ed689
CM
771
772 ret = btrfs_submit_compressed_write(inode,
d397712b
CM
773 async_extent->start,
774 async_extent->ram_size,
775 ins.objectid,
776 ins.offset, async_extent->pages,
777 async_extent->nr_pages);
771ed689
CM
778 alloc_hint = ins.objectid + ins.offset;
779 kfree(async_extent);
3e04e7f1
JB
780 if (ret)
781 goto out;
771ed689
CM
782 cond_resched();
783 }
79787eaa
JM
784 ret = 0;
785out:
786 return ret;
3e04e7f1
JB
787out_free_reserve:
788 btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
79787eaa 789out_free:
3e04e7f1
JB
790 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
791 async_extent->start,
792 async_extent->start +
793 async_extent->ram_size - 1,
794 NULL, EXTENT_CLEAR_UNLOCK_PAGE |
795 EXTENT_CLEAR_UNLOCK |
796 EXTENT_CLEAR_DELALLOC |
797 EXTENT_CLEAR_DIRTY |
798 EXTENT_SET_WRITEBACK |
799 EXTENT_END_WRITEBACK);
79787eaa 800 kfree(async_extent);
3e04e7f1 801 goto again;
771ed689
CM
802}
803
4b46fce2
JB
804static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
805 u64 num_bytes)
806{
807 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
808 struct extent_map *em;
809 u64 alloc_hint = 0;
810
811 read_lock(&em_tree->lock);
812 em = search_extent_mapping(em_tree, start, num_bytes);
813 if (em) {
814 /*
815 * if block start isn't an actual block number then find the
816 * first block in this inode and use that as a hint. If that
817 * block is also bogus then just don't worry about it.
818 */
819 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
820 free_extent_map(em);
821 em = search_extent_mapping(em_tree, 0, 0);
822 if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
823 alloc_hint = em->block_start;
824 if (em)
825 free_extent_map(em);
826 } else {
827 alloc_hint = em->block_start;
828 free_extent_map(em);
829 }
830 }
831 read_unlock(&em_tree->lock);
832
833 return alloc_hint;
834}
835
771ed689
CM
836/*
837 * when extent_io.c finds a delayed allocation range in the file,
838 * the call backs end up in this code. The basic idea is to
839 * allocate extents on disk for the range, and create ordered data structs
840 * in ram to track those extents.
841 *
842 * locked_page is the page that writepage had locked already. We use
843 * it to make sure we don't do extra locks or unlocks.
844 *
845 * *page_started is set to one if we unlock locked_page and do everything
846 * required to start IO on it. It may be clean and already done with
847 * IO when we return.
848 */
b7d5b0a8
MX
849static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
850 struct inode *inode,
851 struct btrfs_root *root,
852 struct page *locked_page,
853 u64 start, u64 end, int *page_started,
854 unsigned long *nr_written,
855 int unlock)
771ed689 856{
771ed689
CM
857 u64 alloc_hint = 0;
858 u64 num_bytes;
859 unsigned long ram_size;
860 u64 disk_num_bytes;
861 u64 cur_alloc_size;
862 u64 blocksize = root->sectorsize;
771ed689
CM
863 struct btrfs_key ins;
864 struct extent_map *em;
865 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
866 int ret = 0;
867
83eea1f1 868 BUG_ON(btrfs_is_free_space_inode(inode));
771ed689 869
fda2832f 870 num_bytes = ALIGN(end - start + 1, blocksize);
771ed689
CM
871 num_bytes = max(blocksize, num_bytes);
872 disk_num_bytes = num_bytes;
771ed689 873
4cb5300b 874 /* if this is a small write inside eof, kick off defrag */
4cb13e5d
LB
875 if (num_bytes < 64 * 1024 &&
876 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
4cb5300b
CM
877 btrfs_add_inode_defrag(trans, inode);
878
771ed689
CM
879 if (start == 0) {
880 /* lets try to make an inline extent */
881 ret = cow_file_range_inline(trans, root, inode,
fe3f566c 882 start, end, 0, 0, NULL);
771ed689
CM
883 if (ret == 0) {
884 extent_clear_unlock_delalloc(inode,
a791e35e
CM
885 &BTRFS_I(inode)->io_tree,
886 start, end, NULL,
887 EXTENT_CLEAR_UNLOCK_PAGE |
888 EXTENT_CLEAR_UNLOCK |
889 EXTENT_CLEAR_DELALLOC |
890 EXTENT_CLEAR_DIRTY |
891 EXTENT_SET_WRITEBACK |
892 EXTENT_END_WRITEBACK);
c2167754 893
771ed689
CM
894 *nr_written = *nr_written +
895 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
896 *page_started = 1;
771ed689 897 goto out;
79787eaa
JM
898 } else if (ret < 0) {
899 btrfs_abort_transaction(trans, root, ret);
900 goto out_unlock;
771ed689
CM
901 }
902 }
903
904 BUG_ON(disk_num_bytes >
6c41761f 905 btrfs_super_total_bytes(root->fs_info->super_copy));
771ed689 906
4b46fce2 907 alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
771ed689
CM
908 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
909
d397712b 910 while (disk_num_bytes > 0) {
a791e35e
CM
911 unsigned long op;
912
287a0ab9 913 cur_alloc_size = disk_num_bytes;
e6dcd2dc 914 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
771ed689 915 root->sectorsize, 0, alloc_hint,
81c9ad23 916 &ins, 1);
79787eaa
JM
917 if (ret < 0) {
918 btrfs_abort_transaction(trans, root, ret);
919 goto out_unlock;
920 }
d397712b 921
172ddd60 922 em = alloc_extent_map();
ace68bac
LB
923 if (!em)
924 goto out_reserve;
e6dcd2dc 925 em->start = start;
445a6944 926 em->orig_start = em->start;
771ed689
CM
927 ram_size = ins.offset;
928 em->len = ins.offset;
2ab28f32
JB
929 em->mod_start = em->start;
930 em->mod_len = em->len;
c8b97818 931
e6dcd2dc 932 em->block_start = ins.objectid;
c8b97818 933 em->block_len = ins.offset;
b4939680 934 em->orig_block_len = ins.offset;
cc95bef6 935 em->ram_bytes = ram_size;
e6dcd2dc 936 em->bdev = root->fs_info->fs_devices->latest_bdev;
7f3c74fb 937 set_bit(EXTENT_FLAG_PINNED, &em->flags);
70c8a91c 938 em->generation = -1;
c8b97818 939
d397712b 940 while (1) {
890871be 941 write_lock(&em_tree->lock);
09a2a8f9 942 ret = add_extent_mapping(em_tree, em, 1);
890871be 943 write_unlock(&em_tree->lock);
e6dcd2dc
CM
944 if (ret != -EEXIST) {
945 free_extent_map(em);
946 break;
947 }
948 btrfs_drop_extent_cache(inode, start,
c8b97818 949 start + ram_size - 1, 0);
e6dcd2dc 950 }
ace68bac
LB
951 if (ret)
952 goto out_reserve;
e6dcd2dc 953
98d20f67 954 cur_alloc_size = ins.offset;
e6dcd2dc 955 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
771ed689 956 ram_size, cur_alloc_size, 0);
ace68bac
LB
957 if (ret)
958 goto out_reserve;
c8b97818 959
17d217fe
YZ
960 if (root->root_key.objectid ==
961 BTRFS_DATA_RELOC_TREE_OBJECTID) {
962 ret = btrfs_reloc_clone_csums(inode, start,
963 cur_alloc_size);
79787eaa
JM
964 if (ret) {
965 btrfs_abort_transaction(trans, root, ret);
ace68bac 966 goto out_reserve;
79787eaa 967 }
17d217fe
YZ
968 }
969
d397712b 970 if (disk_num_bytes < cur_alloc_size)
3b951516 971 break;
d397712b 972
c8b97818
CM
973 /* we're not doing compressed IO, don't unlock the first
974 * page (which the caller expects to stay locked), don't
975 * clear any dirty bits and don't set any writeback bits
8b62b72b
CM
976 *
977 * Do set the Private2 bit so we know this page was properly
978 * setup for writepage
c8b97818 979 */
a791e35e
CM
980 op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
981 op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
982 EXTENT_SET_PRIVATE2;
983
c8b97818
CM
984 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
985 start, start + ram_size - 1,
a791e35e 986 locked_page, op);
c8b97818 987 disk_num_bytes -= cur_alloc_size;
c59f8951
CM
988 num_bytes -= cur_alloc_size;
989 alloc_hint = ins.objectid + ins.offset;
990 start += cur_alloc_size;
b888db2b 991 }
79787eaa 992out:
be20aa9d 993 return ret;
b7d5b0a8 994
ace68bac
LB
995out_reserve:
996 btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
79787eaa
JM
997out_unlock:
998 extent_clear_unlock_delalloc(inode,
999 &BTRFS_I(inode)->io_tree,
beb42dd7 1000 start, end, locked_page,
79787eaa
JM
1001 EXTENT_CLEAR_UNLOCK_PAGE |
1002 EXTENT_CLEAR_UNLOCK |
1003 EXTENT_CLEAR_DELALLOC |
1004 EXTENT_CLEAR_DIRTY |
1005 EXTENT_SET_WRITEBACK |
1006 EXTENT_END_WRITEBACK);
1007
1008 goto out;
771ed689 1009}
c8b97818 1010
b7d5b0a8
MX
1011static noinline int cow_file_range(struct inode *inode,
1012 struct page *locked_page,
1013 u64 start, u64 end, int *page_started,
1014 unsigned long *nr_written,
1015 int unlock)
1016{
1017 struct btrfs_trans_handle *trans;
1018 struct btrfs_root *root = BTRFS_I(inode)->root;
1019 int ret;
1020
1021 trans = btrfs_join_transaction(root);
1022 if (IS_ERR(trans)) {
1023 extent_clear_unlock_delalloc(inode,
1024 &BTRFS_I(inode)->io_tree,
1025 start, end, locked_page,
1026 EXTENT_CLEAR_UNLOCK_PAGE |
1027 EXTENT_CLEAR_UNLOCK |
1028 EXTENT_CLEAR_DELALLOC |
1029 EXTENT_CLEAR_DIRTY |
1030 EXTENT_SET_WRITEBACK |
1031 EXTENT_END_WRITEBACK);
1032 return PTR_ERR(trans);
1033 }
1034 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1035
1036 ret = __cow_file_range(trans, inode, root, locked_page, start, end,
1037 page_started, nr_written, unlock);
1038
1039 btrfs_end_transaction(trans, root);
1040
1041 return ret;
1042}
1043
771ed689
CM
1044/*
1045 * work queue call back to started compression on a file and pages
1046 */
1047static noinline void async_cow_start(struct btrfs_work *work)
1048{
1049 struct async_cow *async_cow;
1050 int num_added = 0;
1051 async_cow = container_of(work, struct async_cow, work);
1052
1053 compress_file_range(async_cow->inode, async_cow->locked_page,
1054 async_cow->start, async_cow->end, async_cow,
1055 &num_added);
8180ef88 1056 if (num_added == 0) {
cb77fcd8 1057 btrfs_add_delayed_iput(async_cow->inode);
771ed689 1058 async_cow->inode = NULL;
8180ef88 1059 }
771ed689
CM
1060}
1061
/*
 * work queue call back to submit previously compressed pages
 *
 * Runs as the ordered part of the async cow work item: subtracts this
 * chunk's page count from the global async delalloc counter, wakes any
 * throttled writers once the backlog drops below the threshold, and
 * submits the compressed extents built by async_cow_start().
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	/* number of pages this chunk covered, rounding up */
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	/*
	 * drop our pages from the in-flight count first, then wake anyone
	 * waiting in cow_file_range_async() once we are under 5MB worth
	 * of outstanding pages
	 */
	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	/* ->inode is NULL when async_cow_start() found nothing to submit */
	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}
c8b97818 1085
771ed689
CM
1086static noinline void async_cow_free(struct btrfs_work *work)
1087{
1088 struct async_cow *async_cow;
1089 async_cow = container_of(work, struct async_cow, work);
8180ef88 1090 if (async_cow->inode)
cb77fcd8 1091 btrfs_add_delayed_iput(async_cow->inode);
771ed689
CM
1092 kfree(async_cow);
1093}
1094
/*
 * Kick off delalloc writeback asynchronously: split [start, end] into
 * chunks (512K each unless compression is disabled for this inode),
 * wrap each chunk in an async_cow work item and queue it on the
 * delalloc workers.  Throttles when too many async pages are in
 * flight, and stalls while a submit drain is requested.
 *
 * Always reports the whole range as started; returns 0.
 */
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	/* back-pressure threshold for in-flight async delalloc pages */
	int limit = 10 * 1024 * 1024;

	/* drop the range lock; the workers re-lock per chunk */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		/* hold the inode across the async work; dropped in free/start */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		/* no compression: one big chunk; else cap chunks at 512K */
		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		/* throttle if the async backlog got too large */
		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		/* a drain is in progress: wait for all async pages to flush */
		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
1154
d397712b 1155static noinline int csum_exist_in_range(struct btrfs_root *root,
17d217fe
YZ
1156 u64 bytenr, u64 num_bytes)
1157{
1158 int ret;
1159 struct btrfs_ordered_sum *sums;
1160 LIST_HEAD(list);
1161
07d400a6 1162 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
a2de733c 1163 bytenr + num_bytes - 1, &list, 0);
17d217fe
YZ
1164 if (ret == 0 && list_empty(&list))
1165 return 0;
1166
1167 while (!list_empty(&list)) {
1168 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1169 list_del(&sums->list);
1170 kfree(sums);
1171 }
1172 return 1;
1173}
1174
/*
 * when nowcow writeback call back. This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 *
 * @force: when set, REG extents may be written in place too (the
 *         NODATACOW case); when clear, only PREALLOC extents qualify.
 *
 * Walks the file extent items covering [start, end].  Runs of extents
 * that cannot be written in place are accumulated via cow_start and
 * flushed through __cow_file_range(); everything else gets an ordered
 * extent pointing at the existing disk blocks.
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;		/* start of a pending must-COW run, or -1 */
	u64 cur_offset;		/* next file offset to classify */
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		/* fail the whole range as writeback so pages are released */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, locked_page,
					     EXTENT_CLEAR_UNLOCK_PAGE |
					     EXTENT_CLEAR_UNLOCK |
					     EXTENT_CLEAR_DELALLOC |
					     EXTENT_CLEAR_DIRTY |
					     EXTENT_SET_WRITEBACK |
					     EXTENT_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	/* the free space inode must not take the normal join path */
	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, locked_page,
					     EXTENT_CLEAR_UNLOCK_PAGE |
					     EXTENT_CLEAR_UNLOCK |
					     EXTENT_CLEAR_DELALLOC |
					     EXTENT_CLEAR_DIRTY |
					     EXTENT_SET_WRITEBACK |
					     EXTENT_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		/*
		 * no exact match: the previous item may still be a file
		 * extent that covers cur_offset, so step back once
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* past the inode's extent items or past our range: done */
		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/* hole before the next extent item: must be COWed */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			/* bytenr 0 marks a hole extent: COW it */
			if (disk_bytenr == 0)
				goto out_check;
			/* encoded extents can't be overwritten in place */
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			/* shared with a snapshot: must COW */
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensure that csum for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			/* extend (or open) the pending must-COW run */
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		/* flush any must-COW run that precedes this nocow extent */
		if (cow_start != (u64)-1) {
			ret = __cow_file_range(trans, inode, root, locked_page,
					       cow_start, found_key.offset - 1,
					       page_started, nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = found_key.offset - extent_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->orig_block_len = disk_num_bytes;
			em->ram_bytes = ram_bytes;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			em->mod_start = em->start;
			em->mod_len = em->len;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			set_bit(EXTENT_FLAG_FILLING, &em->flags);
			em->generation = -1;
			/* retry until any stale cached mapping is dropped */
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em, 1);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	/* anything left unclassified at the tail must be COWed */
	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = __cow_file_range(trans, inode, root, locked_page,
				       cow_start, end,
				       page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	err = btrfs_end_transaction(trans, root);
	if (!ret)
		ret = err;

	/* on failure, finish the untouched remainder as failed writeback */
	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     cur_offset, end, locked_page,
					     EXTENT_CLEAR_UNLOCK_PAGE |
					     EXTENT_CLEAR_UNLOCK |
					     EXTENT_CLEAR_DELALLOC |
					     EXTENT_CLEAR_DIRTY |
					     EXTENT_SET_WRITEBACK |
					     EXTENT_END_WRITEBACK);

	btrfs_free_path(path);
	return ret;
}
1463
d352ac68
CM
1464/*
1465 * extent_io.c call back to do delayed allocation processing
1466 */
c8b97818 1467static int run_delalloc_range(struct inode *inode, struct page *locked_page,
771ed689
CM
1468 u64 start, u64 end, int *page_started,
1469 unsigned long *nr_written)
be20aa9d 1470{
be20aa9d 1471 int ret;
7f366cfe 1472 struct btrfs_root *root = BTRFS_I(inode)->root;
a2135011 1473
7ddf5a42 1474 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
c8b97818 1475 ret = run_delalloc_nocow(inode, locked_page, start, end,
d397712b 1476 page_started, 1, nr_written);
7ddf5a42 1477 } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
d899e052 1478 ret = run_delalloc_nocow(inode, locked_page, start, end,
d397712b 1479 page_started, 0, nr_written);
7ddf5a42
JB
1480 } else if (!btrfs_test_opt(root, COMPRESS) &&
1481 !(BTRFS_I(inode)->force_compress) &&
1482 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
7f366cfe
CM
1483 ret = cow_file_range(inode, locked_page, start, end,
1484 page_started, nr_written, 1);
7ddf5a42
JB
1485 } else {
1486 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1487 &BTRFS_I(inode)->runtime_flags);
771ed689 1488 ret = cow_file_range_async(inode, locked_page, start, end,
d397712b 1489 page_started, nr_written);
7ddf5a42 1490 }
b888db2b
CM
1491 return ret;
1492}
1493
1bf85046
JM
1494static void btrfs_split_extent_hook(struct inode *inode,
1495 struct extent_state *orig, u64 split)
9ed74f2d 1496{
0ca1f7ce 1497 /* not delalloc, ignore it */
9ed74f2d 1498 if (!(orig->state & EXTENT_DELALLOC))
1bf85046 1499 return;
9ed74f2d 1500
9e0baf60
JB
1501 spin_lock(&BTRFS_I(inode)->lock);
1502 BTRFS_I(inode)->outstanding_extents++;
1503 spin_unlock(&BTRFS_I(inode)->lock);
9ed74f2d
JB
1504}
1505
1506/*
1507 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1508 * extents so we can keep track of new extents that are just merged onto old
1509 * extents, such as when we are doing sequential writes, so we can properly
1510 * account for the metadata space we'll need.
1511 */
1bf85046
JM
1512static void btrfs_merge_extent_hook(struct inode *inode,
1513 struct extent_state *new,
1514 struct extent_state *other)
9ed74f2d 1515{
9ed74f2d
JB
1516 /* not delalloc, ignore it */
1517 if (!(other->state & EXTENT_DELALLOC))
1bf85046 1518 return;
9ed74f2d 1519
9e0baf60
JB
1520 spin_lock(&BTRFS_I(inode)->lock);
1521 BTRFS_I(inode)->outstanding_extents--;
1522 spin_unlock(&BTRFS_I(inode)->lock);
9ed74f2d
JB
1523}
1524
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 *
 * Fires when EXTENT_DELALLOC is newly set on a range: bumps the
 * per-inode and per-fs delalloc byte counters and, for regular
 * inodes, links the inode onto fs_info->delalloc_inodes.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		/* the free space inode never goes on the delalloc list */
		bool do_list = !btrfs_is_free_space_inode(inode);

		/*
		 * the first delalloc set on a reservation already counted
		 * an outstanding extent; only later additions add one here
		 */
		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		/* delalloc_lock nests inside the per-inode lock here */
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags)) {
			spin_lock(&root->fs_info->delalloc_lock);
			if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
				list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
					      &root->fs_info->delalloc_inodes);
				set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					&BTRFS_I(inode)->runtime_flags);
			}
			spin_unlock(&root->fs_info->delalloc_lock);
		}
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}
1570
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 *
 * Fires when EXTENT_DELALLOC is cleared from a range: undoes the
 * accounting done by btrfs_set_bit_hook(), releases metadata/data
 * reservations where requested via *bits, and unlinks the inode
 * from the delalloc list once it has no delalloc bytes left.
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		/* the free space inode is never on the delalloc list */
		bool do_list = !btrfs_is_free_space_inode(inode);

		/*
		 * DO_ACCOUNTING callers drop the outstanding extent
		 * elsewhere; FIRST_DELALLOC mirrors the set-side logic
		 */
		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		/* reloc tree keeps its data space reservation */
		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes -= len;
		/* last delalloc byte gone: take the inode off the list */
		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &BTRFS_I(inode)->runtime_flags)) {
			spin_lock(&root->fs_info->delalloc_lock);
			if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
				list_del_init(&BTRFS_I(inode)->delalloc_inodes);
				clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					  &BTRFS_I(inode)->runtime_flags);
			}
			spin_unlock(&root->fs_info->delalloc_lock);
		}
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}
1620
d352ac68
CM
1621/*
1622 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1623 * we don't create bios that span stripes or chunks
1624 */
64a16701 1625int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
c8b97818
CM
1626 size_t size, struct bio *bio,
1627 unsigned long bio_flags)
239b14b3
CM
1628{
1629 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
a62b9401 1630 u64 logical = (u64)bio->bi_sector << 9;
239b14b3
CM
1631 u64 length = 0;
1632 u64 map_length;
239b14b3
CM
1633 int ret;
1634
771ed689
CM
1635 if (bio_flags & EXTENT_BIO_COMPRESSED)
1636 return 0;
1637
f2d8d74d 1638 length = bio->bi_size;
239b14b3 1639 map_length = length;
64a16701 1640 ret = btrfs_map_block(root->fs_info, rw, logical,
f188591e 1641 &map_length, NULL, 0);
3ec706c8 1642 /* Will always return 0 with map_multi == NULL */
3444a972 1643 BUG_ON(ret < 0);
d397712b 1644 if (map_length < length + size)
239b14b3 1645 return 1;
3444a972 1646 return 0;
239b14b3
CM
1647}
1648
d352ac68
CM
1649/*
1650 * in order to insert checksums into the metadata in large chunks,
1651 * we wait until bio submission time. All the pages in the bio are
1652 * checksummed and sums are attached onto the ordered extent record.
1653 *
1654 * At IO completion time the cums attached on the ordered extent record
1655 * are inserted into the btree
1656 */
d397712b
CM
1657static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1658 struct bio *bio, int mirror_num,
eaf25d93
CM
1659 unsigned long bio_flags,
1660 u64 bio_offset)
065631f6 1661{
065631f6 1662 struct btrfs_root *root = BTRFS_I(inode)->root;
065631f6 1663 int ret = 0;
e015640f 1664
d20f7043 1665 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
79787eaa 1666 BUG_ON(ret); /* -ENOMEM */
4a69a410
CM
1667 return 0;
1668}
e015640f 1669
4a69a410
CM
1670/*
1671 * in order to insert checksums into the metadata in large chunks,
1672 * we wait until bio submission time. All the pages in the bio are
1673 * checksummed and sums are attached onto the ordered extent record.
1674 *
1675 * At IO completion time the cums attached on the ordered extent record
1676 * are inserted into the btree
1677 */
b2950863 1678static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
eaf25d93
CM
1679 int mirror_num, unsigned long bio_flags,
1680 u64 bio_offset)
4a69a410
CM
1681{
1682 struct btrfs_root *root = BTRFS_I(inode)->root;
61891923
SB
1683 int ret;
1684
1685 ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1686 if (ret)
1687 bio_endio(bio, ret);
1688 return ret;
44b8bd7e
CM
1689}
1690
/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 *
 * Reads: hook the end-io workqueue, then either hand compressed bios to
 * the compressed-read path or look up checksums before mapping.
 * Writes: checksum asynchronously via the workqueue when possible,
 * otherwise checksum inline, then map the bio.
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;
	/* sync writers opt out of the async checksum workqueue */
	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	/* the free space inode's bios complete on the metadata workqueue */
	if (btrfs_is_free_space_inode(inode))
		metadata = 2;

	if (!(rw & REQ_WRITE)) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
		if (ret)
			goto out;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			/* compressed reads do their own csum verification */
			ret = btrfs_submit_compressed_read(inode, bio,
							   mirror_num,
							   bio_flags);
			goto out;
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				goto out;
		}
		goto mapit;
	} else if (async && !skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
		goto out;
	} else if (!skip_sum) {
		/* synchronous write: checksum inline before mapping */
		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
		if (ret)
			goto out;
	}

mapit:
	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);

out:
	/* on error, end the bio ourselves so waiters are not stuck */
	if (ret < 0)
		bio_endio(bio, ret);
	return ret;
}
6885f308 1751
d352ac68
CM
1752/*
1753 * given a list of ordered sums record them in the inode. This happens
1754 * at IO completion time based on sums calculated at bio submission time.
1755 */
ba1da2f4 1756static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
e6dcd2dc
CM
1757 struct inode *inode, u64 file_offset,
1758 struct list_head *list)
1759{
e6dcd2dc
CM
1760 struct btrfs_ordered_sum *sum;
1761
c6e30871 1762 list_for_each_entry(sum, list, list) {
39847c4d 1763 trans->adding_csums = 1;
d20f7043
CM
1764 btrfs_csum_file_blocks(trans,
1765 BTRFS_I(inode)->root->fs_info->csum_root, sum);
39847c4d 1766 trans->adding_csums = 0;
e6dcd2dc
CM
1767 }
1768 return 0;
1769}
1770
2ac55d41
JB
1771int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1772 struct extent_state **cached_state)
ea8c2819 1773{
6c1500f2 1774 WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
ea8c2819 1775 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2ac55d41 1776 cached_state, GFP_NOFS);
ea8c2819
CM
1777}
1778
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;	/* dirtied-behind-our-back page; ref taken by the hook */
	struct btrfs_work work;	/* queued on fs_info->fixup_workers */
};
1784
/*
 * Worker for the fixup queued by btrfs_writepage_start_hook(): bring a
 * page that was dirtied without going through the normal write path
 * back into a proper delalloc state so writepage can handle it.
 * Retries from the top whenever an ordered extent covers the page.
 */
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	/* page was truncated, cleaned, or already fixed up: nothing to do */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		/* drop our locks, wait for the ordered IO, then retry */
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		/* no space: fail the page as writeback */
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	/* redirty so the next writepage pass picks it up normally */
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	/* drop the reference taken when the fixup was queued */
	page_cache_release(page);
	kfree(fixup);
}
1845
1846/*
1847 * There are a few paths in the higher layers of the kernel that directly
1848 * set the page dirty bit without asking the filesystem if it is a
1849 * good idea. This causes problems because we want to make sure COW
1850 * properly happens and the data=ordered rules are followed.
1851 *
c8b97818 1852 * In our case any range that doesn't have the ORDERED bit set
247e743c
CM
1853 * hasn't been properly setup for IO. We kick off an async process
1854 * to fix it up. The async helper will wait for ordered extents, set
1855 * the delalloc bit and make it safe to write the page.
1856 */
b2950863 1857static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
247e743c
CM
1858{
1859 struct inode *inode = page->mapping->host;
1860 struct btrfs_writepage_fixup *fixup;
1861 struct btrfs_root *root = BTRFS_I(inode)->root;
247e743c 1862
8b62b72b
CM
1863 /* this page is properly in the ordered list */
1864 if (TestClearPagePrivate2(page))
247e743c
CM
1865 return 0;
1866
1867 if (PageChecked(page))
1868 return -EAGAIN;
1869
1870 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1871 if (!fixup)
1872 return -EAGAIN;
f421950f 1873
247e743c
CM
1874 SetPageChecked(page);
1875 page_cache_get(page);
1876 fixup->work.func = btrfs_writepage_fixup_worker;
1877 fixup->page = page;
1878 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
87826df0 1879 return -EBUSY;
247e743c
CM
1880}
1881
d899e052
YZ
/*
 * Insert a file extent item for an already-reserved extent and record the
 * corresponding backref in the extent tree.
 *
 * @file_pos:       logical file offset the extent item covers
 * @disk_bytenr:    start of the extent on disk
 * @disk_num_bytes: on-disk length of the extent
 * @num_bytes:      number of file bytes covered
 * @ram_bytes:      in-memory (uncompressed) length recorded in the item
 *
 * Returns 0 on success or a negative errno.
 */
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, 0);
	if (ret)
		goto out;

	ins.objectid = btrfs_ino(inode);
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	if (ret)
		goto out;
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	inode_add_bytes(inode, num_bytes);

	/* now add the extent-tree backref for the new extent */
	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					btrfs_ino(inode), file_pos, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
1952
38c227d8
LB
/* snapshot-aware defrag */

/* one inode reference to an old extent, collected by record_one_backref() */
struct sa_defrag_extent_backref {
	struct rb_node node;		/* link in new_sa_defrag_extent->root */
	struct old_sa_defrag_extent *old;	/* old extent this ref points at */
	u64 root_id;			/* objectid of the referencing fs root */
	u64 inum;			/* inode number holding the reference */
	u64 file_pos;			/* file offset of the referencing item */
	u64 extent_offset;		/* item's offset into the extent */
	u64 num_bytes;			/* bytes covered by the referencing item */
	u64 generation;			/* generation of the file extent item */
};

/* an extent that existed in the defragged range before it was rewritten */
struct old_sa_defrag_extent {
	struct list_head list;		/* link in new_sa_defrag_extent->head */
	struct new_sa_defrag_extent *new;	/* back pointer to the new extent */

	u64 extent_offset;		/* offset field of the old extent item */
	u64 bytenr;			/* disk bytenr of the old extent */
	u64 offset;			/* start of overlap, relative to the item */
	u64 len;			/* length of the overlap */
	int count;			/* number of backrefs recorded for us */
};

/* the new extent written by defrag plus everything needed to relink it */
struct new_sa_defrag_extent {
	struct rb_root root;		/* tree of sa_defrag_extent_backref */
	struct list_head head;		/* list of old_sa_defrag_extent */
	struct btrfs_path *path;	/* scratch path used while recording */
	struct inode *inode;		/* inode being defragged */
	u64 file_pos;			/* file offset of the new extent */
	u64 len;			/* length in the file */
	u64 bytenr;			/* disk bytenr of the new extent */
	u64 disk_len;			/* on-disk length of the new extent */
	u8 compress_type;		/* compression of the new extent */
};
1987
1988static int backref_comp(struct sa_defrag_extent_backref *b1,
1989 struct sa_defrag_extent_backref *b2)
1990{
1991 if (b1->root_id < b2->root_id)
1992 return -1;
1993 else if (b1->root_id > b2->root_id)
1994 return 1;
1995
1996 if (b1->inum < b2->inum)
1997 return -1;
1998 else if (b1->inum > b2->inum)
1999 return 1;
2000
2001 if (b1->file_pos < b2->file_pos)
2002 return -1;
2003 else if (b1->file_pos > b2->file_pos)
2004 return 1;
2005
2006 /*
2007 * [------------------------------] ===> (a range of space)
2008 * |<--->| |<---->| =============> (fs/file tree A)
2009 * |<---------------------------->| ===> (fs/file tree B)
2010 *
2011 * A range of space can refer to two file extents in one tree while
2012 * refer to only one file extent in another tree.
2013 *
2014 * So we may process a disk offset more than one time(two extents in A)
2015 * and locate at the same extent(one extent in B), then insert two same
2016 * backrefs(both refer to the extent in B).
2017 */
2018 return 0;
2019}
2020
2021static void backref_insert(struct rb_root *root,
2022 struct sa_defrag_extent_backref *backref)
2023{
2024 struct rb_node **p = &root->rb_node;
2025 struct rb_node *parent = NULL;
2026 struct sa_defrag_extent_backref *entry;
2027 int ret;
2028
2029 while (*p) {
2030 parent = *p;
2031 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2032
2033 ret = backref_comp(backref, entry);
2034 if (ret < 0)
2035 p = &(*p)->rb_left;
2036 else
2037 p = &(*p)->rb_right;
2038 }
2039
2040 rb_link_node(&backref->node, parent, p);
2041 rb_insert_color(&backref->node, root);
2042}
2043
2044/*
2045 * Note the backref might has changed, and in this case we just return 0.
2046 */
2047static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2048 void *ctx)
2049{
2050 struct btrfs_file_extent_item *extent;
2051 struct btrfs_fs_info *fs_info;
2052 struct old_sa_defrag_extent *old = ctx;
2053 struct new_sa_defrag_extent *new = old->new;
2054 struct btrfs_path *path = new->path;
2055 struct btrfs_key key;
2056 struct btrfs_root *root;
2057 struct sa_defrag_extent_backref *backref;
2058 struct extent_buffer *leaf;
2059 struct inode *inode = new->inode;
2060 int slot;
2061 int ret;
2062 u64 extent_offset;
2063 u64 num_bytes;
2064
2065 if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2066 inum == btrfs_ino(inode))
2067 return 0;
2068
2069 key.objectid = root_id;
2070 key.type = BTRFS_ROOT_ITEM_KEY;
2071 key.offset = (u64)-1;
2072
2073 fs_info = BTRFS_I(inode)->root->fs_info;
2074 root = btrfs_read_fs_root_no_name(fs_info, &key);
2075 if (IS_ERR(root)) {
2076 if (PTR_ERR(root) == -ENOENT)
2077 return 0;
2078 WARN_ON(1);
2079 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2080 inum, offset, root_id);
2081 return PTR_ERR(root);
2082 }
2083
2084 key.objectid = inum;
2085 key.type = BTRFS_EXTENT_DATA_KEY;
2086 if (offset > (u64)-1 << 32)
2087 key.offset = 0;
2088 else
2089 key.offset = offset;
2090
2091 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2092 if (ret < 0) {
2093 WARN_ON(1);
2094 return ret;
2095 }
2096
2097 while (1) {
2098 cond_resched();
2099
2100 leaf = path->nodes[0];
2101 slot = path->slots[0];
2102
2103 if (slot >= btrfs_header_nritems(leaf)) {
2104 ret = btrfs_next_leaf(root, path);
2105 if (ret < 0) {
2106 goto out;
2107 } else if (ret > 0) {
2108 ret = 0;
2109 goto out;
2110 }
2111 continue;
2112 }
2113
2114 path->slots[0]++;
2115
2116 btrfs_item_key_to_cpu(leaf, &key, slot);
2117
2118 if (key.objectid > inum)
2119 goto out;
2120
2121 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2122 continue;
2123
2124 extent = btrfs_item_ptr(leaf, slot,
2125 struct btrfs_file_extent_item);
2126
2127 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2128 continue;
2129
2130 extent_offset = btrfs_file_extent_offset(leaf, extent);
2131 if (key.offset - extent_offset != offset)
2132 continue;
2133
2134 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2135 if (extent_offset >= old->extent_offset + old->offset +
2136 old->len || extent_offset + num_bytes <=
2137 old->extent_offset + old->offset)
2138 continue;
2139
2140 break;
2141 }
2142
2143 backref = kmalloc(sizeof(*backref), GFP_NOFS);
2144 if (!backref) {
2145 ret = -ENOENT;
2146 goto out;
2147 }
2148
2149 backref->root_id = root_id;
2150 backref->inum = inum;
2151 backref->file_pos = offset + extent_offset;
2152 backref->num_bytes = num_bytes;
2153 backref->extent_offset = extent_offset;
2154 backref->generation = btrfs_file_extent_generation(leaf, extent);
2155 backref->old = old;
2156 backref_insert(&new->root, backref);
2157 old->count++;
2158out:
2159 btrfs_release_path(path);
2160 WARN_ON(ret);
2161 return ret;
2162}
2163
/*
 * For every old extent of @new, record all remaining inode backrefs into
 * new->root via record_one_backref().  Old extents that turn out to have
 * no backrefs are dropped from the list and freed.
 *
 * Returns false when nothing is left to relink, true otherwise.
 */
static noinline bool record_extent_backrefs(struct btrfs_path *path,
				 struct new_sa_defrag_extent *new)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
	struct old_sa_defrag_extent *old, *tmp;
	int ret;

	new->path = path;

	list_for_each_entry_safe(old, tmp, &new->head, list) {
		ret = iterate_inodes_from_logical(old->bytenr, fs_info,
						  path, record_one_backref,
						  old);
		/* record_one_backref() only returns 0, -ENOENT, or a hard
		 * error; anything else is a logic error */
		BUG_ON(ret < 0 && ret != -ENOENT);

		/* no backref to be processed for this extent */
		if (!old->count) {
			list_del(&old->list);
			kfree(old);
		}
	}

	if (list_empty(&new->head))
		return false;

	return true;
}
2191
2192static int relink_is_mergable(struct extent_buffer *leaf,
2193 struct btrfs_file_extent_item *fi,
2194 u64 disk_bytenr)
2195{
2196 if (btrfs_file_extent_disk_bytenr(leaf, fi) != disk_bytenr)
2197 return 0;
2198
2199 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2200 return 0;
2201
2202 if (btrfs_file_extent_compression(leaf, fi) ||
2203 btrfs_file_extent_encryption(leaf, fi) ||
2204 btrfs_file_extent_other_encoding(leaf, fi))
2205 return 0;
2206
2207 return 1;
2208}
2209
/*
 * Make one recorded backref point at the new (defragged) extent instead
 * of the old one: drop the overlapping part of the old file extent item
 * in the referencing tree and insert (or merge) an item referring to
 * new->bytenr, bumping the extent tree refcount accordingly.
 *
 * Returns 1 when the backref was relinked (so the caller can try to
 * merge the next one into it), 0 when it was skipped, negative errno on
 * error.
 *
 * Note the backref might has changed, and in this case we just return 0.
 */
static noinline int relink_extent_backref(struct btrfs_path *path,
				 struct sa_defrag_extent_backref *prev,
				 struct sa_defrag_extent_backref *backref)
{
	struct btrfs_file_extent_item *extent;
	struct btrfs_file_extent_item *item;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct old_sa_defrag_extent *old = backref->old;
	struct new_sa_defrag_extent *new = old->new;
	struct inode *src_inode = new->inode;
	struct inode *inode;
	struct extent_state *cached = NULL;
	int ret = 0;
	u64 start;
	u64 len;
	u64 lock_start;
	u64 lock_end;
	bool merge = false;
	int index;

	/* the previous backref ends exactly where we start and lives in the
	 * same file: we may be able to extend its item instead of inserting */
	if (prev && prev->root_id == backref->root_id &&
	    prev->inum == backref->inum &&
	    prev->file_pos + prev->num_bytes == backref->file_pos)
		merge = true;

	/* step 1: get root */
	key.objectid = backref->root_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	/* SRCU protects the subvolume lookup against concurrent deletion */
	fs_info = BTRFS_I(src_inode)->root->fs_info;
	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		if (PTR_ERR(root) == -ENOENT)
			return 0;
		return PTR_ERR(root);
	}
	if (btrfs_root_refs(&root->root_item) == 0) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		/* parse ENOENT to 0 */
		return 0;
	}

	/* step 2: get inode */
	key.objectid = backref->inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		return 0;
	}

	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* step 3: relink backref */
	lock_start = backref->file_pos;
	lock_end = backref->file_pos + backref->num_bytes - 1;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
			 0, &cached);

	/* pending ordered IO on the range: skip this backref */
	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		goto out_unlock;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	key.objectid = backref->inum;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = backref->file_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out_free_path;
	} else if (ret > 0) {
		/* the referencing item is gone: the backref changed */
		ret = 0;
		goto out_free_path;
	}

	extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_file_extent_item);

	/* item was rewritten since we recorded the backref: skip it */
	if (btrfs_file_extent_generation(path->nodes[0], extent) !=
	    backref->generation)
		goto out_free_path;

	btrfs_release_path(path);

	/* compute the part of the backref's range that overlaps the old
	 * extent; only that part gets relinked */
	start = backref->file_pos;
	if (backref->extent_offset < old->extent_offset + old->offset)
		start += old->extent_offset + old->offset -
			 backref->extent_offset;

	len = min(backref->extent_offset + backref->num_bytes,
		  old->extent_offset + old->offset + old->len);
	len -= max(backref->extent_offset, old->extent_offset + old->offset);

	ret = btrfs_drop_extents(trans, root, inode, start,
				 start + len, 1);
	if (ret)
		goto out_free_path;
again:
	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = start;

	path->leave_spinning = 1;
	if (merge) {
		struct btrfs_file_extent_item *fi;
		u64 extent_len;
		struct btrfs_key found_key;

		ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
		if (ret < 0)
			goto out_free_path;

		path->slots[0]--;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);

		if (relink_is_mergable(leaf, fi, new->bytenr) &&
		    extent_len + found_key.offset == start) {
			/* extend the previous item instead of inserting */
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_len + len);
			btrfs_mark_buffer_dirty(leaf);
			inode_add_bytes(inode, len);

			ret = 1;
			goto out_free_path;
		} else {
			/* can't merge after all, fall back to inserting */
			merge = false;
			btrfs_release_path(path);
			goto again;
		}
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key,
					sizeof(*extent));
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
	btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
	btrfs_set_file_extent_num_bytes(leaf, item, len);
	btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, new->compress_type);
	btrfs_set_file_extent_encryption(leaf, item, 0);
	btrfs_set_file_extent_other_encoding(leaf, item, 0);

	btrfs_mark_buffer_dirty(leaf);
	inode_add_bytes(inode, len);
	btrfs_release_path(path);

	ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
			new->disk_len, 0,
			backref->root_id, backref->inum,
			new->file_pos, 0);	/* start - extent_offset */
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_free_path;
	}

	ret = 1;
out_free_path:
	btrfs_release_path(path);
	path->leave_spinning = 0;
	btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
			     &cached, GFP_NOFS);
	iput(inode);
	return ret;
}
2413
2414static void relink_file_extents(struct new_sa_defrag_extent *new)
2415{
2416 struct btrfs_path *path;
2417 struct old_sa_defrag_extent *old, *tmp;
2418 struct sa_defrag_extent_backref *backref;
2419 struct sa_defrag_extent_backref *prev = NULL;
2420 struct inode *inode;
2421 struct btrfs_root *root;
2422 struct rb_node *node;
2423 int ret;
2424
2425 inode = new->inode;
2426 root = BTRFS_I(inode)->root;
2427
2428 path = btrfs_alloc_path();
2429 if (!path)
2430 return;
2431
2432 if (!record_extent_backrefs(path, new)) {
2433 btrfs_free_path(path);
2434 goto out;
2435 }
2436 btrfs_release_path(path);
2437
2438 while (1) {
2439 node = rb_first(&new->root);
2440 if (!node)
2441 break;
2442 rb_erase(node, &new->root);
2443
2444 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2445
2446 ret = relink_extent_backref(path, prev, backref);
2447 WARN_ON(ret < 0);
2448
2449 kfree(prev);
2450
2451 if (ret == 1)
2452 prev = backref;
2453 else
2454 prev = NULL;
2455 cond_resched();
2456 }
2457 kfree(prev);
2458
2459 btrfs_free_path(path);
2460
2461 list_for_each_entry_safe(old, tmp, &new->head, list) {
2462 list_del(&old->list);
2463 kfree(old);
2464 }
2465out:
2466 atomic_dec(&root->fs_info->defrag_running);
2467 wake_up(&root->fs_info->transaction_wait);
2468
2469 kfree(new);
2470}
2471
/*
 * Build a new_sa_defrag_extent describing the extent just written by
 * @ordered and collect every old file extent it overlaps, so the old
 * references can later be relinked by relink_file_extents().
 *
 * On success takes a reference on fs_info->defrag_running (dropped by
 * relink_file_extents()).  Returns NULL on any failure.
 */
static struct new_sa_defrag_extent *
record_old_file_extents(struct inode *inode,
			struct btrfs_ordered_extent *ordered)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct old_sa_defrag_extent *old, *tmp;
	struct new_sa_defrag_extent *new;
	int ret;

	new = kmalloc(sizeof(*new), GFP_NOFS);
	if (!new)
		return NULL;

	new->inode = inode;
	new->file_pos = ordered->file_offset;
	new->len = ordered->len;
	new->bytenr = ordered->start;
	new->disk_len = ordered->disk_len;
	new->compress_type = ordered->compress_type;
	new->root = RB_ROOT;
	INIT_LIST_HEAD(&new->head);

	path = btrfs_alloc_path();
	if (!path)
		goto out_kfree;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = new->file_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free_path;
	/* not found: back up one slot so we start at the item that may
	 * still overlap new->file_pos */
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	/* find out all the old extents for the file range */
	while (1) {
		struct btrfs_file_extent_item *extent;
		struct extent_buffer *l;
		int slot;
		u64 num_bytes;
		u64 offset;
		u64 end;
		u64 disk_bytenr;
		u64 extent_offset;

		l = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out_free_list;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(l, &key, slot);

		/* stop once we leave this inode's extent items or pass the
		 * end of the defragged range */
		if (key.objectid != btrfs_ino(inode))
			break;
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			break;
		if (key.offset >= new->file_pos + new->len)
			break;

		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);

		num_bytes = btrfs_file_extent_num_bytes(l, extent);
		if (key.offset + num_bytes < new->file_pos)
			goto next;

		/* holes (disk_bytenr == 0) have nothing to relink */
		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
		if (!disk_bytenr)
			goto next;

		extent_offset = btrfs_file_extent_offset(l, extent);

		old = kmalloc(sizeof(*old), GFP_NOFS);
		if (!old)
			goto out_free_list;

		/* clamp the old extent to the defragged range */
		offset = max(new->file_pos, key.offset);
		end = min(new->file_pos + new->len, key.offset + num_bytes);

		old->bytenr = disk_bytenr;
		old->extent_offset = extent_offset;
		old->offset = offset - key.offset;
		old->len = end - offset;
		old->new = new;
		old->count = 0;
		list_add_tail(&old->list, &new->head);
next:
		path->slots[0]++;
		cond_resched();
	}

	btrfs_free_path(path);
	atomic_inc(&root->fs_info->defrag_running);

	return new;

out_free_list:
	list_for_each_entry_safe(old, tmp, &new->head, list) {
		list_del(&old->list);
		kfree(old);
	}
out_free_path:
	btrfs_free_path(path);
out_kfree:
	kfree(new);
	return NULL;
}
2589
5d13a98f
CM
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
	struct inode *inode = ordered_extent->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct new_sa_defrag_extent *new = NULL;
	int compress_type = 0;
	int ret;
	bool nolock;

	nolock = btrfs_is_free_space_inode(inode);

	/* the IO itself failed; just propagate -EIO and clean up below */
	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * NOCOW writes reuse existing extents: no file extent item to
	 * insert and no csums expected, only the inode needs updating.
	 */
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (nolock)
			trans = btrfs_join_transaction_nolock(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state);

	/* snapshot-aware defrag: if the range was marked EXTENT_DEFRAG and
	 * the inode may be shared with a snapshot, record the old extents
	 * so their references can be relinked after we commit the new one */
	ret = test_range_bit(io_tree, ordered_extent->file_offset,
			ordered_extent->file_offset + ordered_extent->len - 1,
			EXTENT_DEFRAG, 1, cached_state);
	if (ret) {
		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
		if (last_snapshot >= BTRFS_I(inode)->generation)
			/* the inode is shared */
			new = record_old_file_extents(inode, ordered_extent);

		clear_extent_bit(io_tree, ordered_extent->file_offset,
			ordered_extent->file_offset + ordered_extent->len - 1,
			EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
	}

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_unlock;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		/* preallocated extents are never compressed */
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
	} else {
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
	}
	/* release the pinned extent map now that the item is in the btree */
	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
			   ordered_extent->file_offset, ordered_extent->len,
			   trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_unlock;
	}

	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, root, ret);
		goto out_unlock;
	}
	ret = 0;
out_unlock:
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
out:
	if (root != root->fs_info->tree_root)
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
	if (trans)
		btrfs_end_transaction(trans, root);

	if (ret) {
		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
				      ordered_extent->file_offset +
				      ordered_extent->len - 1, NULL, GFP_NOFS);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator.
		 */
		if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
			btrfs_free_reserved_extent(root, ordered_extent->start,
						   ordered_extent->disk_len);
	}


	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* for snapshot-aware defrag */
	if (new)
		relink_file_extents(new);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}
2748
2749static void finish_ordered_fn(struct btrfs_work *work)
2750{
2751 struct btrfs_ordered_extent *ordered_extent;
2752 ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2753 btrfs_finish_ordered_io(ordered_extent);
e6dcd2dc
CM
2754}
2755
b2950863 2756static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
211f90e6
CM
2757 struct extent_state *state, int uptodate)
2758{
5fd02043
JB
2759 struct inode *inode = page->mapping->host;
2760 struct btrfs_root *root = BTRFS_I(inode)->root;
2761 struct btrfs_ordered_extent *ordered_extent = NULL;
2762 struct btrfs_workers *workers;
2763
1abe9b8a 2764 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2765
8b62b72b 2766 ClearPagePrivate2(page);
5fd02043
JB
2767 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2768 end - start + 1, uptodate))
2769 return 0;
2770
2771 ordered_extent->work.func = finish_ordered_fn;
2772 ordered_extent->work.flags = 0;
2773
83eea1f1 2774 if (btrfs_is_free_space_inode(inode))
5fd02043
JB
2775 workers = &root->fs_info->endio_freespace_worker;
2776 else
2777 workers = &root->fs_info->endio_write_workers;
2778 btrfs_queue_worker(workers, &ordered_extent->work);
2779
2780 return 0;
211f90e6
CM
2781}
2782
d352ac68
CM
2783/*
2784 * when reads are done, we need to check csums to verify the data is correct
4a54c8c1
JS
2785 * if there's a match, we allow the bio to finish. If not, the code in
2786 * extent_io.c will try to find good copies for us.
d352ac68 2787 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state, int mirror)
{
	size_t offset = start - page_offset(page);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;
	/* rate limit csum-failure messages so a bad disk can't flood logs */
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* page was flagged as already verified/repaired: skip checking */
	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	/* inode opted out of data checksums */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		goto good;

	/* relocation tree ranges marked NODATASUM carry no csums either */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	/* fetch the expected csum stashed in the extent state, using the
	 * cached state when it covers this exact start */
	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr);
good:
	return 0;

zeroit:
	if (__ratelimit(&_rs))
		btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u private %llu",
			(unsigned long long)btrfs_ino(page->mapping->host),
			(unsigned long long)start, csum,
			(unsigned long long)private);
	/* poison the bad range so stale data isn't handed to userspace */
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	if (private == 0)
		return 0;
	return -EIO;
}
b888db2b 2849
24bbcf04
YZ
/* one inode whose final iput() has been deferred to a safer context */
struct delayed_iput {
	struct list_head list;	/* link in fs_info->delayed_iputs */
	struct inode *inode;	/* inode reference to drop */
};
2854
79787eaa
JM
2855/* JDM: If this is fs-wide, why can't we add a pointer to
2856 * btrfs_inode instead and avoid the allocation? */
24bbcf04
YZ
2857void btrfs_add_delayed_iput(struct inode *inode)
2858{
2859 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2860 struct delayed_iput *delayed;
2861
2862 if (atomic_add_unless(&inode->i_count, -1, 1))
2863 return;
2864
2865 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2866 delayed->inode = inode;
2867
2868 spin_lock(&fs_info->delayed_iput_lock);
2869 list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2870 spin_unlock(&fs_info->delayed_iput_lock);
2871}
2872
2873void btrfs_run_delayed_iputs(struct btrfs_root *root)
2874{
2875 LIST_HEAD(list);
2876 struct btrfs_fs_info *fs_info = root->fs_info;
2877 struct delayed_iput *delayed;
2878 int empty;
2879
2880 spin_lock(&fs_info->delayed_iput_lock);
2881 empty = list_empty(&fs_info->delayed_iputs);
2882 spin_unlock(&fs_info->delayed_iput_lock);
2883 if (empty)
2884 return;
2885
24bbcf04
YZ
2886 spin_lock(&fs_info->delayed_iput_lock);
2887 list_splice_init(&fs_info->delayed_iputs, &list);
2888 spin_unlock(&fs_info->delayed_iput_lock);
2889
2890 while (!list_empty(&list)) {
2891 delayed = list_entry(list.next, struct delayed_iput, list);
2892 list_del(&delayed->list);
2893 iput(delayed->inode);
2894 kfree(delayed);
2895 }
24bbcf04
YZ
2896}
2897
d68fc57b 2898/*
42b2aa86 2899 * This is called in transaction commit time. If there are no orphan
d68fc57b
YZ
2900 * files in the subvolume, it removes orphan item and frees block_rsv
2901 * structure.
2902 */
2903void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2904 struct btrfs_root *root)
2905{
90290e19 2906 struct btrfs_block_rsv *block_rsv;
d68fc57b
YZ
2907 int ret;
2908
8a35d95f 2909 if (atomic_read(&root->orphan_inodes) ||
d68fc57b
YZ
2910 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2911 return;
2912
90290e19 2913 spin_lock(&root->orphan_lock);
8a35d95f 2914 if (atomic_read(&root->orphan_inodes)) {
90290e19
JB
2915 spin_unlock(&root->orphan_lock);
2916 return;
2917 }
2918
2919 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2920 spin_unlock(&root->orphan_lock);
2921 return;
2922 }
2923
2924 block_rsv = root->orphan_block_rsv;
2925 root->orphan_block_rsv = NULL;
2926 spin_unlock(&root->orphan_lock);
2927
d68fc57b
YZ
2928 if (root->orphan_item_inserted &&
2929 btrfs_root_refs(&root->root_item) > 0) {
2930 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2931 root->root_key.objectid);
2932 BUG_ON(ret);
2933 root->orphan_item_inserted = 0;
2934 }
2935
90290e19
JB
2936 if (block_rsv) {
2937 WARN_ON(block_rsv->size > 0);
2938 btrfs_free_block_rsv(root, block_rsv);
d68fc57b
YZ
2939 }
2940}
2941
7b128766
JB
2942/*
2943 * This creates an orphan entry for the given inode in case something goes
2944 * wrong in the middle of an unlink/truncate.
d68fc57b
YZ
2945 *
2946 * NOTE: caller of this function should reserve 5 units of metadata for
2947 * this function.
7b128766
JB
2948 */
2949int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2950{
2951 struct btrfs_root *root = BTRFS_I(inode)->root;
d68fc57b
YZ
2952 struct btrfs_block_rsv *block_rsv = NULL;
2953 int reserve = 0;
2954 int insert = 0;
2955 int ret;
7b128766 2956
d68fc57b 2957 if (!root->orphan_block_rsv) {
66d8f3dd 2958 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
b532402e
TI
2959 if (!block_rsv)
2960 return -ENOMEM;
d68fc57b 2961 }
7b128766 2962
d68fc57b
YZ
2963 spin_lock(&root->orphan_lock);
2964 if (!root->orphan_block_rsv) {
2965 root->orphan_block_rsv = block_rsv;
2966 } else if (block_rsv) {
2967 btrfs_free_block_rsv(root, block_rsv);
2968 block_rsv = NULL;
7b128766 2969 }
7b128766 2970
8a35d95f
JB
2971 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2972 &BTRFS_I(inode)->runtime_flags)) {
d68fc57b
YZ
2973#if 0
2974 /*
2975 * For proper ENOSPC handling, we should do orphan
2976 * cleanup when mounting. But this introduces backward
2977 * compatibility issue.
2978 */
2979 if (!xchg(&root->orphan_item_inserted, 1))
2980 insert = 2;
2981 else
2982 insert = 1;
2983#endif
2984 insert = 1;
321f0e70 2985 atomic_inc(&root->orphan_inodes);
7b128766
JB
2986 }
2987
72ac3c0d
JB
2988 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2989 &BTRFS_I(inode)->runtime_flags))
d68fc57b 2990 reserve = 1;
d68fc57b 2991 spin_unlock(&root->orphan_lock);
7b128766 2992
d68fc57b
YZ
2993 /* grab metadata reservation from transaction handle */
2994 if (reserve) {
2995 ret = btrfs_orphan_reserve_metadata(trans, inode);
79787eaa 2996 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
d68fc57b 2997 }
7b128766 2998
d68fc57b
YZ
2999 /* insert an orphan item to track this unlinked/truncated file */
3000 if (insert >= 1) {
33345d01 3001 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
79787eaa 3002 if (ret && ret != -EEXIST) {
8a35d95f
JB
3003 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3004 &BTRFS_I(inode)->runtime_flags);
79787eaa
JM
3005 btrfs_abort_transaction(trans, root, ret);
3006 return ret;
3007 }
3008 ret = 0;
d68fc57b
YZ
3009 }
3010
3011 /* insert an orphan item to track subvolume contains orphan files */
3012 if (insert >= 2) {
3013 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3014 root->root_key.objectid);
79787eaa
JM
3015 if (ret && ret != -EEXIST) {
3016 btrfs_abort_transaction(trans, root, ret);
3017 return ret;
3018 }
d68fc57b
YZ
3019 }
3020 return 0;
7b128766
JB
3021}
3022
3023/*
3024 * We have done the truncate/delete so we can go ahead and remove the orphan
3025 * item for this particular inode.
3026 */
3027int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
3028{
3029 struct btrfs_root *root = BTRFS_I(inode)->root;
d68fc57b
YZ
3030 int delete_item = 0;
3031 int release_rsv = 0;
7b128766
JB
3032 int ret = 0;
3033
d68fc57b 3034 spin_lock(&root->orphan_lock);
8a35d95f
JB
3035 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3036 &BTRFS_I(inode)->runtime_flags))
d68fc57b 3037 delete_item = 1;
7b128766 3038
72ac3c0d
JB
3039 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3040 &BTRFS_I(inode)->runtime_flags))
d68fc57b 3041 release_rsv = 1;
d68fc57b 3042 spin_unlock(&root->orphan_lock);
7b128766 3043
d68fc57b 3044 if (trans && delete_item) {
33345d01 3045 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
79787eaa 3046 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
d68fc57b 3047 }
7b128766 3048
8a35d95f 3049 if (release_rsv) {
d68fc57b 3050 btrfs_orphan_release_metadata(inode);
8a35d95f
JB
3051 atomic_dec(&root->orphan_inodes);
3052 }
7b128766 3053
d68fc57b 3054 return 0;
7b128766
JB
3055}
3056
3057/*
3058 * this cleans up any orphans that may be left on the list from the last use
3059 * of this root.
3060 */
66b4ffd1 3061int btrfs_orphan_cleanup(struct btrfs_root *root)
7b128766
JB
3062{
3063 struct btrfs_path *path;
3064 struct extent_buffer *leaf;
7b128766
JB
3065 struct btrfs_key key, found_key;
3066 struct btrfs_trans_handle *trans;
3067 struct inode *inode;
8f6d7f4f 3068 u64 last_objectid = 0;
7b128766
JB
3069 int ret = 0, nr_unlink = 0, nr_truncate = 0;
3070
d68fc57b 3071 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
66b4ffd1 3072 return 0;
c71bf099
YZ
3073
3074 path = btrfs_alloc_path();
66b4ffd1
JB
3075 if (!path) {
3076 ret = -ENOMEM;
3077 goto out;
3078 }
7b128766
JB
3079 path->reada = -1;
3080
3081 key.objectid = BTRFS_ORPHAN_OBJECTID;
3082 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
3083 key.offset = (u64)-1;
3084
7b128766
JB
3085 while (1) {
3086 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
66b4ffd1
JB
3087 if (ret < 0)
3088 goto out;
7b128766
JB
3089
3090 /*
3091 * if ret == 0 means we found what we were searching for, which
25985edc 3092 * is weird, but possible, so only screw with path if we didn't
7b128766
JB
3093 * find the key and see if we have stuff that matches
3094 */
3095 if (ret > 0) {
66b4ffd1 3096 ret = 0;
7b128766
JB
3097 if (path->slots[0] == 0)
3098 break;
3099 path->slots[0]--;
3100 }
3101
3102 /* pull out the item */
3103 leaf = path->nodes[0];
7b128766
JB
3104 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3105
3106 /* make sure the item matches what we want */
3107 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3108 break;
3109 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
3110 break;
3111
3112 /* release the path since we're done with it */
b3b4aa74 3113 btrfs_release_path(path);
7b128766
JB
3114
3115 /*
3116 * this is where we are basically btrfs_lookup, without the
3117 * crossing root thing. we store the inode number in the
3118 * offset of the orphan item.
3119 */
8f6d7f4f
JB
3120
3121 if (found_key.offset == last_objectid) {
c2cf52eb
SK
3122 btrfs_err(root->fs_info,
3123 "Error removing orphan entry, stopping orphan cleanup");
8f6d7f4f
JB
3124 ret = -EINVAL;
3125 goto out;
3126 }
3127
3128 last_objectid = found_key.offset;
3129
5d4f98a2
YZ
3130 found_key.objectid = found_key.offset;
3131 found_key.type = BTRFS_INODE_ITEM_KEY;
3132 found_key.offset = 0;
73f73415 3133 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
a8c9e576
JB
3134 ret = PTR_RET(inode);
3135 if (ret && ret != -ESTALE)
66b4ffd1 3136 goto out;
7b128766 3137
f8e9e0b0
AJ
3138 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3139 struct btrfs_root *dead_root;
3140 struct btrfs_fs_info *fs_info = root->fs_info;
3141 int is_dead_root = 0;
3142
3143 /*
3144 * this is an orphan in the tree root. Currently these
3145 * could come from 2 sources:
3146 * a) a snapshot deletion in progress
3147 * b) a free space cache inode
3148 * We need to distinguish those two, as the snapshot
3149 * orphan must not get deleted.
3150 * find_dead_roots already ran before us, so if this
3151 * is a snapshot deletion, we should find the root
3152 * in the dead_roots list
3153 */
3154 spin_lock(&fs_info->trans_lock);
3155 list_for_each_entry(dead_root, &fs_info->dead_roots,
3156 root_list) {
3157 if (dead_root->root_key.objectid ==
3158 found_key.objectid) {
3159 is_dead_root = 1;
3160 break;
3161 }
3162 }
3163 spin_unlock(&fs_info->trans_lock);
3164 if (is_dead_root) {
3165 /* prevent this orphan from being found again */
3166 key.offset = found_key.objectid - 1;
3167 continue;
3168 }
3169 }
7b128766 3170 /*
a8c9e576
JB
3171 * Inode is already gone but the orphan item is still there,
3172 * kill the orphan item.
7b128766 3173 */
a8c9e576
JB
3174 if (ret == -ESTALE) {
3175 trans = btrfs_start_transaction(root, 1);
66b4ffd1
JB
3176 if (IS_ERR(trans)) {
3177 ret = PTR_ERR(trans);
3178 goto out;
3179 }
c2cf52eb
SK
3180 btrfs_debug(root->fs_info, "auto deleting %Lu",
3181 found_key.objectid);
a8c9e576
JB
3182 ret = btrfs_del_orphan_item(trans, root,
3183 found_key.objectid);
79787eaa 3184 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
5b21f2ed 3185 btrfs_end_transaction(trans, root);
7b128766
JB
3186 continue;
3187 }
3188
a8c9e576
JB
3189 /*
3190 * add this inode to the orphan list so btrfs_orphan_del does
3191 * the proper thing when we hit it
3192 */
8a35d95f
JB
3193 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3194 &BTRFS_I(inode)->runtime_flags);
925396ec 3195 atomic_inc(&root->orphan_inodes);
a8c9e576 3196
7b128766
JB
3197 /* if we have links, this was a truncate, lets do that */
3198 if (inode->i_nlink) {
a41ad394
JB
3199 if (!S_ISREG(inode->i_mode)) {
3200 WARN_ON(1);
3201 iput(inode);
3202 continue;
3203 }
7b128766 3204 nr_truncate++;
f3fe820c
JB
3205
3206 /* 1 for the orphan item deletion. */
3207 trans = btrfs_start_transaction(root, 1);
3208 if (IS_ERR(trans)) {
3209 ret = PTR_ERR(trans);
3210 goto out;
3211 }
3212 ret = btrfs_orphan_add(trans, inode);
3213 btrfs_end_transaction(trans, root);
3214 if (ret)
3215 goto out;
3216
66b4ffd1 3217 ret = btrfs_truncate(inode);
4a7d0f68
JB
3218 if (ret)
3219 btrfs_orphan_del(NULL, inode);
7b128766
JB
3220 } else {
3221 nr_unlink++;
3222 }
3223
3224 /* this will do delete_inode and everything for us */
3225 iput(inode);
66b4ffd1
JB
3226 if (ret)
3227 goto out;
7b128766 3228 }
3254c876
MX
3229 /* release the path since we're done with it */
3230 btrfs_release_path(path);
3231
d68fc57b
YZ
3232 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3233
3234 if (root->orphan_block_rsv)
3235 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3236 (u64)-1);
3237
3238 if (root->orphan_block_rsv || root->orphan_item_inserted) {
7a7eaa40 3239 trans = btrfs_join_transaction(root);
66b4ffd1
JB
3240 if (!IS_ERR(trans))
3241 btrfs_end_transaction(trans, root);
d68fc57b 3242 }
7b128766
JB
3243
3244 if (nr_unlink)
4884b476 3245 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
7b128766 3246 if (nr_truncate)
4884b476 3247 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
66b4ffd1
JB
3248
3249out:
3250 if (ret)
c2cf52eb
SK
3251 btrfs_crit(root->fs_info,
3252 "could not do orphan cleanup %d", ret);
66b4ffd1
JB
3253 btrfs_free_path(path);
3254 return ret;
7b128766
JB
3255}
3256
46a53cca
CM
3257/*
3258 * very simple check to peek ahead in the leaf looking for xattrs. If we
3259 * don't find any xattrs, we know there can't be any acls.
3260 *
3261 * slot is the slot the inode is in, objectid is the objectid of the inode
3262 */
3263static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3264 int slot, u64 objectid)
3265{
3266 u32 nritems = btrfs_header_nritems(leaf);
3267 struct btrfs_key found_key;
3268 int scanned = 0;
3269
3270 slot++;
3271 while (slot < nritems) {
3272 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3273
3274 /* we found a different objectid, there must not be acls */
3275 if (found_key.objectid != objectid)
3276 return 0;
3277
3278 /* we found an xattr, assume we've got an acl */
3279 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
3280 return 1;
3281
3282 /*
3283 * we found a key greater than an xattr key, there can't
3284 * be any acls later on
3285 */
3286 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3287 return 0;
3288
3289 slot++;
3290 scanned++;
3291
3292 /*
3293 * it goes inode, inode backrefs, xattrs, extents,
3294 * so if there are a ton of hard links to an inode there can
3295 * be a lot of backrefs. Don't waste time searching too hard,
3296 * this is just an optimization
3297 */
3298 if (scanned >= 8)
3299 break;
3300 }
3301 /* we hit the end of the leaf before we found an xattr or
3302 * something larger than an xattr. We have to assume the inode
3303 * has acls
3304 */
3305 return 1;
3306}
3307
d352ac68
CM
3308/*
3309 * read an inode from the btree into the in-memory inode
3310 */
5d4f98a2 3311static void btrfs_read_locked_inode(struct inode *inode)
39279cc3
CM
3312{
3313 struct btrfs_path *path;
5f39d397 3314 struct extent_buffer *leaf;
39279cc3 3315 struct btrfs_inode_item *inode_item;
0b86a832 3316 struct btrfs_timespec *tspec;
39279cc3
CM
3317 struct btrfs_root *root = BTRFS_I(inode)->root;
3318 struct btrfs_key location;
46a53cca 3319 int maybe_acls;
618e21d5 3320 u32 rdev;
39279cc3 3321 int ret;
2f7e33d4
MX
3322 bool filled = false;
3323
3324 ret = btrfs_fill_inode(inode, &rdev);
3325 if (!ret)
3326 filled = true;
39279cc3
CM
3327
3328 path = btrfs_alloc_path();
1748f843
MF
3329 if (!path)
3330 goto make_bad;
3331
d90c7321 3332 path->leave_spinning = 1;
39279cc3 3333 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
dc17ff8f 3334
39279cc3 3335 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
5f39d397 3336 if (ret)
39279cc3 3337 goto make_bad;
39279cc3 3338
5f39d397 3339 leaf = path->nodes[0];
2f7e33d4
MX
3340
3341 if (filled)
3342 goto cache_acl;
3343
5f39d397
CM
3344 inode_item = btrfs_item_ptr(leaf, path->slots[0],
3345 struct btrfs_inode_item);
5f39d397 3346 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
bfe86848 3347 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
2f2f43d3
EB
3348 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3349 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
dbe674a9 3350 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
5f39d397
CM
3351
3352 tspec = btrfs_inode_atime(inode_item);
3353 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3354 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3355
3356 tspec = btrfs_inode_mtime(inode_item);
3357 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3358 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3359
3360 tspec = btrfs_inode_ctime(inode_item);
3361 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3362 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3363
a76a3cd4 3364 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
e02119d5 3365 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
5dc562c5
JB
3366 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3367
3368 /*
3369 * If we were modified in the current generation and evicted from memory
3370 * and then re-read we need to do a full sync since we don't have any
3371 * idea about which extents were modified before we were evicted from
3372 * cache.
3373 */
3374 if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3375 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3376 &BTRFS_I(inode)->runtime_flags);
3377
0c4d2d95 3378 inode->i_version = btrfs_inode_sequence(leaf, inode_item);
e02119d5 3379 inode->i_generation = BTRFS_I(inode)->generation;
618e21d5 3380 inode->i_rdev = 0;
5f39d397
CM
3381 rdev = btrfs_inode_rdev(leaf, inode_item);
3382
aec7477b 3383 BTRFS_I(inode)->index_cnt = (u64)-1;
d2fb3437 3384 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2f7e33d4 3385cache_acl:
46a53cca
CM
3386 /*
3387 * try to precache a NULL acl entry for files that don't have
3388 * any xattrs or acls
3389 */
33345d01
LZ
3390 maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3391 btrfs_ino(inode));
72c04902
AV
3392 if (!maybe_acls)
3393 cache_no_acl(inode);
46a53cca 3394
39279cc3 3395 btrfs_free_path(path);
39279cc3 3396
39279cc3 3397 switch (inode->i_mode & S_IFMT) {
39279cc3
CM
3398 case S_IFREG:
3399 inode->i_mapping->a_ops = &btrfs_aops;
04160088 3400 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
d1310b2e 3401 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
39279cc3
CM
3402 inode->i_fop = &btrfs_file_operations;
3403 inode->i_op = &btrfs_file_inode_operations;
3404 break;
3405 case S_IFDIR:
3406 inode->i_fop = &btrfs_dir_file_operations;
3407 if (root == root->fs_info->tree_root)
3408 inode->i_op = &btrfs_dir_ro_inode_operations;
3409 else
3410 inode->i_op = &btrfs_dir_inode_operations;
3411 break;
3412 case S_IFLNK:
3413 inode->i_op = &btrfs_symlink_inode_operations;
3414 inode->i_mapping->a_ops = &btrfs_symlink_aops;
04160088 3415 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
39279cc3 3416 break;
618e21d5 3417 default:
0279b4cd 3418 inode->i_op = &btrfs_special_inode_operations;
618e21d5
JB
3419 init_special_inode(inode, inode->i_mode, rdev);
3420 break;
39279cc3 3421 }
6cbff00f
CH
3422
3423 btrfs_update_iflags(inode);
39279cc3
CM
3424 return;
3425
3426make_bad:
39279cc3 3427 btrfs_free_path(path);
39279cc3
CM
3428 make_bad_inode(inode);
3429}
3430
d352ac68
CM
3431/*
3432 * given a leaf and an inode, copy the inode fields into the leaf
3433 */
e02119d5
CM
3434static void fill_inode_item(struct btrfs_trans_handle *trans,
3435 struct extent_buffer *leaf,
5f39d397 3436 struct btrfs_inode_item *item,
39279cc3
CM
3437 struct inode *inode)
3438{
51fab693
LB
3439 struct btrfs_map_token token;
3440
3441 btrfs_init_map_token(&token);
5f39d397 3442
51fab693
LB
3443 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3444 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3445 btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3446 &token);
3447 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3448 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
5f39d397 3449
51fab693
LB
3450 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3451 inode->i_atime.tv_sec, &token);
3452 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3453 inode->i_atime.tv_nsec, &token);
5f39d397 3454
51fab693
LB
3455 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3456 inode->i_mtime.tv_sec, &token);
3457 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3458 inode->i_mtime.tv_nsec, &token);
5f39d397 3459
51fab693
LB
3460 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3461 inode->i_ctime.tv_sec, &token);
3462 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3463 inode->i_ctime.tv_nsec, &token);
5f39d397 3464
51fab693
LB
3465 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3466 &token);
3467 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3468 &token);
3469 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3470 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3471 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3472 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3473 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
39279cc3
CM
3474}
3475
d352ac68
CM
3476/*
3477 * copy everything in the in-memory inode into the btree.
3478 */
2115133f 3479static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
d397712b 3480 struct btrfs_root *root, struct inode *inode)
39279cc3
CM
3481{
3482 struct btrfs_inode_item *inode_item;
3483 struct btrfs_path *path;
5f39d397 3484 struct extent_buffer *leaf;
39279cc3
CM
3485 int ret;
3486
3487 path = btrfs_alloc_path();
16cdcec7
MX
3488 if (!path)
3489 return -ENOMEM;
3490
b9473439 3491 path->leave_spinning = 1;
16cdcec7
MX
3492 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3493 1);
39279cc3
CM
3494 if (ret) {
3495 if (ret > 0)
3496 ret = -ENOENT;
3497 goto failed;
3498 }
3499
b4ce94de 3500 btrfs_unlock_up_safe(path, 1);
5f39d397
CM
3501 leaf = path->nodes[0];
3502 inode_item = btrfs_item_ptr(leaf, path->slots[0],
16cdcec7 3503 struct btrfs_inode_item);
39279cc3 3504
e02119d5 3505 fill_inode_item(trans, leaf, inode_item, inode);
5f39d397 3506 btrfs_mark_buffer_dirty(leaf);
15ee9bc7 3507 btrfs_set_inode_last_trans(trans, inode);
39279cc3
CM
3508 ret = 0;
3509failed:
39279cc3
CM
3510 btrfs_free_path(path);
3511 return ret;
3512}
3513
2115133f
CM
3514/*
3515 * copy everything in the in-memory inode into the btree.
3516 */
3517noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3518 struct btrfs_root *root, struct inode *inode)
3519{
3520 int ret;
3521
3522 /*
3523 * If the inode is a free space inode, we can deadlock during commit
3524 * if we put it into the delayed code.
3525 *
3526 * The data relocation inode should also be directly updated
3527 * without delay
3528 */
83eea1f1 3529 if (!btrfs_is_free_space_inode(inode)
2115133f 3530 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
8ea05e3a
AB
3531 btrfs_update_root_times(trans, root);
3532
2115133f
CM
3533 ret = btrfs_delayed_update_inode(trans, root, inode);
3534 if (!ret)
3535 btrfs_set_inode_last_trans(trans, inode);
3536 return ret;
3537 }
3538
3539 return btrfs_update_inode_item(trans, root, inode);
3540}
3541
be6aef60
JB
3542noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3543 struct btrfs_root *root,
3544 struct inode *inode)
2115133f
CM
3545{
3546 int ret;
3547
3548 ret = btrfs_update_inode(trans, root, inode);
3549 if (ret == -ENOSPC)
3550 return btrfs_update_inode_item(trans, root, inode);
3551 return ret;
3552}
3553
d352ac68
CM
3554/*
3555 * unlink helper that gets used here in inode.c and in the tree logging
3556 * recovery code. It remove a link in a directory with a given name, and
3557 * also drops the back refs in the inode to the directory
3558 */
92986796
AV
3559static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3560 struct btrfs_root *root,
3561 struct inode *dir, struct inode *inode,
3562 const char *name, int name_len)
39279cc3
CM
3563{
3564 struct btrfs_path *path;
39279cc3 3565 int ret = 0;
5f39d397 3566 struct extent_buffer *leaf;
39279cc3 3567 struct btrfs_dir_item *di;
5f39d397 3568 struct btrfs_key key;
aec7477b 3569 u64 index;
33345d01
LZ
3570 u64 ino = btrfs_ino(inode);
3571 u64 dir_ino = btrfs_ino(dir);
39279cc3
CM
3572
3573 path = btrfs_alloc_path();
54aa1f4d
CM
3574 if (!path) {
3575 ret = -ENOMEM;
554233a6 3576 goto out;
54aa1f4d
CM
3577 }
3578
b9473439 3579 path->leave_spinning = 1;
33345d01 3580 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
39279cc3
CM
3581 name, name_len, -1);
3582 if (IS_ERR(di)) {
3583 ret = PTR_ERR(di);
3584 goto err;
3585 }
3586 if (!di) {
3587 ret = -ENOENT;
3588 goto err;
3589 }
5f39d397
CM
3590 leaf = path->nodes[0];
3591 btrfs_dir_item_key_to_cpu(leaf, di, &key);
39279cc3 3592 ret = btrfs_delete_one_dir_name(trans, root, path, di);
54aa1f4d
CM
3593 if (ret)
3594 goto err;
b3b4aa74 3595 btrfs_release_path(path);
39279cc3 3596
33345d01
LZ
3597 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3598 dir_ino, &index);
aec7477b 3599 if (ret) {
c2cf52eb
SK
3600 btrfs_info(root->fs_info,
3601 "failed to delete reference to %.*s, inode %llu parent %llu",
3602 name_len, name,
3603 (unsigned long long)ino, (unsigned long long)dir_ino);
79787eaa 3604 btrfs_abort_transaction(trans, root, ret);
aec7477b
JB
3605 goto err;
3606 }
3607
16cdcec7 3608 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
79787eaa
JM
3609 if (ret) {
3610 btrfs_abort_transaction(trans, root, ret);
39279cc3 3611 goto err;
79787eaa 3612 }
39279cc3 3613
e02119d5 3614 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
33345d01 3615 inode, dir_ino);
79787eaa
JM
3616 if (ret != 0 && ret != -ENOENT) {
3617 btrfs_abort_transaction(trans, root, ret);
3618 goto err;
3619 }
e02119d5
CM
3620
3621 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
3622 dir, index);
6418c961
CM
3623 if (ret == -ENOENT)
3624 ret = 0;
d4e3991b
ZB
3625 else if (ret)
3626 btrfs_abort_transaction(trans, root, ret);
39279cc3
CM
3627err:
3628 btrfs_free_path(path);
e02119d5
CM
3629 if (ret)
3630 goto out;
3631
3632 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
0c4d2d95
JB
3633 inode_inc_iversion(inode);
3634 inode_inc_iversion(dir);
e02119d5 3635 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
b9959295 3636 ret = btrfs_update_inode(trans, root, dir);
e02119d5 3637out:
39279cc3
CM
3638 return ret;
3639}
3640
/*
 * Remove the directory entry for @inode in @dir, then drop one link on
 * the inode and write it back.
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (ret)
		return ret;
	btrfs_drop_nlink(inode);
	return btrfs_update_inode(trans, root, inode);
}
3654
3655
a22285a6
YZ
3656/* helper to check if there is any shared block in the path */
3657static int check_path_shared(struct btrfs_root *root,
3658 struct btrfs_path *path)
39279cc3 3659{
a22285a6
YZ
3660 struct extent_buffer *eb;
3661 int level;
0e4dcbef 3662 u64 refs = 1;
5df6a9f6 3663
a22285a6 3664 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
dedefd72
JB
3665 int ret;
3666
a22285a6
YZ
3667 if (!path->nodes[level])
3668 break;
3669 eb = path->nodes[level];
3670 if (!btrfs_block_can_be_shared(root, eb))
3671 continue;
3173a18f 3672 ret = btrfs_lookup_extent_info(NULL, root, eb->start, level, 1,
a22285a6
YZ
3673 &refs, NULL);
3674 if (refs > 1)
3675 return 1;
5df6a9f6 3676 }
dedefd72 3677 return 0;
39279cc3
CM
3678}
3679
a22285a6
YZ
3680/*
3681 * helper to start transaction for unlink and rmdir.
3682 *
3683 * unlink and rmdir are special in btrfs, they do not always free space.
3684 * so in enospc case, we should make sure they will free space before
3685 * allowing them to use the global metadata reservation.
3686 */
3687static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
3688 struct dentry *dentry)
4df27c4d 3689{
39279cc3 3690 struct btrfs_trans_handle *trans;
a22285a6 3691 struct btrfs_root *root = BTRFS_I(dir)->root;
4df27c4d 3692 struct btrfs_path *path;
4df27c4d 3693 struct btrfs_dir_item *di;
7b128766 3694 struct inode *inode = dentry->d_inode;
4df27c4d 3695 u64 index;
a22285a6
YZ
3696 int check_link = 1;
3697 int err = -ENOSPC;
4df27c4d 3698 int ret;
33345d01
LZ
3699 u64 ino = btrfs_ino(inode);
3700 u64 dir_ino = btrfs_ino(dir);
4df27c4d 3701
e70bea5f
JB
3702 /*
3703 * 1 for the possible orphan item
3704 * 1 for the dir item
3705 * 1 for the dir index
3706 * 1 for the inode ref
e70bea5f
JB
3707 * 1 for the inode
3708 */
6e137ed3 3709 trans = btrfs_start_transaction(root, 5);
a22285a6
YZ
3710 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
3711 return trans;
4df27c4d 3712
33345d01 3713 if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
a22285a6 3714 return ERR_PTR(-ENOSPC);
4df27c4d 3715
a22285a6
YZ
3716 /* check if there is someone else holds reference */
3717 if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
3718 return ERR_PTR(-ENOSPC);
4df27c4d 3719
a22285a6
YZ
3720 if (atomic_read(&inode->i_count) > 2)
3721 return ERR_PTR(-ENOSPC);
4df27c4d 3722
a22285a6
YZ
3723 if (xchg(&root->fs_info->enospc_unlink, 1))
3724 return ERR_PTR(-ENOSPC);
3725
3726 path = btrfs_alloc_path();
3727 if (!path) {
3728 root->fs_info->enospc_unlink = 0;
3729 return ERR_PTR(-ENOMEM);
4df27c4d
YZ
3730 }
3731
3880a1b4
JB
3732 /* 1 for the orphan item */
3733 trans = btrfs_start_transaction(root, 1);
5df6a9f6 3734 if (IS_ERR(trans)) {
a22285a6
YZ
3735 btrfs_free_path(path);
3736 root->fs_info->enospc_unlink = 0;
3737 return trans;
3738 }
4df27c4d 3739
a22285a6
YZ
3740 path->skip_locking = 1;
3741 path->search_commit_root = 1;
4df27c4d 3742
a22285a6
YZ
3743 ret = btrfs_lookup_inode(trans, root, path,
3744 &BTRFS_I(dir)->location, 0);
3745 if (ret < 0) {
3746 err = ret;
3747 goto out;
3748 }
3749 if (ret == 0) {
3750 if (check_path_shared(root, path))
3751 goto out;
3752 } else {
3753 check_link = 0;
5df6a9f6 3754 }
b3b4aa74 3755 btrfs_release_path(path);
a22285a6
YZ
3756
3757 ret = btrfs_lookup_inode(trans, root, path,
3758 &BTRFS_I(inode)->location, 0);
3759 if (ret < 0) {
3760 err = ret;
3761 goto out;
3762 }
3763 if (ret == 0) {
3764 if (check_path_shared(root, path))
3765 goto out;
3766 } else {
3767 check_link = 0;
3768 }
b3b4aa74 3769 btrfs_release_path(path);
a22285a6
YZ
3770
3771 if (ret == 0 && S_ISREG(inode->i_mode)) {
3772 ret = btrfs_lookup_file_extent(trans, root, path,
33345d01 3773 ino, (u64)-1, 0);
a22285a6
YZ
3774 if (ret < 0) {
3775 err = ret;
3776 goto out;
3777 }
79787eaa 3778 BUG_ON(ret == 0); /* Corruption */
a22285a6
YZ
3779 if (check_path_shared(root, path))
3780 goto out;
b3b4aa74 3781 btrfs_release_path(path);
a22285a6
YZ
3782 }
3783
3784 if (!check_link) {
3785 err = 0;
3786 goto out;
3787 }
3788
33345d01 3789 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
a22285a6
YZ
3790 dentry->d_name.name, dentry->d_name.len, 0);
3791 if (IS_ERR(di)) {
3792 err = PTR_ERR(di);
3793 goto out;
3794 }
3795 if (di) {
3796 if (check_path_shared(root, path))
3797 goto out;
3798 } else {
3799 err = 0;
3800 goto out;
3801 }
b3b4aa74 3802 btrfs_release_path(path);
a22285a6 3803
f186373f
MF
3804 ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name,
3805 dentry->d_name.len, ino, dir_ino, 0,
3806 &index);
3807 if (ret) {
3808 err = ret;
a22285a6
YZ
3809 goto out;
3810 }
f186373f 3811
a22285a6
YZ
3812 if (check_path_shared(root, path))
3813 goto out;
f186373f 3814
b3b4aa74 3815 btrfs_release_path(path);
a22285a6 3816
16cdcec7
MX
3817 /*
3818 * This is a commit root search, if we can lookup inode item and other
3819 * relative items in the commit root, it means the transaction of
3820 * dir/file creation has been committed, and the dir index item that we
3821 * delay to insert has also been inserted into the commit root. So
3822 * we needn't worry about the delayed insertion of the dir index item
3823 * here.
3824 */
33345d01 3825 di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
a22285a6
YZ
3826 dentry->d_name.name, dentry->d_name.len, 0);
3827 if (IS_ERR(di)) {
3828 err = PTR_ERR(di);
3829 goto out;
3830 }
3831 BUG_ON(ret == -ENOENT);
3832 if (check_path_shared(root, path))
3833 goto out;
3834
3835 err = 0;
3836out:
3837 btrfs_free_path(path);
3880a1b4
JB
3838 /* Migrate the orphan reservation over */
3839 if (!err)
3840 err = btrfs_block_rsv_migrate(trans->block_rsv,
3841 &root->fs_info->global_block_rsv,
5a77d76c 3842 trans->bytes_reserved);
3880a1b4 3843
a22285a6
YZ
3844 if (err) {
3845 btrfs_end_transaction(trans, root);
3846 root->fs_info->enospc_unlink = 0;
3847 return ERR_PTR(err);
3848 }
3849
3850 trans->block_rsv = &root->fs_info->global_block_rsv;
3851 return trans;
3852}
3853
3854static void __unlink_end_trans(struct btrfs_trans_handle *trans,
3855 struct btrfs_root *root)
3856{
66d8f3dd 3857 if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) {
5a77d76c
JB
3858 btrfs_block_rsv_release(root, trans->block_rsv,
3859 trans->bytes_reserved);
3860 trans->block_rsv = &root->fs_info->trans_block_rsv;
a22285a6
YZ
3861 BUG_ON(!root->fs_info->enospc_unlink);
3862 root->fs_info->enospc_unlink = 0;
3863 }
7ad85bb7 3864 btrfs_end_transaction(trans, root);
a22285a6
YZ
3865}
3866
3867static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3868{
3869 struct btrfs_root *root = BTRFS_I(dir)->root;
3870 struct btrfs_trans_handle *trans;
3871 struct inode *inode = dentry->d_inode;
3872 int ret;
a22285a6
YZ
3873
3874 trans = __unlink_start_trans(dir, dentry);
3875 if (IS_ERR(trans))
3876 return PTR_ERR(trans);
5f39d397 3877
12fcfd22
CM
3878 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3879
e02119d5
CM
3880 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3881 dentry->d_name.name, dentry->d_name.len);
b532402e
TI
3882 if (ret)
3883 goto out;
7b128766 3884
a22285a6 3885 if (inode->i_nlink == 0) {
7b128766 3886 ret = btrfs_orphan_add(trans, inode);
b532402e
TI
3887 if (ret)
3888 goto out;
a22285a6 3889 }
7b128766 3890
b532402e 3891out:
a22285a6 3892 __unlink_end_trans(trans, root);
b53d3f5d 3893 btrfs_btree_balance_dirty(root);
39279cc3
CM
3894 return ret;
3895}
3896
4df27c4d
YZ
/*
 * Remove a directory entry that points at a subvolume root rather than a
 * regular inode: delete the dir item, the root ref in the tree root, and
 * the (possibly delayed) dir index item, then shrink the parent dir size.
 *
 * Any unexpected failure aborts @trans.  Returns 0 or a negative errno
 * (-ENOENT when the entry cannot be found).
 */
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* -1: delete lookup of the dir item for @name in @dir */
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		if (!di)
			ret = -ENOENT;
		else
			ret = PTR_ERR(di);
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	/* a subvolume entry must point at the expected root item */
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir_ino, &index, name, name_len);
	if (ret < 0) {
		if (ret != -ENOENT) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
		/*
		 * No root ref (e.g. ENOENT); fall back to searching the
		 * dir index item directly to learn the index to delete.
		 */
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
		/* the index lives in the key offset of the dir index item */
		index = key.offset;
	}
	btrfs_release_path(path);

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	/* each name costs name_len twice in i_size (dir item + index item) */
	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode_inc_iversion(dir);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, root, dir);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}
3976
39279cc3
CM
3977static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3978{
3979 struct inode *inode = dentry->d_inode;
1832a6d5 3980 int err = 0;
39279cc3 3981 struct btrfs_root *root = BTRFS_I(dir)->root;
39279cc3 3982 struct btrfs_trans_handle *trans;
39279cc3 3983
b3ae244e 3984 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
134d4512 3985 return -ENOTEMPTY;
b3ae244e
DS
3986 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3987 return -EPERM;
134d4512 3988
a22285a6
YZ
3989 trans = __unlink_start_trans(dir, dentry);
3990 if (IS_ERR(trans))
5df6a9f6 3991 return PTR_ERR(trans);
5df6a9f6 3992
33345d01 3993 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4df27c4d
YZ
3994 err = btrfs_unlink_subvol(trans, root, dir,
3995 BTRFS_I(inode)->location.objectid,
3996 dentry->d_name.name,
3997 dentry->d_name.len);
3998 goto out;
3999 }
4000
7b128766
JB
4001 err = btrfs_orphan_add(trans, inode);
4002 if (err)
4df27c4d 4003 goto out;
7b128766 4004
39279cc3 4005 /* now the directory is empty */
e02119d5
CM
4006 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
4007 dentry->d_name.name, dentry->d_name.len);
d397712b 4008 if (!err)
dbe674a9 4009 btrfs_i_size_write(inode, 0);
4df27c4d 4010out:
a22285a6 4011 __unlink_end_trans(trans, root);
b53d3f5d 4012 btrfs_btree_balance_dirty(root);
3954401f 4013
39279cc3
CM
4014 return err;
4015}
4016
39279cc3
CM
/*
 * This can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size.
 *
 * Csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
8082510e
YZ
/* Returns 0 on success or a negative errno; may abort @trans on failure. */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	/* run of contiguous slots queued for one btrfs_del_items() call */
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	int ret;
	int err = 0;
	u64 ino = btrfs_ino(inode);

	/* partial truncation only makes sense for file extent items */
	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = -1;

	/*
	 * We want to drop from the next block forward in case this new size is
	 * not block aligned since we will be keeping the last block of the
	 * extent just the way it is.
	 */
	if (root->ref_cows || root == root->fs_info->tree_root)
		btrfs_drop_extent_cache(inode, ALIGN(new_size,
					root->sectorsize), (u64)-1, 0);

	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the loged items. So we shouldn't kill the delayed
	 * items.
	 */
	if (min_type == 0 && root == BTRFS_I(inode)->root)
		btrfs_kill_delayed_inode_items(inode);

	/* start past every possible key for this inode and walk backwards */
	key.objectid = ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

search_again:
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret > 0) {
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);

		/* walked past this inode's items */
		if (found_key.objectid != ino)
			break;

		/* lower-valued key types are preserved */
		if (found_type < min_type)
			break;

		/* item_end becomes the last byte covered by this item */
		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			/* extent ends before the cutoff: everything below
			 * it does too, so we're done */
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				/* extent straddles new_size: shrink it in
				 * place instead of deleting it */
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = ALIGN(new_size -
						found_key.offset,
						root->sectorsize);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				/* bytenr 0 marks a hole, which holds no
				 * accounted bytes */
				if (root->ref_cows && extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0) {
					/* remember to drop the disk extent
					 * reference after deleting */
					found_extent = 1;
					if (root->ref_cows)
						inode_sub_bytes(inode, num_dec);
				}
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
				btrfs_truncate_item(root, path, size, 1);
			} else if (root->ref_cows) {
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
			}
		}
delete:
		if (del_item) {
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				/* slots are walked downward one at a time,
				 * anything else is a logic error */
				BUG();
			}
		} else {
			break;
		}
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
			btrfs_set_path_blocking(path);
			ret = btrfs_free_extent(trans, root, extent_start,
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						ino, extent_offset, 0);
			BUG_ON(ret);
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		/* flush the pending run when we hit the start of the leaf
		 * or a non-contiguous slot, then re-search */
		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				if (ret) {
					btrfs_abort_transaction(trans,
								root, ret);
					goto error;
				}
				pending_del_nr = 0;
			}
			btrfs_release_path(path);
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	/* flush any deletions still queued when the walk ended */
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}
error:
	btrfs_free_path(path);
	return err;
}
4253
4254/*
2aaa6655
JB
4255 * btrfs_truncate_page - read, zero a chunk and write a page
4256 * @inode - inode that we're zeroing
4257 * @from - the offset to start zeroing
4258 * @len - the length to zero, 0 to zero the entire range respective to the
4259 * offset
4260 * @front - zero up to the offset instead of from the offset on
4261 *
4262 * This will find the page for the "from" offset and cow the page and zero the
4263 * part we want to zero. This is used with truncate and hole punching.
39279cc3 4264 */
2aaa6655
JB
/* Returns 0 on success or a negative errno; releases its delalloc
 * reservation on every failure path. */
int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
			int front)
{
	struct address_space *mapping = inode->i_mapping;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	int ret = 0;
	u64 page_start;
	u64 page_end;

	/* nothing to zero when both the offset and length are
	 * block aligned */
	if ((offset & (blocksize - 1)) == 0 &&
	    (!len || ((len & (blocksize - 1)) == 0)))
		goto out;
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret)
		goto out;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
		ret = -ENOMEM;
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	/* bring the page uptodate before modifying part of it */
	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		/* page was truncated/migrated while unlocked, retry */
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
	set_page_extent_mapped(page);

	/* wait out any in-flight ordered extent covering this page,
	 * then start over */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/* clear stale per-range state before re-marking it delalloc */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC |
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	if (offset != PAGE_CACHE_SIZE) {
		if (!len)
			len = PAGE_CACHE_SIZE - offset;
		kaddr = kmap(page);
		/* front: zero [0, offset); otherwise zero
		 * [offset, offset + len) */
		if (front)
			memset(kaddr, 0, offset);
		else
			memset(kaddr + offset, 0, len);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
4367
695a0d0d
JB
4368/*
4369 * This function puts in dummy file extents for the area we're creating a hole
4370 * for. So if we are truncating this file to a larger size we need to insert
4371 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
4372 * the range between oldsize and size
4373 */
/* Returns 0 on success or a negative errno; any partially-inserted hole
 * extents before a failure remain in place. */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	/* hole covers the block-aligned range [hole_start, block_end) */
	u64 hole_start = ALIGN(oldsize, root->sectorsize);
	u64 block_end = ALIGN(size, root->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/* growing within the same block: nothing to fill */
	if (size <= hole_start)
		return 0;

	/* lock the range, looping until no ordered extent overlaps it */
	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state);
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				block_end - cur_offset, 0);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte , root->sectorsize);
		/* prealloc regions already have extents, skip them */
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			struct extent_map *hole_em;
			hole_size = last_byte - cur_offset;

			/* one transaction per hole chunk */
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				break;
			}

			err = btrfs_drop_extents(trans, root, inode,
						 cur_offset,
						 cur_offset + hole_size, 1);
			if (err) {
				btrfs_abort_transaction(trans, root, err);
				btrfs_end_transaction(trans, root);
				break;
			}

			/* disk_bytenr 0 == a hole extent on disk */
			err = btrfs_insert_file_extent(trans, root,
					btrfs_ino(inode), cur_offset, 0,
					0, hole_size, 0, hole_size,
					0, 0, 0);
			if (err) {
				btrfs_abort_transaction(trans, root, err);
				btrfs_end_transaction(trans, root);
				break;
			}

			btrfs_drop_extent_cache(inode, cur_offset,
						cur_offset + hole_size - 1, 0);
			hole_em = alloc_extent_map();
			if (!hole_em) {
				/* can't cache the hole: force a full sync
				 * so the log doesn't miss it */
				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					&BTRFS_I(inode)->runtime_flags);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
			hole_em->compress_type = BTRFS_COMPRESS_NONE;
			hole_em->generation = trans->transid;

			/* insert the cached mapping, evicting any racing
			 * duplicate until it sticks */
			while (1) {
				write_lock(&em_tree->lock);
				err = add_extent_mapping(em_tree, hole_em, 1);
				write_unlock(&em_tree->lock);
				if (err != -EEXIST)
					break;
				btrfs_drop_extent_cache(inode, cur_offset,
							cur_offset +
							hole_size - 1, 0);
			}
			free_extent_map(hole_em);
next:
			btrfs_update_inode(trans, root, inode);
			btrfs_end_transaction(trans, root);
		}
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}

	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}
39279cc3 4493
/*
 * Change the file size as part of setattr: expand via btrfs_cont_expand()
 * (filling holes) or shrink via btrfs_truncate(), guarded by an orphan
 * item so a crash mid-truncate stays consistent.  Returns 0 or a
 * negative errno.
 */
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	if (newsize == oldsize)
		return 0;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);

	if (newsize > oldsize) {
		/* growing: fill [oldsize, newsize) with hole extents,
		 * then persist the new i_size */
		truncate_pagecache(inode, oldsize, newsize);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret)
			return ret;

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		ret = btrfs_update_inode(trans, root, inode);
		btrfs_end_transaction(trans, root);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		/*
		 * 1 for the orphan item we're going to add
		 * 1 for the orphan item deletion.
		 */
		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		/*
		 * We need to do this in case we fail at _any_ point during the
		 * actual truncate.  Once we do the truncate_setsize we could
		 * invalidate pages which forces any outstanding ordered io to
		 * be instantly completed which will give us extents that need
		 * to be truncated.  If we fail to get an orphan inode down we
		 * could have left over extents that were never meant to live,
		 * so we need to garuntee from this point on that everything
		 * will be consistent.
		 */
		ret = btrfs_orphan_add(trans, inode);
		btrfs_end_transaction(trans, root);
		if (ret)
			return ret;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);

		/* Disable nonlocked read DIO to avoid the end less truncate */
		btrfs_inode_block_unlocked_dio(inode);
		inode_dio_wait(inode);
		btrfs_inode_resume_unlocked_dio(inode);

		ret = btrfs_truncate(inode);
		/* truncate failed but the inode still has links: drop the
		 * orphan item we added above */
		if (ret && inode->i_nlink)
			btrfs_orphan_del(NULL, inode);
	}

	return ret;
}
4578
9036c102
YZ
4579static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
4580{
4581 struct inode *inode = dentry->d_inode;
b83cc969 4582 struct btrfs_root *root = BTRFS_I(inode)->root;
9036c102 4583 int err;
39279cc3 4584
b83cc969
LZ
4585 if (btrfs_root_readonly(root))
4586 return -EROFS;
4587
9036c102
YZ
4588 err = inode_change_ok(inode, attr);
4589 if (err)
4590 return err;
2bf5a725 4591
5a3f23d5 4592 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3972f260 4593 err = btrfs_setsize(inode, attr);
8082510e
YZ
4594 if (err)
4595 return err;
39279cc3 4596 }
9036c102 4597
1025774c
CH
4598 if (attr->ia_valid) {
4599 setattr_copy(inode, attr);
0c4d2d95 4600 inode_inc_iversion(inode);
22c44fe6 4601 err = btrfs_dirty_inode(inode);
1025774c 4602
22c44fe6 4603 if (!err && attr->ia_valid & ATTR_MODE)
1025774c
CH
4604 err = btrfs_acl_chmod(inode);
4605 }
33268eaf 4606
39279cc3
CM
4607 return err;
4608}
61295eb8 4609
/*
 * Evict an inode from memory; if it has no remaining links, delete all of
 * its items from the tree (truncate to zero plus orphan cleanup),
 * retrying the truncate with refilled metadata reservations on ENOSPC.
 */
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv, *global_rsv;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	int ret;

	trace_btrfs_inode_evict(inode);

	truncate_inode_pages(&inode->i_data, 0);
	/* still-linked inodes in a live root (or free-space inodes) are
	 * just dropped from memory, not deleted */
	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
			       btrfs_is_free_space_inode(inode)))
		goto no_delete;

	if (is_bad_inode(inode)) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	/* during log replay, orphan handling is deferred */
	if (root->fs_info->log_root_recovering) {
		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
				 &BTRFS_I(inode)->runtime_flags));
		goto no_delete;
	}

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
		goto no_delete;
	}

	ret = btrfs_commit_inode_delayed_inode(inode);
	if (ret) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	rsv->size = min_size;
	rsv->failfast = 1;
	global_rsv = &root->fs_info->global_block_rsv;

	btrfs_i_size_write(inode, 0);

	/*
	 * This is a bit simpler than btrfs_truncate since we've already
	 * reserved our space for our orphan item in the unlink, so we just
	 * need to reserve some slack space in case we add bytes and update
	 * inode item when doing the truncate.
	 */
	while (1) {
		ret = btrfs_block_rsv_refill(root, rsv, min_size,
					     BTRFS_RESERVE_FLUSH_LIMIT);

		/*
		 * Try and steal from the global reserve since we will
		 * likely not use this space anyway, we want to try as
		 * hard as possible to get this to work.
		 */
		if (ret)
			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);

		if (ret) {
			btrfs_warn(root->fs_info,
				"Could not get space for a delete, will truncate on mount %d",
				ret);
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		trans->block_rsv = rsv;

		/* drop everything; loop again (with a fresh reservation)
		 * only on ENOSPC */
		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root);
	}

	btrfs_free_block_rsv(root, rsv);

	/* items are gone, remove the orphan marker too */
	if (ret == 0) {
		trans->block_rsv = root->orphan_block_rsv;
		ret = btrfs_orphan_del(trans, inode);
		BUG_ON(ret);
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	/* hand the inode number back to the ino cache where applicable */
	if (!(root == root->fs_info->tree_root ||
	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
		btrfs_return_ino(root, btrfs_ino(inode));

	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
no_delete:
	clear_inode(inode);
	return;
}
4725
4726/*
4727 * this returns the key found in the dir entry in the location pointer.
4728 * If no dir entries were found, location->objectid is 0.
4729 */
4730static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
4731 struct btrfs_key *location)
4732{
4733 const char *name = dentry->d_name.name;
4734 int namelen = dentry->d_name.len;
4735 struct btrfs_dir_item *di;
4736 struct btrfs_path *path;
4737 struct btrfs_root *root = BTRFS_I(dir)->root;
0d9f7f3e 4738 int ret = 0;
39279cc3
CM
4739
4740 path = btrfs_alloc_path();
d8926bb3
MF
4741 if (!path)
4742 return -ENOMEM;
3954401f 4743
33345d01 4744 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
39279cc3 4745 namelen, 0);
0d9f7f3e
Y
4746 if (IS_ERR(di))
4747 ret = PTR_ERR(di);
d397712b 4748
c704005d 4749 if (IS_ERR_OR_NULL(di))
3954401f 4750 goto out_err;
d397712b 4751
5f39d397 4752 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
39279cc3 4753out:
39279cc3
CM
4754 btrfs_free_path(path);
4755 return ret;
3954401f
CM
4756out_err:
4757 location->objectid = 0;
4758 goto out;
39279cc3
CM
4759}
4760
4761/*
4762 * when we hit a tree root in a directory, the btrfs part of the inode
4763 * needs to be changed to reflect the root directory of the tree root. This
4764 * is kind of like crossing a mount point.
4765 */
/* Returns 0 with *sub_root/*location rewritten for the subvolume root,
 * -ENOENT when no matching root ref exists, or another negative errno. */
static int fixup_tree_root_location(struct btrfs_root *root,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	/* default to "not found"; overwritten on hard errors below */
	err = -ENOENT;
	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
				  BTRFS_I(dir)->root->root_key.objectid,
				  location->objectid);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	/* the root ref must point back at @dir with a matching name */
	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	/* name bytes are stored inline right after the ref item */
	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	/* a root with zero refs is dead/being deleted */
	if (btrfs_root_refs(&new_root->root_item) == 0) {
		err = -ENOENT;
		goto out;
	}

	/* redirect the lookup to the subvolume's root directory */
	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}
4829
5d4f98a2
YZ
/*
 * Insert this inode into the per-root rbtree of in-memory inodes,
 * keyed by inode number.  If a stale entry with the same ino is found
 * (an inode that is on its way to being freed), it is evicted from the
 * tree and the insert is retried from scratch.
 */
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	u64 ino = btrfs_ino(inode);
again:
	p = &root->inode_tree.rb_node;
	parent = NULL;

	/* an unhashed inode is being torn down; don't track it */
	if (inode_unhashed(inode))
		return;

	spin_lock(&root->inode_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_right;
		else {
			/*
			 * same ino already present: it must be dying;
			 * drop it and restart the descent without the lock
			 */
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_erase(parent, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			goto again;
		}
	}
	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}
4866
/*
 * Remove this inode from the per-root rbtree.  If that leaves the tree
 * empty and the root itself is a deleted snapshot (root_refs == 0),
 * queue the root for cleanup.
 */
static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	/*
	 * Free space cache has inodes in the tree root, but the tree root has a
	 * root_refs of 0, so this could end up dropping the tree root as a
	 * snapshot, so we need the extra !root->fs_info->tree_root check to
	 * make sure we don't drop it.
	 */
	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root) {
		/* wait for lookups in flight, then re-check emptiness */
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}
4896
143bede5 4897void btrfs_invalidate_inodes(struct btrfs_root *root)
76dda93c
YZ
4898{
4899 struct rb_node *node;
4900 struct rb_node *prev;
4901 struct btrfs_inode *entry;
4902 struct inode *inode;
4903 u64 objectid = 0;
4904
4905 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4906
4907 spin_lock(&root->inode_lock);
4908again:
4909 node = root->inode_tree.rb_node;
4910 prev = NULL;
4911 while (node) {
4912 prev = node;
4913 entry = rb_entry(node, struct btrfs_inode, rb_node);
4914
33345d01 4915 if (objectid < btrfs_ino(&entry->vfs_inode))
76dda93c 4916 node = node->rb_left;
33345d01 4917 else if (objectid > btrfs_ino(&entry->vfs_inode))
76dda93c
YZ
4918 node = node->rb_right;
4919 else
4920 break;
4921 }
4922 if (!node) {
4923 while (prev) {
4924 entry = rb_entry(prev, struct btrfs_inode, rb_node);
33345d01 4925 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
76dda93c
YZ
4926 node = prev;
4927 break;
4928 }
4929 prev = rb_next(prev);
4930 }
4931 }
4932 while (node) {
4933 entry = rb_entry(node, struct btrfs_inode, rb_node);
33345d01 4934 objectid = btrfs_ino(&entry->vfs_inode) + 1;
76dda93c
YZ
4935 inode = igrab(&entry->vfs_inode);
4936 if (inode) {
4937 spin_unlock(&root->inode_lock);
4938 if (atomic_read(&inode->i_count) > 1)
4939 d_prune_aliases(inode);
4940 /*
45321ac5 4941 * btrfs_drop_inode will have it removed from
76dda93c
YZ
4942 * the inode cache when its usage count
4943 * hits zero.
4944 */
4945 iput(inode);
4946 cond_resched();
4947 spin_lock(&root->inode_lock);
4948 goto again;
4949 }
4950
4951 if (cond_resched_lock(&root->inode_lock))
4952 goto again;
4953
4954 node = rb_next(node);
4955 }
4956 spin_unlock(&root->inode_lock);
5d4f98a2
YZ
4957}
4958
e02119d5
CM
4959static int btrfs_init_locked_inode(struct inode *inode, void *p)
4960{
4961 struct btrfs_iget_args *args = p;
4962 inode->i_ino = args->ino;
e02119d5 4963 BTRFS_I(inode)->root = args->root;
39279cc3
CM
4964 return 0;
4965}
4966
4967static int btrfs_find_actor(struct inode *inode, void *opaque)
4968{
4969 struct btrfs_iget_args *args = opaque;
33345d01 4970 return args->ino == btrfs_ino(inode) &&
d397712b 4971 args->root == BTRFS_I(inode)->root;
39279cc3
CM
4972}
4973
5d4f98a2
YZ
4974static struct inode *btrfs_iget_locked(struct super_block *s,
4975 u64 objectid,
4976 struct btrfs_root *root)
39279cc3
CM
4977{
4978 struct inode *inode;
4979 struct btrfs_iget_args args;
4980 args.ino = objectid;
4981 args.root = root;
4982
4983 inode = iget5_locked(s, objectid, btrfs_find_actor,
4984 btrfs_init_locked_inode,
4985 (void *)&args);
4986 return inode;
4987}
4988
1a54ef8c
BR
/* Get an inode object given its location and corresponding root.
 * Returns in *new if the inode was read from disk (i.e. was not already
 * cached); returns ERR_PTR(-ENOMEM) on allocation failure or
 * ERR_PTR(-ESTALE) if the on-disk inode could not be read sanely.
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* I_NEW means we allocated it: fill it from disk before unlocking */
	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
		if (!is_bad_inode(inode)) {
			inode_tree_add(inode);
			unlock_new_inode(inode);
			if (new)
				*new = 1;
		} else {
			/* read failed; drop the half-built inode */
			unlock_new_inode(inode);
			iput(inode);
			inode = ERR_PTR(-ESTALE);
		}
	}

	return inode;
}
5019
4df27c4d
YZ
/*
 * Build an in-memory-only placeholder directory inode for a subvolume
 * whose root ref could not be resolved (e.g. a deleted snapshot still
 * visible in its parent).  The inode is flagged BTRFS_INODE_DUMMY so it
 * is never written back, and is read-only except for unlink-style ops.
 */
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &btrfs_dir_ro_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}
5041
3de4586c 5042struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
39279cc3 5043{
d397712b 5044 struct inode *inode;
4df27c4d 5045 struct btrfs_root *root = BTRFS_I(dir)->root;
39279cc3
CM
5046 struct btrfs_root *sub_root = root;
5047 struct btrfs_key location;
76dda93c 5048 int index;
b4aff1f8 5049 int ret = 0;
39279cc3
CM
5050
5051 if (dentry->d_name.len > BTRFS_NAME_LEN)
5052 return ERR_PTR(-ENAMETOOLONG);
5f39d397 5053
39e3c955 5054 ret = btrfs_inode_by_name(dir, dentry, &location);
39279cc3
CM
5055 if (ret < 0)
5056 return ERR_PTR(ret);
5f39d397 5057
4df27c4d
YZ
5058 if (location.objectid == 0)
5059 return NULL;
5060
5061 if (location.type == BTRFS_INODE_ITEM_KEY) {
73f73415 5062 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4df27c4d
YZ
5063 return inode;
5064 }
5065
5066 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5067
76dda93c 5068 index = srcu_read_lock(&root->fs_info->subvol_srcu);
4df27c4d
YZ
5069 ret = fixup_tree_root_location(root, dir, dentry,
5070 &location, &sub_root);
5071 if (ret < 0) {
5072 if (ret != -ENOENT)
5073 inode = ERR_PTR(ret);
5074 else
5075 inode = new_simple_dir(dir->i_sb, &location, sub_root);
5076 } else {
73f73415 5077 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
39279cc3 5078 }
76dda93c
YZ
5079 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5080
34d19bad 5081 if (!IS_ERR(inode) && root != sub_root) {
c71bf099
YZ
5082 down_read(&root->fs_info->cleanup_work_sem);
5083 if (!(inode->i_sb->s_flags & MS_RDONLY))
66b4ffd1 5084 ret = btrfs_orphan_cleanup(sub_root);
c71bf099 5085 up_read(&root->fs_info->cleanup_work_sem);
66b4ffd1
JB
5086 if (ret)
5087 inode = ERR_PTR(ret);
c71bf099
YZ
5088 }
5089
3de4586c
CM
5090 return inode;
5091}
5092
fe15ce44 5093static int btrfs_dentry_delete(const struct dentry *dentry)
76dda93c
YZ
5094{
5095 struct btrfs_root *root;
848cce0d 5096 struct inode *inode = dentry->d_inode;
76dda93c 5097
848cce0d
LZ
5098 if (!inode && !IS_ROOT(dentry))
5099 inode = dentry->d_parent->d_inode;
76dda93c 5100
848cce0d
LZ
5101 if (inode) {
5102 root = BTRFS_I(inode)->root;
efefb143
YZ
5103 if (btrfs_root_refs(&root->root_item) == 0)
5104 return 1;
848cce0d
LZ
5105
5106 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5107 return 1;
efefb143 5108 }
76dda93c
YZ
5109 return 0;
5110}
5111
b4aff1f8
JB
5112static void btrfs_dentry_release(struct dentry *dentry)
5113{
5114 if (dentry->d_fsdata)
5115 kfree(dentry->d_fsdata);
5116}
5117
3de4586c 5118static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
00cd8dd3 5119 unsigned int flags)
3de4586c 5120{
a66e7cc6
JB
5121 struct dentry *ret;
5122
5123 ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
a66e7cc6 5124 return ret;
39279cc3
CM
5125}
5126
16cdcec7 5127unsigned char btrfs_filetype_table[] = {
39279cc3
CM
5128 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5129};
5130
cbdf5a24
DW
/*
 * VFS ->readdir(): emit ".", "..", then the on-disk DIR_INDEX (or, for the
 * tree root, DIR_ITEM) items at offsets >= f_pos, merged with the delayed
 * (not-yet-committed) dir-index insertions and deletions.
 */
static int btrfs_real_readdir(struct file *filp, void *dirent,
			      filldir_t filldir)
{
	struct inode *inode = file_inode(filp);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
	char tmp_name[32];	/* stack buffer for short names */
	char *name_ptr;
	int name_len;
	int is_curr = 0;	/* filp->f_pos points to the current index? */

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;

	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1,
			       filp->f_pos, btrfs_ino(inode), DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}
	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
		u64 pino = parent_ino(filp->f_path.dentry);
		over = filldir(dirent, "..", 2,
			       filp->f_pos, pino, DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 2;
	}
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;

	/* snapshot pending delayed dir-index inserts/deletes for the merge */
	if (key_type == BTRFS_DIR_INDEX_KEY) {
		INIT_LIST_HEAD(&ins_list);
		INIT_LIST_HEAD(&del_list);
		btrfs_get_delayed_items(inode, &ins_list, &del_list);
	}

	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;
	key.objectid = btrfs_ino(inode);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* stop once we leave this directory's key range */
		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != key_type)
			break;
		if (found_key.offset < filp->f_pos)
			goto next;
		/* skip entries already deleted in the delayed queue */
		if (key_type == BTRFS_DIR_INDEX_KEY &&
		    btrfs_should_delete_dir_index(&del_list,
						  found_key.offset))
			goto next;

		filp->f_pos = found_key.offset;
		is_curr = 1;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
		di_total = btrfs_item_size(leaf, item);

		/* a DIR_ITEM may pack several entries; walk them all */
		while (di_cur < di_total) {
			struct btrfs_key location;

			if (verify_dir_item(root, leaf, di))
				break;

			name_len = btrfs_dir_name_len(leaf, di);
			if (name_len <= sizeof(tmp_name)) {
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);


			/* is this a reference to our own snapshot? If so
			 * skip it.
			 *
			 * In contrast to old kernels, we insert the snapshot's
			 * dir item and dir index after it has been created, so
			 * we won't find a reference to our own snapshot. We
			 * still keep the following code for backward
			 * compatibility.
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
			over = filldir(dirent, name_ptr, name_len,
				       found_key.offset, location.objectid,
				       d_type);

skip:
			if (name_ptr != tmp_name)
				kfree(name_ptr);

			/* user buffer full: stop, ret stays 0 */
			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
next:
		path->slots[0]++;
	}

	/* now emit the delayed (uncommitted) insertions past f_pos */
	if (key_type == BTRFS_DIR_INDEX_KEY) {
		if (is_curr)
			filp->f_pos++;
		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
						      &ins_list);
		if (ret)
			goto nopos;
	}

	/* Reached end of directory/root. Bump pos past the last item. */
	if (key_type == BTRFS_DIR_INDEX_KEY)
		/*
		 * 32-bit glibc will use getdents64, but then strtol -
		 * so the last number we can serve is this.
		 */
		filp->f_pos = 0x7fffffff;
	else
		filp->f_pos++;
nopos:
	ret = 0;
err:
	if (key_type == BTRFS_DIR_INDEX_KEY)
		btrfs_put_delayed_items(&ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}
5313
a9185b41 5314int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
39279cc3
CM
5315{
5316 struct btrfs_root *root = BTRFS_I(inode)->root;
5317 struct btrfs_trans_handle *trans;
5318 int ret = 0;
0af3d00b 5319 bool nolock = false;
39279cc3 5320
72ac3c0d 5321 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4ca8b41e
CM
5322 return 0;
5323
83eea1f1 5324 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
82d5902d 5325 nolock = true;
0af3d00b 5326
a9185b41 5327 if (wbc->sync_mode == WB_SYNC_ALL) {
0af3d00b 5328 if (nolock)
7a7eaa40 5329 trans = btrfs_join_transaction_nolock(root);
0af3d00b 5330 else
7a7eaa40 5331 trans = btrfs_join_transaction(root);
3612b495
TI
5332 if (IS_ERR(trans))
5333 return PTR_ERR(trans);
a698d075 5334 ret = btrfs_commit_transaction(trans, root);
39279cc3
CM
5335 }
5336 return ret;
5337}
5338
5339/*
54aa1f4d 5340 * This is somewhat expensive, updating the tree every time the
39279cc3
CM
5341 * inode changes. But, it is most likely to find the inode in cache.
5342 * FIXME, needs more benchmarking...there are no reasons other than performance
5343 * to keep or drop this code.
5344 */
22c44fe6 5345int btrfs_dirty_inode(struct inode *inode)
39279cc3
CM
5346{
5347 struct btrfs_root *root = BTRFS_I(inode)->root;
5348 struct btrfs_trans_handle *trans;
8929ecfa
YZ
5349 int ret;
5350
72ac3c0d 5351 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
22c44fe6 5352 return 0;
39279cc3 5353
7a7eaa40 5354 trans = btrfs_join_transaction(root);
22c44fe6
JB
5355 if (IS_ERR(trans))
5356 return PTR_ERR(trans);
8929ecfa
YZ
5357
5358 ret = btrfs_update_inode(trans, root, inode);
94b60442
CM
5359 if (ret && ret == -ENOSPC) {
5360 /* whoops, lets try again with the full transaction */
5361 btrfs_end_transaction(trans, root);
5362 trans = btrfs_start_transaction(root, 1);
22c44fe6
JB
5363 if (IS_ERR(trans))
5364 return PTR_ERR(trans);
8929ecfa 5365
94b60442 5366 ret = btrfs_update_inode(trans, root, inode);
94b60442 5367 }
39279cc3 5368 btrfs_end_transaction(trans, root);
16cdcec7
MX
5369 if (BTRFS_I(inode)->delayed_node)
5370 btrfs_balance_delayed_items(root);
22c44fe6
JB
5371
5372 return ret;
5373}
5374
/*
 * This is a copy of file_update_time.  We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 *
 * @flags selects which timestamps/version fields to bump (S_VERSION,
 * S_CTIME, S_MTIME, S_ATIME); the inode is then persisted via
 * btrfs_dirty_inode(), whose errno propagates to the caller.
 */
static int btrfs_update_time(struct inode *inode, struct timespec *now,
			     int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;
	return btrfs_dirty_inode(inode);
}
5397
d352ac68
CM
5398/*
5399 * find the highest existing sequence number in a directory
5400 * and then set the in-memory index_cnt variable to reflect
5401 * free sequence numbers
5402 */
aec7477b
JB
5403static int btrfs_set_inode_index_count(struct inode *inode)
5404{
5405 struct btrfs_root *root = BTRFS_I(inode)->root;
5406 struct btrfs_key key, found_key;
5407 struct btrfs_path *path;
5408 struct extent_buffer *leaf;
5409 int ret;
5410
33345d01 5411 key.objectid = btrfs_ino(inode);
aec7477b
JB
5412 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
5413 key.offset = (u64)-1;
5414
5415 path = btrfs_alloc_path();
5416 if (!path)
5417 return -ENOMEM;
5418
5419 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5420 if (ret < 0)
5421 goto out;
5422 /* FIXME: we should be able to handle this */
5423 if (ret == 0)
5424 goto out;
5425 ret = 0;
5426
5427 /*
5428 * MAGIC NUMBER EXPLANATION:
5429 * since we search a directory based on f_pos we have to start at 2
5430 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
5431 * else has to start at 2
5432 */
5433 if (path->slots[0] == 0) {
5434 BTRFS_I(inode)->index_cnt = 2;
5435 goto out;
5436 }
5437
5438 path->slots[0]--;
5439
5440 leaf = path->nodes[0];
5441 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5442
33345d01 5443 if (found_key.objectid != btrfs_ino(inode) ||
aec7477b
JB
5444 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
5445 BTRFS_I(inode)->index_cnt = 2;
5446 goto out;
5447 }
5448
5449 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
5450out:
5451 btrfs_free_path(path);
5452 return ret;
5453}
5454
d352ac68
CM
5455/*
5456 * helper to find a free sequence number in a given directory. This current
5457 * code is very simple, later versions will do smarter things in the btree
5458 */
3de4586c 5459int btrfs_set_inode_index(struct inode *dir, u64 *index)
aec7477b
JB
5460{
5461 int ret = 0;
5462
5463 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
16cdcec7
MX
5464 ret = btrfs_inode_delayed_dir_index_count(dir);
5465 if (ret) {
5466 ret = btrfs_set_inode_index_count(dir);
5467 if (ret)
5468 return ret;
5469 }
aec7477b
JB
5470 }
5471
00e4e6b3 5472 *index = BTRFS_I(dir)->index_cnt;
aec7477b
JB
5473 BTRFS_I(dir)->index_cnt++;
5474
5475 return ret;
5476}
5477
39279cc3
CM
/*
 * Allocate and initialize a brand new inode: creates the in-core inode,
 * reserves a dir index in @dir (if non-NULL), and inserts both the
 * INODE_ITEM and the initial INODE_REF into the btree in one batch.
 * Returns the new inode or an ERR_PTR; on failure the reserved dir index
 * is given back and the in-core inode released.
 */
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid,
				     umode_t mode, u64 *index)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;
	int owner;	/* NOTE(review): assigned below but never read here — verify */

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_get_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);

	if (S_ISDIR(mode))
		owner = 0;
	else
		owner = 1;

	/* key[0]: the inode item itself */
	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	/*
	 * Start new inodes with an inode_ref. This is slightly more
	 * efficient for small numbers of hard links since they will
	 * be packed into one item. Extended refs will kick in if we
	 * add more hard links than can fit in the ref item.
	 */
	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	/* insert inode item + inode ref in a single btree operation */
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	btrfs_update_root_times(trans, root);

	return inode;
fail:
	/* give the reserved dir index back before dropping the inode */
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}
5617
/*
 * Map an inode's mode bits to the on-disk BTRFS_FT_* dir-entry type via
 * the btrfs_type_by_mode lookup table (indexed by the S_IFMT bits
 * shifted down by S_SHIFT; both defined elsewhere in this file).
 */
static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}
5622
d352ac68
CM
5623/*
5624 * utility function to add 'inode' into 'parent_inode' with
5625 * a give name and a given sequence number.
5626 * if 'add_backref' is true, also insert a backref from the
5627 * inode to the parent directory.
5628 */
e02119d5
CM
5629int btrfs_add_link(struct btrfs_trans_handle *trans,
5630 struct inode *parent_inode, struct inode *inode,
5631 const char *name, int name_len, int add_backref, u64 index)
39279cc3 5632{
4df27c4d 5633 int ret = 0;
39279cc3 5634 struct btrfs_key key;
e02119d5 5635 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
33345d01
LZ
5636 u64 ino = btrfs_ino(inode);
5637 u64 parent_ino = btrfs_ino(parent_inode);
5f39d397 5638
33345d01 5639 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4df27c4d
YZ
5640 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
5641 } else {
33345d01 5642 key.objectid = ino;
4df27c4d
YZ
5643 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
5644 key.offset = 0;
5645 }
5646
33345d01 5647 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4df27c4d
YZ
5648 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
5649 key.objectid, root->root_key.objectid,
33345d01 5650 parent_ino, index, name, name_len);
4df27c4d 5651 } else if (add_backref) {
33345d01
LZ
5652 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
5653 parent_ino, index);
4df27c4d 5654 }
39279cc3 5655
79787eaa
JM
5656 /* Nothing to clean up yet */
5657 if (ret)
5658 return ret;
4df27c4d 5659
79787eaa
JM
5660 ret = btrfs_insert_dir_item(trans, root, name, name_len,
5661 parent_inode, &key,
5662 btrfs_inode_type(inode), index);
9c52057c 5663 if (ret == -EEXIST || ret == -EOVERFLOW)
79787eaa
JM
5664 goto fail_dir_item;
5665 else if (ret) {
5666 btrfs_abort_transaction(trans, root, ret);
5667 return ret;
39279cc3 5668 }
79787eaa
JM
5669
5670 btrfs_i_size_write(parent_inode, parent_inode->i_size +
5671 name_len * 2);
0c4d2d95 5672 inode_inc_iversion(parent_inode);
79787eaa
JM
5673 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
5674 ret = btrfs_update_inode(trans, root, parent_inode);
5675 if (ret)
5676 btrfs_abort_transaction(trans, root, ret);
39279cc3 5677 return ret;
fe66a05a
CM
5678
5679fail_dir_item:
5680 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5681 u64 local_index;
5682 int err;
5683 err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
5684 key.objectid, root->root_key.objectid,
5685 parent_ino, &local_index, name, name_len);
5686
5687 } else if (add_backref) {
5688 u64 local_index;
5689 int err;
5690
5691 err = btrfs_del_inode_ref(trans, root, name, name_len,
5692 ino, parent_ino, &local_index);
5693 }
5694 return ret;
39279cc3
CM
5695}
5696
5697static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
a1b075d2
JB
5698 struct inode *dir, struct dentry *dentry,
5699 struct inode *inode, int backref, u64 index)
39279cc3 5700{
a1b075d2
JB
5701 int err = btrfs_add_link(trans, dir, inode,
5702 dentry->d_name.name, dentry->d_name.len,
5703 backref, index);
39279cc3
CM
5704 if (err > 0)
5705 err = -EEXIST;
5706 return err;
5707}
5708
618e21d5 5709static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
1a67aafb 5710 umode_t mode, dev_t rdev)
618e21d5
JB
5711{
5712 struct btrfs_trans_handle *trans;
5713 struct btrfs_root *root = BTRFS_I(dir)->root;
1832a6d5 5714 struct inode *inode = NULL;
618e21d5
JB
5715 int err;
5716 int drop_inode = 0;
5717 u64 objectid;
00e4e6b3 5718 u64 index = 0;
618e21d5
JB
5719
5720 if (!new_valid_dev(rdev))
5721 return -EINVAL;
5722
9ed74f2d
JB
5723 /*
5724 * 2 for inode item and ref
5725 * 2 for dir items
5726 * 1 for xattr if selinux is on
5727 */
a22285a6
YZ
5728 trans = btrfs_start_transaction(root, 5);
5729 if (IS_ERR(trans))
5730 return PTR_ERR(trans);
1832a6d5 5731
581bb050
LZ
5732 err = btrfs_find_free_ino(root, &objectid);
5733 if (err)
5734 goto out_unlock;
5735
aec7477b 5736 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
33345d01 5737 dentry->d_name.len, btrfs_ino(dir), objectid,
d82a6f1d 5738 mode, &index);
7cf96da3
TI
5739 if (IS_ERR(inode)) {
5740 err = PTR_ERR(inode);
618e21d5 5741 goto out_unlock;
7cf96da3 5742 }
618e21d5 5743
2a7dba39 5744 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
33268eaf
JB
5745 if (err) {
5746 drop_inode = 1;
5747 goto out_unlock;
5748 }
5749
ad19db71
CS
5750 /*
5751 * If the active LSM wants to access the inode during
5752 * d_instantiate it needs these. Smack checks to see
5753 * if the filesystem supports xattrs by looking at the
5754 * ops vector.
5755 */
5756
5757 inode->i_op = &btrfs_special_inode_operations;
a1b075d2 5758 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
618e21d5
JB
5759 if (err)
5760 drop_inode = 1;
5761 else {
618e21d5 5762 init_special_inode(inode, inode->i_mode, rdev);
1b4ab1bb 5763 btrfs_update_inode(trans, root, inode);
08c422c2 5764 d_instantiate(dentry, inode);
618e21d5 5765 }
618e21d5 5766out_unlock:
7ad85bb7 5767 btrfs_end_transaction(trans, root);
b53d3f5d 5768 btrfs_btree_balance_dirty(root);
618e21d5
JB
5769 if (drop_inode) {
5770 inode_dec_link_count(inode);
5771 iput(inode);
5772 }
618e21d5
JB
5773 return err;
5774}
5775
/*
 * Create a regular file in @dir.
 *
 * Reserves 5 transaction items (2 inode item + ref, 2 dir items, 1 xattr
 * for selinux).  drop_inode_on_err is only set once btrfs_new_inode()
 * succeeded, so the error path never iput()s an ERR_PTR.  Note that the
 * i_op/i_fop assignment happens before btrfs_add_nondir() so an LSM can
 * access the inode during d_instantiate (see comment below).
 */
static int btrfs_create(struct inode *dir, struct dentry *dentry,
			umode_t mode, bool excl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int drop_inode_on_err = 0;
	int err;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}
	/* from here on, failure must drop the newly created inode */
	drop_inode_on_err = 1;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock;

	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_unlock;

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		goto out_unlock;

	inode->i_mapping->a_ops = &btrfs_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	d_instantiate(dentry, inode);

out_unlock:
	btrfs_end_transaction(trans, root);
	if (err && drop_inode_on_err) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root);
	return err;
}
5844
5845static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
5846 struct dentry *dentry)
5847{
5848 struct btrfs_trans_handle *trans;
5849 struct btrfs_root *root = BTRFS_I(dir)->root;
5850 struct inode *inode = old_dentry->d_inode;
00e4e6b3 5851 u64 index;
39279cc3
CM
5852 int err;
5853 int drop_inode = 0;
5854
4a8be425
TH
5855 /* do not allow sys_link's with other subvols of the same device */
5856 if (root->objectid != BTRFS_I(inode)->root->objectid)
3ab3564f 5857 return -EXDEV;
4a8be425 5858
f186373f 5859 if (inode->i_nlink >= BTRFS_LINK_MAX)
c055e99e 5860 return -EMLINK;
4a8be425 5861
3de4586c 5862 err = btrfs_set_inode_index(dir, &index);
aec7477b
JB
5863 if (err)
5864 goto fail;
5865
a22285a6 5866 /*
7e6b6465 5867 * 2 items for inode and inode ref
a22285a6 5868 * 2 items for dir items
7e6b6465 5869 * 1 item for parent inode
a22285a6 5870 */
7e6b6465 5871 trans = btrfs_start_transaction(root, 5);
a22285a6
YZ
5872 if (IS_ERR(trans)) {
5873 err = PTR_ERR(trans);
5874 goto fail;
5875 }
5f39d397 5876
3153495d 5877 btrfs_inc_nlink(inode);
0c4d2d95 5878 inode_inc_iversion(inode);
3153495d 5879 inode->i_ctime = CURRENT_TIME;
7de9c6ee 5880 ihold(inode);
e9976151 5881 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
aec7477b 5882
a1b075d2 5883 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5f39d397 5884
a5719521 5885 if (err) {
54aa1f4d 5886 drop_inode = 1;
a5719521 5887 } else {
10d9f309 5888 struct dentry *parent = dentry->d_parent;
a5719521 5889 err = btrfs_update_inode(trans, root, inode);
79787eaa
JM
5890 if (err)
5891 goto fail;
08c422c2 5892 d_instantiate(dentry, inode);
6a912213 5893 btrfs_log_new_name(trans, inode, NULL, parent);
a5719521 5894 }
39279cc3 5895
7ad85bb7 5896 btrfs_end_transaction(trans, root);
1832a6d5 5897fail:
39279cc3
CM
5898 if (drop_inode) {
5899 inode_dec_link_count(inode);
5900 iput(inode);
5901 }
b53d3f5d 5902 btrfs_btree_balance_dirty(root);
39279cc3
CM
5903 return err;
5904}
5905
/*
 * Create a directory @dentry in @dir with mode S_IFDIR | @mode.
 *
 * Reserves 5 transaction items (2 inode item + ref, 2 dir items, 1 xattr
 * for selinux).  drop_on_err is set once the inode exists and cleared
 * only after d_instantiate(), so every failure in between releases the
 * new inode with iput() at the end.
 */
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
	u64 objectid = 0;
	u64 index = 0;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFDIR | mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}

	/* inode exists now; error paths below must iput() it */
	drop_on_err = 1;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_fail;

	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	/* new directories start empty */
	btrfs_i_size_write(inode, 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;

	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	drop_on_err = 0;

out_fail:
	btrfs_end_transaction(trans, root);
	if (drop_on_err)
		iput(inode);
	btrfs_btree_balance_dirty(root);
	return err;
}
5966
d352ac68
CM
5967/* helper for btfs_get_extent. Given an existing extent in the tree,
5968 * and an extent that you want to insert, deal with overlap and insert
5969 * the new extent into the tree.
5970 */
3b951516
CM
5971static int merge_extent_mapping(struct extent_map_tree *em_tree,
5972 struct extent_map *existing,
e6dcd2dc
CM
5973 struct extent_map *em,
5974 u64 map_start, u64 map_len)
3b951516
CM
5975{
5976 u64 start_diff;
3b951516 5977
e6dcd2dc
CM
5978 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5979 start_diff = map_start - em->start;
5980 em->start = map_start;
5981 em->len = map_len;
c8b97818
CM
5982 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5983 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
e6dcd2dc 5984 em->block_start += start_diff;
c8b97818
CM
5985 em->block_len -= start_diff;
5986 }
09a2a8f9 5987 return add_extent_mapping(em_tree, em, 0);
3b951516
CM
5988}
5989
/*
 * Decompress an inline file extent into @page.
 *
 * Copies the compressed inline bytes out of the leaf into a temporary
 * buffer and runs btrfs_decompress() on them.  If decompression fails,
 * the affected page range is zero-filled and 0 is still returned — the
 * only hard error surfaced to the caller is -ENOMEM for the temporary
 * buffer.  Callers expect pg_offset == 0 (WARN_ON below).
 */
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	/* pull the compressed bytes out of the metadata leaf */
	read_extent_buffer(leaf, tmp, ptr, inline_size);

	/* never decompress past one page */
	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		/* decompress failed: zero the page range instead of erroring */
		char *kaddr = kmap_atomic(page);
		unsigned long copy_size = min_t(u64,
				PAGE_CACHE_SIZE - pg_offset,
				max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr);
	}
	kfree(tmp);
	return 0;
}
6029
/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 *
 * Returns an extent_map covering @start (never NULL on success) or an
 * ERR_PTR on failure.  The fast path is a hit in the in-memory extent map
 * tree; otherwise the file extent item is looked up in the btree and a new
 * extent_map is built, inserted, and returned.
 */

struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 len,
				    int create)
{
	int ret;
	int err = 0;
	u64 bytenr;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	u32 found_type;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_trans_handle *trans = NULL;
	int compress_type;

again:
	/* fast path: the range may already be cached in the extent map tree */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
	read_unlock(&em_tree->lock);

	if (em) {
		/*
		 * a cached map that doesn't actually cover @start, or an
		 * inline extent when the caller wants page data, forces a
		 * fresh btree lookup
		 */
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * Chances are we'll be called again, so go ahead and do
		 * readahead
		 */
		path->reada = 1;
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		/* no exact match: back up to the preceding item, if any */
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	compress_type = btrfs_file_extent_compression(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_end = ALIGN(extent_start + size, root->sectorsize);
	}

	if (start >= extent_end) {
		/*
		 * the item we found ends before @start; peek at the next
		 * item to size the hole between the two extents
		 */
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf,
								      item);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
			/* disk_bytenr of 0 marks a hole */
			em->block_start = EXTENT_MAP_HOLE;
			goto insert;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		em->block_start = EXTENT_MAP_INLINE;
		if (!page || create) {
			em->start = extent_start;
			em->len = extent_end - extent_start;
			goto out;
		}

		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
				size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = ALIGN(copy_size, root->sectorsize);
		em->orig_block_len = em->len;
		em->orig_start = em->start;
		if (compress_type) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			/* read path: materialize the inline data into @page */
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret); /* -ENOMEM */
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			/* write-back of an inline extent: believed unreachable
			 * here, hence the BUG() guard before the legacy code */
			BUG();
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;

				btrfs_release_path(path);
				trans = btrfs_join_transaction(root);

				if (IS_ERR(trans))
					return ERR_CAST(trans);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	} else {
		WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type);
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
			(unsigned long long)em->start,
			(unsigned long long)em->len,
			(unsigned long long)start,
			(unsigned long long)len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			/* the racing insert covers @start; use it instead */
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:

	if (em)
		trace_btrfs_get_extent(root, em);

	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	BUG_ON(!em); /* Error is always set */
	return em;
}
6340
/*
 * fiemap-oriented wrapper around btrfs_get_extent().
 *
 * When the plain lookup returns a hole or a prealloc extent, scan the
 * io_tree for delalloc bytes hiding behind it and synthesize an
 * EXTENT_MAP_DELALLOC map (or a trimmed hole map) so fiemap reports
 * not-yet-written data.  Returns an extent_map or ERR_PTR.
 */
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to
		 * -  a hole or
		 * -  a pre-alloc extent,
		 * there might actually be delalloc bytes behind it.
		 */
		if (em->block_start != EXTENT_MAP_HOLE &&
		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start,range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		} else {
			/* delalloc starts first: report it directly */
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}
6467
4b46fce2
JB
6468static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
6469 u64 start, u64 len)
6470{
6471 struct btrfs_root *root = BTRFS_I(inode)->root;
6472 struct btrfs_trans_handle *trans;
70c8a91c 6473 struct extent_map *em;
4b46fce2
JB
6474 struct btrfs_key ins;
6475 u64 alloc_hint;
6476 int ret;
4b46fce2 6477
7a7eaa40 6478 trans = btrfs_join_transaction(root);
3612b495
TI
6479 if (IS_ERR(trans))
6480 return ERR_CAST(trans);
4b46fce2
JB
6481
6482 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
6483
6484 alloc_hint = get_extent_allocation_hint(inode, start, len);
6485 ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
81c9ad23 6486 alloc_hint, &ins, 1);
4b46fce2
JB
6487 if (ret) {
6488 em = ERR_PTR(ret);
6489 goto out;
6490 }
6491
70c8a91c 6492 em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
cc95bef6 6493 ins.offset, ins.offset, ins.offset, 0);
70c8a91c
JB
6494 if (IS_ERR(em))
6495 goto out;
4b46fce2
JB
6496
6497 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
6498 ins.offset, ins.offset, 0);
6499 if (ret) {
6500 btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
6501 em = ERR_PTR(ret);
6502 }
6503out:
6504 btrfs_end_transaction(trans, root);
6505 return em;
6506}
6507
/*
 * returns 1 when the nocow is safe, < 0 on error, 0 if the
 * block must be cow'd
 *
 * On success (return 1), *len is trimmed to the number of bytes that can
 * be written in place, and *orig_start / *orig_block_len / *ram_bytes are
 * filled from the file extent item covering @offset.
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 *len,
				      u64 *orig_start, u64 *orig_block_len,
				      u64 *ram_bytes)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		/* no exact match: look at the preceding item */
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	*orig_start = key.offset - backref_offset;
	*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
	*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + *len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + *len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	*len = num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
6614
/*
 * Lock [@lockstart, @lockend] in the inode's io_tree for direct I/O.
 *
 * Loops until the range is locked with no ordered extents pending and
 * (for writes) no stale buffered pages in the way.  Returns 0 with the
 * extent range locked, or a negative error if the page cache could not
 * be flushed/invalidated — callers fall back to buffered I/O then.
 */
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state, int writing)
{
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure theres no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered && (!writing ||
		    !test_range_bit(&BTRFS_I(inode)->io_tree,
				    lockstart, lockend, EXTENT_UPTODATE, 0,
				    *cached_state)))
			break;

		/* drop the lock before waiting, then retry from the top */
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     cached_state, GFP_NOFS);

		if (ordered) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
		} else {
			/* Screw you mmap */
			ret = filemap_write_and_wait_range(inode->i_mapping,
							   lockstart,
							   lockend);
			if (ret)
				break;

			/*
			 * If we found a page that couldn't be invalidated just
			 * fall back to buffered.
			 */
			ret = invalidate_inode_pages2_range(inode->i_mapping,
					lockstart >> PAGE_CACHE_SHIFT,
					lockend >> PAGE_CACHE_SHIFT);
			if (ret)
				break;
		}

		cond_resched();
	}

	return ret;
}
6675
/*
 * Build a pinned extent_map for [@start, @start+@len) pointing at
 * @block_start on disk and insert it into the inode's extent map tree,
 * dropping any overlapping cached maps until the insert succeeds.
 *
 * @type == BTRFS_ORDERED_PREALLOC additionally marks the map as FILLING.
 * Returns the new map or ERR_PTR (-ENOMEM, or the add_extent_mapping
 * error).  generation is left at -1 — presumably set later when the
 * extent is committed; confirm against the ordered-extent completion path.
 */
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
					   u64 len, u64 orig_start,
					   u64 block_start, u64 block_len,
					   u64 orig_block_len, u64 ram_bytes,
					   int type)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	em_tree = &BTRFS_I(inode)->extent_tree;
	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->mod_start = start;
	em->mod_len = len;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	if (type == BTRFS_ORDERED_PREALLOC)
		set_bit(EXTENT_FLAG_FILLING, &em->flags);

	/* keep dropping overlapping cached maps until ours goes in */
	do {
		btrfs_drop_extent_cache(inode, em->start,
					em->start + em->len - 1, 0);
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 1);
		write_unlock(&em_tree->lock);
	} while (ret == -EEXIST);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	return em;
}
6722
6723
4b46fce2
JB
/*
 * get_block callback for btrfs direct I/O (handed to __blockdev_direct_IO).
 *
 * Maps the file range starting at @iblock into @bh_result.  The extent range
 * is locked up front via lock_extent_direct(); on the success path the lock
 * is either converted to delalloc bits (writes) or the unused tail is
 * unlocked, and on every error path the whole range is unlocked again under
 * the "unlock_err" label.
 *
 * Returns 0 on success, -ENOTBLK to make the generic DIO code fall back to
 * buffered I/O (inline/compressed extents, pagecache invalidation failure),
 * or another negative errno.
 */
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	u64 start = iblock << inode->i_blkbits;
	u64 lockstart, lockend;
	u64 len = bh_result->b_size;
	struct btrfs_trans_handle *trans;
	int unlock_bits = EXTENT_LOCKED;
	int ret = 0;

	if (create)
		unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
	else
		/* reads are mapped at most one sector at a time */
		len = min_t(u64, len, root->sectorsize);

	lockstart = start;
	lockend = start + len - 1;

	/*
	 * If this errors out it's because we couldn't invalidate pagecache for
	 * this range and we need to fallback to buffered.
	 */
	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
		return -ENOTBLK;

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		ret = -ENOTBLK;
		goto unlock_err;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		goto unlock_err;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = min(len, em->len - (start - em->start));
		/* bumping lockstart means only the tail gets unlocked below */
		lockstart = start + len;
		goto unlock;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		/* NOTE: shadows the outer ret; outer ret stays 0 on this path */
		int ret;
		u64 block_start, orig_start, orig_block_len, ram_bytes;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		/*
		 * we're not going to log anything, but we do need
		 * to make sure the current transaction stays open
		 * while we look for nocow cross refs
		 */
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			goto must_cow;

		if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
				      &orig_block_len, &ram_bytes) == 1) {
			if (type == BTRFS_ORDERED_PREALLOC) {
				free_extent_map(em);
				em = create_pinned_em(inode, start, len,
						       orig_start,
						       block_start, len,
						       orig_block_len,
						       ram_bytes, type);
				if (IS_ERR(em)) {
					btrfs_end_transaction(trans, root);
					goto unlock_err;
				}
			}

			ret = btrfs_add_ordered_extent_dio(inode, start,
					   block_start, len, len, type);
			btrfs_end_transaction(trans, root);
			if (ret) {
				free_extent_map(em);
				goto unlock_err;
			}
			goto unlock;
		}
		btrfs_end_transaction(trans, root);
	}
must_cow:
	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	free_extent_map(em);
	em = btrfs_new_extent_direct(inode, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}
	len = min(len, em->len - (start - em->start));
unlock:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create) {
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			set_buffer_new(bh_result);

		/*
		 * Need to update the i_size under the extent lock so buffered
		 * readers will get the updated i_size when we unlock.
		 */
		if (start + len > i_size_read(inode))
			i_size_write(inode, start + len);

		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents++;
		spin_unlock(&BTRFS_I(inode)->lock);

		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				     lockstart + len - 1, EXTENT_DELALLOC, NULL,
				     &cached_state, GFP_NOFS);
		BUG_ON(ret);
	}

	/*
	 * In the case of write we need to clear and unlock the entire range,
	 * in the case of read we need to unlock only the end area that we
	 * aren't using if there is any left over space.
	 */
	if (lockstart < lockend) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				 lockend, unlock_bits, 1, 0,
				 &cached_state, GFP_NOFS);
	} else {
		free_extent_state(cached_state);
	}

	free_extent_map(em);

	return 0;

unlock_err:
	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
	return ret;
}
6912
/*
 * Per-request state for a btrfs direct I/O.  Every bio the original request
 * is split into points back at this via bi_private; the last endio to run
 * (pending_bios reaching zero, see btrfs_end_dio_bio) completes the original
 * bio, and the read/write endio handlers free the structure with kfree().
 */
struct btrfs_dio_private {
	struct inode *inode;	/* inode the I/O targets */
	u64 logical_offset;	/* file offset of the first byte */
	u64 disk_bytenr;	/* starting disk byte of the request */
	u64 bytes;		/* total byte length of the original bio */
	void *private;		/* caller's bi_private, restored on completion */

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	struct bio *orig_bio;	/* the undivided bio handed to us by the VFS */
};
6928
/*
 * Completion handler for a direct read.  Walks every bio_vec, recomputes the
 * data checksum and compares it against the csum stored in the io_tree
 * (skipped entirely for NODATASUM inodes).  Any mismatch or missing stored
 * csum forces err to -EIO.  Afterwards the extent range is unlocked, the
 * dio_private is freed and completion is passed back to the generic DIO code.
 */
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 start;

	start = dip->logical_offset;
	do {
		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
			struct page *page = bvec->bv_page;
			char *kaddr;
			u32 csum = ~(u32)0;
			u64 private = ~(u32)0;
			unsigned long flags;

			/* no stored csum for this range: jump straight to the
			 * error report inside the if below */
			if (get_state_private(&BTRFS_I(inode)->io_tree,
					      start, &private))
				goto failed;
			/* irqs off around the atomic kmap of the data page */
			local_irq_save(flags);
			kaddr = kmap_atomic(page);
			csum = btrfs_csum_data(kaddr + bvec->bv_offset,
					       csum, bvec->bv_len);
			btrfs_csum_final(csum, (char *)&csum);
			kunmap_atomic(kaddr);
			local_irq_restore(flags);

			flush_dcache_page(bvec->bv_page);
			if (csum != private) {
failed:
				btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u private %u",
					(unsigned long long)btrfs_ino(inode),
					(unsigned long long)start,
					csum, (unsigned)private);
				err = -EIO;
			}
		}

		start += bvec->bv_len;
		bvec++;
	} while (bvec <= bvec_end);

	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1);
	/* hand the bio back to its original owner before completing it */
	bio->bi_private = dip->private;

	kfree(dip);

	/* If we had a csum failure make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}
6984
/*
 * Completion handler for a direct write.  A single DIO bio may cover several
 * ordered extents, so we loop: each btrfs_dec_test_first_ordered_pending()
 * call accounts one ordered extent and advances ordered_offset; completed
 * ordered extents are queued to the endio_write workers to be finished.
 * On err everything is skipped and the error is just propagated.
 */
static void btrfs_endio_direct_write(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_extent *ordered = NULL;
	u64 ordered_offset = dip->logical_offset;
	u64 ordered_bytes = dip->bytes;
	int ret;

	if (err)
		goto out_done;
again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes, !err);
	if (!ret)
		goto out_test;

	/* finish this ordered extent from the endio_write workqueue */
	ordered->work.func = finish_ordered_fn;
	ordered->work.flags = 0;
	btrfs_queue_worker(&root->fs_info->endio_write_workers,
			   &ordered->work);
out_test:
	/*
	 * our bio might span multiple ordered extents. If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < dip->logical_offset + dip->bytes) {
		ordered_bytes = dip->logical_offset + dip->bytes -
			ordered_offset;
		ordered = NULL;
		goto again;
	}
out_done:
	/* restore the caller's bi_private before completing the bio */
	bio->bi_private = dip->private;

	kfree(dip);

	/* If we had an error make sure to clear the uptodate flag */
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	dio_end_io(bio, err);
}
7029
eaf25d93
CM
7030static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
7031 struct bio *bio, int mirror_num,
7032 unsigned long bio_flags, u64 offset)
7033{
7034 int ret;
7035 struct btrfs_root *root = BTRFS_I(inode)->root;
7036 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
79787eaa 7037 BUG_ON(ret); /* -ENOMEM */
eaf25d93
CM
7038 return 0;
7039}
7040
e65e1535
MX
/*
 * Per-split-bio completion handler.  Records any error in the shared
 * dio_private, and when the last pending bio finishes (pending_bios drops
 * to zero) completes the original bio with success or failure.
 */
static void btrfs_end_dio_bio(struct bio *bio, int err)
{
	struct btrfs_dio_private *dip = bio->bi_private;

	if (err) {
		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
		      "sector %#Lx len %u err no %d\n",
		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
		dip->errors = 1;

		/*
		 * before atomic variable goto zero, we must make sure
		 * dip->errors is perceived to be set.
		 */
		smp_mb__before_atomic_dec();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	/* we were the last piece: complete the original bio */
	if (dip->errors)
		bio_io_error(dip->orig_bio);
	else {
		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
		bio_endio(dip->orig_bio, 0);
	}
out:
	bio_put(bio);
}
7072
7073static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
7074 u64 first_sector, gfp_t gfp_flags)
7075{
7076 int nr_vecs = bio_get_nr_vecs(bdev);
7077 return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
7078}
7079
7080static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
7081 int rw, u64 file_offset, int skip_sum,
c329861d 7082 int async_submit)
e65e1535
MX
7083{
7084 int write = rw & REQ_WRITE;
7085 struct btrfs_root *root = BTRFS_I(inode)->root;
7086 int ret;
7087
b812ce28
JB
7088 if (async_submit)
7089 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
7090
e65e1535 7091 bio_get(bio);
5fd02043
JB
7092
7093 if (!write) {
7094 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
7095 if (ret)
7096 goto err;
7097 }
e65e1535 7098
1ae39938
JB
7099 if (skip_sum)
7100 goto map;
7101
7102 if (write && async_submit) {
e65e1535
MX
7103 ret = btrfs_wq_submit_bio(root->fs_info,
7104 inode, rw, bio, 0, 0,
7105 file_offset,
7106 __btrfs_submit_bio_start_direct_io,
7107 __btrfs_submit_bio_done);
7108 goto err;
1ae39938
JB
7109 } else if (write) {
7110 /*
7111 * If we aren't doing async submit, calculate the csum of the
7112 * bio now.
7113 */
7114 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
7115 if (ret)
7116 goto err;
c2db1073 7117 } else if (!skip_sum) {
c329861d 7118 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
c2db1073
TI
7119 if (ret)
7120 goto err;
7121 }
e65e1535 7122
1ae39938
JB
7123map:
7124 ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
e65e1535
MX
7125err:
7126 bio_put(bio);
7127 return ret;
7128}
7129
/*
 * Submit the direct I/O described by @dip, splitting the original bio
 * wherever it would cross a chunk-mapping boundary (btrfs_map_block tells us
 * how far the current mapping extends).  Each split bio takes a reference in
 * dip->pending_bios; btrfs_end_dio_bio completes the original bio when the
 * count drops to zero.
 *
 * Always returns 0 once the first split bio is in flight (errors after that
 * point are reported through the endio path); returns a negative errno only
 * for failures before anything was submitted.
 */
static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
				    int skip_sum)
{
	struct inode *inode = dip->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct bio *bio;
	struct bio *orig_bio = dip->orig_bio;
	struct bio_vec *bvec = orig_bio->bi_io_vec;
	u64 start_sector = orig_bio->bi_sector;
	u64 file_offset = dip->logical_offset;
	u64 submit_len = 0;
	u64 map_length;
	int nr_pages = 0;
	int ret = 0;
	int async_submit = 0;

	map_length = orig_bio->bi_size;
	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
			      &map_length, NULL, 0);
	if (ret) {
		bio_put(orig_bio);
		return -EIO;
	}
	/* the whole bio fits in one mapping: no splitting needed */
	if (map_length >= orig_bio->bi_size) {
		bio = orig_bio;
		goto submit;
	}

	/* async crcs make it difficult to collect full stripe writes. */
	if (btrfs_get_alloc_profile(root, 1) &
	    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
		async_submit = 0;
	else
		async_submit = 1;

	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	bio->bi_private = dip;
	bio->bi_end_io = btrfs_end_dio_bio;
	atomic_inc(&dip->pending_bios);

	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
		/* current split bio is full (mapping exhausted or bio full):
		 * submit it and start a new one */
		if (unlikely(map_length < submit_len + bvec->bv_len ||
		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len)) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the dip might get freed
			 * before we're done setting it up
			 */
			atomic_inc(&dip->pending_bios);
			ret = __btrfs_submit_dio_bio(bio, inode, rw,
						     file_offset, skip_sum,
						     async_submit);
			if (ret) {
				bio_put(bio);
				atomic_dec(&dip->pending_bios);
				goto out_err;
			}

			start_sector += submit_len >> 9;
			file_offset += submit_len;

			submit_len = 0;
			nr_pages = 0;

			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
						  start_sector, GFP_NOFS);
			if (!bio)
				goto out_err;
			bio->bi_private = dip;
			bio->bi_end_io = btrfs_end_dio_bio;

			/* re-query how far the next mapping extends */
			map_length = orig_bio->bi_size;
			ret = btrfs_map_block(root->fs_info, rw,
					      start_sector << 9,
					      &map_length, NULL, 0);
			if (ret) {
				bio_put(bio);
				goto out_err;
			}
		} else {
			submit_len += bvec->bv_len;
			nr_pages ++;
			bvec++;
		}
	}

submit:
	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
				     async_submit);
	if (!ret)
		return 0;

	bio_put(bio);
out_err:
	dip->errors = 1;
	/*
	 * before atomic variable goto zero, we must
	 * make sure dip->errors is perceived to be set.
	 */
	smp_mb__before_atomic_dec();
	if (atomic_dec_and_test(&dip->pending_bios))
		bio_io_error(dip->orig_bio);

	/* bio_end_io() will handle error, so we needn't return it */
	return 0;
}
7240
4b46fce2
JB
/*
 * Entry point the generic DIO code calls to submit @bio.  Allocates the
 * per-request btrfs_dio_private, records the total length and the caller's
 * bi_private, installs the read/write endio handler and hands the bio to
 * btrfs_submit_direct_hook().  On failure before submission, the reserved
 * space / ordered extent created for a write is torn down and the bio is
 * completed with the error.
 */
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_dio_private *dip;
	struct bio_vec *bvec = bio->bi_io_vec;
	int skip_sum;
	int write = rw & REQ_WRITE;
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	dip = kmalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}

	dip->private = bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;

	/* total request length = sum of all bio_vec lengths */
	dip->bytes = 0;
	do {
		dip->bytes += bvec->bv_len;
		bvec++;
	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));

	dip->disk_bytenr = (u64)bio->bi_sector << 9;
	bio->bi_private = dip;
	dip->errors = 0;
	dip->orig_bio = bio;
	atomic_set(&dip->pending_bios, 0);

	if (write)
		bio->bi_end_io = btrfs_endio_direct_write;
	else
		bio->bi_end_io = btrfs_endio_direct_read;

	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
	if (!ret)
		return;
free_ordered:
	/*
	 * If this is a write, we need to clean up the reserved space and kill
	 * the ordered extent.
	 */
	if (write) {
		struct btrfs_ordered_extent *ordered;
		/*
		 * NOTE(review): ordered is dereferenced without a NULL check
		 * below -- presumably the write path guarantees an ordered
		 * extent exists at file_offset here; confirm against
		 * btrfs_get_blocks_direct.
		 */
		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
			btrfs_free_reserved_extent(root, ordered->start,
						   ordered->disk_len);
		/* one put for the lookup, one to drop the ordered extent */
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	bio_endio(bio, ret);
}
7300
5a5f79b5
CM
7301static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
7302 const struct iovec *iov, loff_t offset,
7303 unsigned long nr_segs)
7304{
7305 int seg;
a1b75f7d 7306 int i;
5a5f79b5
CM
7307 size_t size;
7308 unsigned long addr;
7309 unsigned blocksize_mask = root->sectorsize - 1;
7310 ssize_t retval = -EINVAL;
7311 loff_t end = offset;
7312
7313 if (offset & blocksize_mask)
7314 goto out;
7315
7316 /* Check the memory alignment. Blocks cannot straddle pages */
7317 for (seg = 0; seg < nr_segs; seg++) {
7318 addr = (unsigned long)iov[seg].iov_base;
7319 size = iov[seg].iov_len;
7320 end += size;
a1b75f7d 7321 if ((addr & blocksize_mask) || (size & blocksize_mask))
5a5f79b5 7322 goto out;
a1b75f7d
JB
7323
7324 /* If this is a write we don't need to check anymore */
7325 if (rw & WRITE)
7326 continue;
7327
7328 /*
7329 * Check to make sure we don't have duplicate iov_base's in this
7330 * iovec, if so return EINVAL, otherwise we'll get csum errors
7331 * when reading back.
7332 */
7333 for (i = seg + 1; i < nr_segs; i++) {
7334 if (iov[seg].iov_base == iov[i].iov_base)
7335 goto out;
7336 }
5a5f79b5
CM
7337 }
7338 retval = 0;
7339out:
7340 return retval;
7341}
eb838e73 7342
16432985
CM
/*
 * address_space direct_IO entry point.  Validates alignment, takes an extra
 * i_dio_count reference, then for writes reserves delalloc space (dropping
 * i_mutex when the write stays inside i_size so reads can proceed), runs the
 * generic __blockdev_direct_IO with our get_block/submit hooks, and finally
 * releases whatever part of the reservation was not consumed.
 */
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	size_t count = 0;
	int flags = 0;
	bool wakeup = true;
	bool relock = false;
	ssize_t ret;

	/* misaligned or duplicate segments: decline DIO, do 0 bytes */
	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
			    offset, nr_segs))
		return 0;

	atomic_inc(&inode->i_dio_count);
	smp_mb__after_atomic_inc();

	if (rw & WRITE) {
		count = iov_length(iov, nr_segs);
		/*
		 * If the write DIO is beyond the EOF, we need update
		 * the isize, but it is protected by i_mutex. So we can
		 * not unlock the i_mutex at this case.
		 */
		if (offset + count <= inode->i_size) {
			mutex_unlock(&inode->i_mutex);
			relock = true;
		}
		ret = btrfs_delalloc_reserve_space(inode, count);
		if (ret)
			goto out;
	} else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
				     &BTRFS_I(inode)->runtime_flags))) {
		/* drop our i_dio_count ref; the flagged path locks instead */
		inode_dio_done(inode);
		flags = DIO_LOCKING | DIO_SKIP_HOLES;
		wakeup = false;
	}

	ret = __blockdev_direct_IO(rw, iocb, inode,
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
			iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
			btrfs_submit_direct, flags);
	if (rw & WRITE) {
		/* give back the part of the reservation we did not use */
		if (ret < 0 && ret != -EIOCBQUEUED)
			btrfs_delalloc_release_space(inode, count);
		else if (ret >= 0 && (size_t)ret < count)
			btrfs_delalloc_release_space(inode,
						     count - (size_t)ret);
		else
			btrfs_delalloc_release_metadata(inode, 0);
	}
out:
	if (wakeup)
		inode_dio_done(inode);
	if (relock)
		mutex_lock(&inode->i_mutex);

	return ret;
}
7404
05dadc09
TI
7405#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
7406
1506fcc8
YS
7407static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7408 __u64 start, __u64 len)
7409{
05dadc09
TI
7410 int ret;
7411
7412 ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
7413 if (ret)
7414 return ret;
7415
ec29ed5b 7416 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
1506fcc8
YS
7417}
7418
a52d9a80 7419int btrfs_readpage(struct file *file, struct page *page)
9ebefb18 7420{
d1310b2e
CM
7421 struct extent_io_tree *tree;
7422 tree = &BTRFS_I(page->mapping->host)->io_tree;
8ddc7d9c 7423 return extent_read_full_page(tree, page, btrfs_get_extent, 0);
9ebefb18 7424}
1832a6d5 7425
a52d9a80 7426static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
39279cc3 7427{
d1310b2e 7428 struct extent_io_tree *tree;
b888db2b
CM
7429
7430
7431 if (current->flags & PF_MEMALLOC) {
7432 redirty_page_for_writepage(wbc, page);
7433 unlock_page(page);
7434 return 0;
7435 }
d1310b2e 7436 tree = &BTRFS_I(page->mapping->host)->io_tree;
a52d9a80 7437 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9ebefb18
CM
7438}
7439
f421950f
CM
7440int btrfs_writepages(struct address_space *mapping,
7441 struct writeback_control *wbc)
b293f02e 7442{
d1310b2e 7443 struct extent_io_tree *tree;
771ed689 7444
d1310b2e 7445 tree = &BTRFS_I(mapping->host)->io_tree;
b293f02e
CM
7446 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
7447}
7448
3ab2fb5a
CM
7449static int
7450btrfs_readpages(struct file *file, struct address_space *mapping,
7451 struct list_head *pages, unsigned nr_pages)
7452{
d1310b2e
CM
7453 struct extent_io_tree *tree;
7454 tree = &BTRFS_I(mapping->host)->io_tree;
3ab2fb5a
CM
7455 return extent_readpages(tree, mapping, pages, nr_pages,
7456 btrfs_get_extent);
7457}
e6dcd2dc 7458static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
9ebefb18 7459{
d1310b2e
CM
7460 struct extent_io_tree *tree;
7461 struct extent_map_tree *map;
a52d9a80 7462 int ret;
8c2383c3 7463
d1310b2e
CM
7464 tree = &BTRFS_I(page->mapping->host)->io_tree;
7465 map = &BTRFS_I(page->mapping->host)->extent_tree;
70dec807 7466 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
a52d9a80
CM
7467 if (ret == 1) {
7468 ClearPagePrivate(page);
7469 set_page_private(page, 0);
7470 page_cache_release(page);
39279cc3 7471 }
a52d9a80 7472 return ret;
39279cc3
CM
7473}
7474
e6dcd2dc
CM
7475static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7476{
98509cfc
CM
7477 if (PageWriteback(page) || PageDirty(page))
7478 return 0;
b335b003 7479 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
e6dcd2dc
CM
7480}
7481
a52d9a80 7482static void btrfs_invalidatepage(struct page *page, unsigned long offset)
39279cc3 7483{
5fd02043 7484 struct inode *inode = page->mapping->host;
d1310b2e 7485 struct extent_io_tree *tree;
e6dcd2dc 7486 struct btrfs_ordered_extent *ordered;
2ac55d41 7487 struct extent_state *cached_state = NULL;
e6dcd2dc
CM
7488 u64 page_start = page_offset(page);
7489 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
39279cc3 7490
8b62b72b
CM
7491 /*
7492 * we have the page locked, so new writeback can't start,
7493 * and the dirty bit won't be cleared while we are here.
7494 *
7495 * Wait for IO on this page so that we can safely clear
7496 * the PagePrivate2 bit and do ordered accounting
7497 */
e6dcd2dc 7498 wait_on_page_writeback(page);
8b62b72b 7499
5fd02043 7500 tree = &BTRFS_I(inode)->io_tree;
e6dcd2dc
CM
7501 if (offset) {
7502 btrfs_releasepage(page, GFP_NOFS);
7503 return;
7504 }
d0082371 7505 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
4eee4fa4 7506 ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
e6dcd2dc 7507 if (ordered) {
eb84ae03
CM
7508 /*
7509 * IO on this page will never be started, so we need
7510 * to account for any ordered extents now
7511 */
e6dcd2dc
CM
7512 clear_extent_bit(tree, page_start, page_end,
7513 EXTENT_DIRTY | EXTENT_DELALLOC |
9e8a4a8b
LB
7514 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7515 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
8b62b72b
CM
7516 /*
7517 * whoever cleared the private bit is responsible
7518 * for the finish_ordered_io
7519 */
5fd02043
JB
7520 if (TestClearPagePrivate2(page) &&
7521 btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
7522 PAGE_CACHE_SIZE, 1)) {
7523 btrfs_finish_ordered_io(ordered);
8b62b72b 7524 }
e6dcd2dc 7525 btrfs_put_ordered_extent(ordered);
2ac55d41 7526 cached_state = NULL;
d0082371 7527 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
e6dcd2dc
CM
7528 }
7529 clear_extent_bit(tree, page_start, page_end,
32c00aff 7530 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
9e8a4a8b
LB
7531 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
7532 &cached_state, GFP_NOFS);
e6dcd2dc
CM
7533 __btrfs_releasepage(page, GFP_NOFS);
7534
4a096752 7535 ClearPageChecked(page);
9ad6b7bc 7536 if (PagePrivate(page)) {
9ad6b7bc
CM
7537 ClearPagePrivate(page);
7538 set_page_private(page, 0);
7539 page_cache_release(page);
7540 }
39279cc3
CM
7541}
7542
9ebefb18
CM
7543/*
7544 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
7545 * called from a page fault handler when a page is first dirtied. Hence we must
7546 * be careful to check for EOF conditions here. We set the page up correctly
7547 * for a written page which means we get ENOSPC checking when writing into
7548 * holes and correct delalloc and unwritten extent mapping on filesystems that
7549 * support these features.
7550 *
7551 * We are not allowed to take the i_mutex here so we have to play games to
7552 * protect against truncate races as the page could now be beyond EOF. Because
7553 * vmtruncate() writes the inode size before removing pages, once we have the
7554 * page lock we can determine safely if the page is beyond EOF. If it is not
7555 * beyond EOF, then the page is guaranteed safe against truncation until we
7556 * unlock the page.
7557 */
c2ec175c 7558int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
9ebefb18 7559{
c2ec175c 7560 struct page *page = vmf->page;
496ad9aa 7561 struct inode *inode = file_inode(vma->vm_file);
1832a6d5 7562 struct btrfs_root *root = BTRFS_I(inode)->root;
e6dcd2dc
CM
7563 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7564 struct btrfs_ordered_extent *ordered;
2ac55d41 7565 struct extent_state *cached_state = NULL;
e6dcd2dc
CM
7566 char *kaddr;
7567 unsigned long zero_start;
9ebefb18 7568 loff_t size;
1832a6d5 7569 int ret;
9998eb70 7570 int reserved = 0;
a52d9a80 7571 u64 page_start;
e6dcd2dc 7572 u64 page_end;
9ebefb18 7573
b2b5ef5c 7574 sb_start_pagefault(inode->i_sb);
0ca1f7ce 7575 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
9998eb70 7576 if (!ret) {
e41f941a 7577 ret = file_update_time(vma->vm_file);
9998eb70
CM
7578 reserved = 1;
7579 }
56a76f82
NP
7580 if (ret) {
7581 if (ret == -ENOMEM)
7582 ret = VM_FAULT_OOM;
7583 else /* -ENOSPC, -EIO, etc */
7584 ret = VM_FAULT_SIGBUS;
9998eb70
CM
7585 if (reserved)
7586 goto out;
7587 goto out_noreserve;
56a76f82 7588 }
1832a6d5 7589
56a76f82 7590 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
e6dcd2dc 7591again:
9ebefb18 7592 lock_page(page);
9ebefb18 7593 size = i_size_read(inode);
e6dcd2dc
CM
7594 page_start = page_offset(page);
7595 page_end = page_start + PAGE_CACHE_SIZE - 1;
a52d9a80 7596
9ebefb18 7597 if ((page->mapping != inode->i_mapping) ||
e6dcd2dc 7598 (page_start >= size)) {
9ebefb18
CM
7599 /* page got truncated out from underneath us */
7600 goto out_unlock;
7601 }
e6dcd2dc
CM
7602 wait_on_page_writeback(page);
7603
d0082371 7604 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
e6dcd2dc
CM
7605 set_page_extent_mapped(page);
7606
eb84ae03
CM
7607 /*
7608 * we can't set the delalloc bits if there are pending ordered
7609 * extents. Drop our locks and wait for them to finish
7610 */
e6dcd2dc
CM
7611 ordered = btrfs_lookup_ordered_extent(inode, page_start);
7612 if (ordered) {
2ac55d41
JB
7613 unlock_extent_cached(io_tree, page_start, page_end,
7614 &cached_state, GFP_NOFS);
e6dcd2dc 7615 unlock_page(page);
eb84ae03 7616 btrfs_start_ordered_extent(inode, ordered, 1);
e6dcd2dc
CM
7617 btrfs_put_ordered_extent(ordered);
7618 goto again;
7619 }
7620
fbf19087
JB
7621 /*
7622 * XXX - page_mkwrite gets called every time the page is dirtied, even
7623 * if it was already dirty, so for space accounting reasons we need to
7624 * clear any delalloc bits for the range we are fixing to save. There
7625 * is probably a better way to do this, but for now keep consistent with
7626 * prepare_pages in the normal write path.
7627 */
2ac55d41 7628 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
9e8a4a8b
LB
7629 EXTENT_DIRTY | EXTENT_DELALLOC |
7630 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
2ac55d41 7631 0, 0, &cached_state, GFP_NOFS);
fbf19087 7632
2ac55d41
JB
7633 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
7634 &cached_state);
9ed74f2d 7635 if (ret) {
2ac55d41
JB
7636 unlock_extent_cached(io_tree, page_start, page_end,
7637 &cached_state, GFP_NOFS);
9ed74f2d
JB
7638 ret = VM_FAULT_SIGBUS;
7639 goto out_unlock;
7640 }
e6dcd2dc 7641 ret = 0;
9ebefb18
CM
7642
7643 /* page is wholly or partially inside EOF */
a52d9a80 7644 if (page_start + PAGE_CACHE_SIZE > size)
e6dcd2dc 7645 zero_start = size & ~PAGE_CACHE_MASK;
9ebefb18 7646 else
e6dcd2dc 7647 zero_start = PAGE_CACHE_SIZE;
9ebefb18 7648
e6dcd2dc
CM
7649 if (zero_start != PAGE_CACHE_SIZE) {
7650 kaddr = kmap(page);
7651 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
7652 flush_dcache_page(page);
7653 kunmap(page);
7654 }
247e743c 7655 ClearPageChecked(page);
e6dcd2dc 7656 set_page_dirty(page);
50a9b214 7657 SetPageUptodate(page);
5a3f23d5 7658
257c62e1
CM
7659 BTRFS_I(inode)->last_trans = root->fs_info->generation;
7660 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
46d8bc34 7661 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
257c62e1 7662
2ac55d41 7663 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
9ebefb18
CM
7664
7665out_unlock:
b2b5ef5c
JK
7666 if (!ret) {
7667 sb_end_pagefault(inode->i_sb);
50a9b214 7668 return VM_FAULT_LOCKED;
b2b5ef5c 7669 }
9ebefb18 7670 unlock_page(page);
1832a6d5 7671out:
ec39e180 7672 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
9998eb70 7673out_noreserve:
b2b5ef5c 7674 sb_end_pagefault(inode->i_sb);
9ebefb18
CM
7675 return ret;
7676}
7677
a41ad394 7678static int btrfs_truncate(struct inode *inode)
39279cc3
CM
7679{
7680 struct btrfs_root *root = BTRFS_I(inode)->root;
fcb80c2a 7681 struct btrfs_block_rsv *rsv;
39279cc3 7682 int ret;
3893e33b 7683 int err = 0;
39279cc3 7684 struct btrfs_trans_handle *trans;
dbe674a9 7685 u64 mask = root->sectorsize - 1;
07127184 7686 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
39279cc3 7687
2aaa6655 7688 ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
5d5e103a 7689 if (ret)
a41ad394 7690 return ret;
8082510e 7691
4a096752 7692 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
8082510e 7693 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
39279cc3 7694
fcb80c2a
JB
7695 /*
7696 * Yes ladies and gentelment, this is indeed ugly. The fact is we have
7697 * 3 things going on here
7698 *
7699 * 1) We need to reserve space for our orphan item and the space to
7700 * delete our orphan item. Lord knows we don't want to have a dangling
7701 * orphan item because we didn't reserve space to remove it.
7702 *
7703 * 2) We need to reserve space to update our inode.
7704 *
7705 * 3) We need to have something to cache all the space that is going to
7706 * be free'd up by the truncate operation, but also have some slack
7707 * space reserved in case it uses space during the truncate (thank you
7708 * very much snapshotting).
7709 *
7710 * And we need these to all be seperate. The fact is we can use alot of
7711 * space doing the truncate, and we have no earthly idea how much space
7712 * we will use, so we need the truncate reservation to be seperate so it
7713 * doesn't end up using space reserved for updating the inode or
7714 * removing the orphan item. We also need to be able to stop the
7715 * transaction and start a new one, which means we need to be able to
7716 * update the inode several times, and we have no idea of knowing how
7717 * many times that will be, so we can't just reserve 1 item for the
7718 * entirety of the opration, so that has to be done seperately as well.
7719 * Then there is the orphan item, which does indeed need to be held on
7720 * to for the whole operation, and we need nobody to touch this reserved
7721 * space except the orphan code.
7722 *
7723 * So that leaves us with
7724 *
7725 * 1) root->orphan_block_rsv - for the orphan deletion.
7726 * 2) rsv - for the truncate reservation, which we will steal from the
7727 * transaction reservation.
7728 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
7729 * updating the inode.
7730 */
66d8f3dd 7731 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
fcb80c2a
JB
7732 if (!rsv)
7733 return -ENOMEM;
4a338542 7734 rsv->size = min_size;
ca7e70f5 7735 rsv->failfast = 1;
f0cd846e 7736
907cbceb 7737 /*
07127184 7738 * 1 for the truncate slack space
907cbceb
JB
7739 * 1 for updating the inode.
7740 */
f3fe820c 7741 trans = btrfs_start_transaction(root, 2);
fcb80c2a
JB
7742 if (IS_ERR(trans)) {
7743 err = PTR_ERR(trans);
7744 goto out;
7745 }
f0cd846e 7746
907cbceb
JB
7747 /* Migrate the slack space for the truncate to our reserve */
7748 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
7749 min_size);
fcb80c2a 7750 BUG_ON(ret);
f0cd846e 7751
5a3f23d5
CM
7752 /*
7753 * setattr is responsible for setting the ordered_data_close flag,
7754 * but that is only tested during the last file release. That
7755 * could happen well after the next commit, leaving a great big
7756 * window where new writes may get lost if someone chooses to write
7757 * to this file after truncating to zero
7758 *
7759 * The inode doesn't have any dirty data here, and so if we commit
7760 * this is a noop. If someone immediately starts writing to the inode
7761 * it is very likely we'll catch some of their writes in this
7762 * transaction, and the commit will find this file on the ordered
7763 * data list with good things to send down.
7764 *
7765 * This is a best effort solution, there is still a window where
7766 * using truncate to replace the contents of the file will
7767 * end up with a zero length file after a crash.
7768 */
72ac3c0d
JB
7769 if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
7770 &BTRFS_I(inode)->runtime_flags))
5a3f23d5
CM
7771 btrfs_add_ordered_operation(trans, root, inode);
7772
5dc562c5
JB
7773 /*
7774 * So if we truncate and then write and fsync we normally would just
7775 * write the extents that changed, which is a problem if we need to
7776 * first truncate that entire inode. So set this flag so we write out
7777 * all of the extents in the inode to the sync log so we're completely
7778 * safe.
7779 */
7780 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
ca7e70f5 7781 trans->block_rsv = rsv;
907cbceb 7782
8082510e
YZ
7783 while (1) {
7784 ret = btrfs_truncate_inode_items(trans, root, inode,
7785 inode->i_size,
7786 BTRFS_EXTENT_DATA_KEY);
ca7e70f5 7787 if (ret != -ENOSPC) {
3893e33b 7788 err = ret;
8082510e 7789 break;
3893e33b 7790 }
39279cc3 7791
fcb80c2a 7792 trans->block_rsv = &root->fs_info->trans_block_rsv;
8082510e 7793 ret = btrfs_update_inode(trans, root, inode);
3893e33b
JB
7794 if (ret) {
7795 err = ret;
7796 break;
7797 }
ca7e70f5 7798
8082510e 7799 btrfs_end_transaction(trans, root);
b53d3f5d 7800 btrfs_btree_balance_dirty(root);
ca7e70f5
JB
7801
7802 trans = btrfs_start_transaction(root, 2);
7803 if (IS_ERR(trans)) {
7804 ret = err = PTR_ERR(trans);
7805 trans = NULL;
7806 break;
7807 }
7808
7809 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
7810 rsv, min_size);
7811 BUG_ON(ret); /* shouldn't happen */
7812 trans->block_rsv = rsv;
8082510e
YZ
7813 }
7814
7815 if (ret == 0 && inode->i_nlink > 0) {
fcb80c2a 7816 trans->block_rsv = root->orphan_block_rsv;
8082510e 7817 ret = btrfs_orphan_del(trans, inode);
3893e33b
JB
7818 if (ret)
7819 err = ret;
8082510e
YZ
7820 }
7821
917c16b2
CM
7822 if (trans) {
7823 trans->block_rsv = &root->fs_info->trans_block_rsv;
7824 ret = btrfs_update_inode(trans, root, inode);
7825 if (ret && !err)
7826 err = ret;
7b128766 7827
7ad85bb7 7828 ret = btrfs_end_transaction(trans, root);
b53d3f5d 7829 btrfs_btree_balance_dirty(root);
917c16b2 7830 }
fcb80c2a
JB
7831
7832out:
7833 btrfs_free_block_rsv(root, rsv);
7834
3893e33b
JB
7835 if (ret && !err)
7836 err = ret;
a41ad394 7837
3893e33b 7838 return err;
39279cc3
CM
7839}
7840
d352ac68
CM
7841/*
7842 * create a new subvolume directory/inode (helper for the ioctl).
7843 */
d2fb3437 7844int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
d82a6f1d 7845 struct btrfs_root *new_root, u64 new_dirid)
39279cc3 7846{
39279cc3 7847 struct inode *inode;
76dda93c 7848 int err;
00e4e6b3 7849 u64 index = 0;
39279cc3 7850
12fc9d09
FA
7851 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
7852 new_dirid, new_dirid,
7853 S_IFDIR | (~current_umask() & S_IRWXUGO),
7854 &index);
54aa1f4d 7855 if (IS_ERR(inode))
f46b5a66 7856 return PTR_ERR(inode);
39279cc3
CM
7857 inode->i_op = &btrfs_dir_inode_operations;
7858 inode->i_fop = &btrfs_dir_file_operations;
7859
bfe86848 7860 set_nlink(inode, 1);
dbe674a9 7861 btrfs_i_size_write(inode, 0);
3b96362c 7862
76dda93c 7863 err = btrfs_update_inode(trans, new_root, inode);
cb8e7090 7864
76dda93c 7865 iput(inode);
ce598979 7866 return err;
39279cc3
CM
7867}
7868
39279cc3
CM
/*
 * super_operations->alloc_inode: allocate a btrfs_inode from the slab and
 * initialize every embedded field to a sane empty state.  The VFS inode
 * portion itself is initialized by init_once() (slab constructor) and by
 * the VFS after this returns.
 *
 * Returns the embedded struct inode, or NULL on allocation failure.
 */
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->csum_bytes = 0;
	/* (u64)-1 means "directory index counter not loaded yet" */
	ei->index_cnt = (u64)-1;
	ei->last_unlink_trans = 0;
	ei->last_log_commit = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	ei->reserved_extents = 0;

	ei->runtime_flags = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	/* both io trees are backed by this inode's page cache mapping */
	extent_io_tree_init(&ei->io_tree, &inode->i_data);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
	ei->io_tree.track_uptodate = 1;
	ei->io_failure_tree.track_uptodate = 1;
	atomic_set(&ei->sync_writers, 0);
	mutex_init(&ei->log_mutex);
	mutex_init(&ei->delalloc_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}
7916
fa0d7e3d
NP
7917static void btrfs_i_callback(struct rcu_head *head)
7918{
7919 struct inode *inode = container_of(head, struct inode, i_rcu);
fa0d7e3d
NP
7920 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7921}
7922
39279cc3
CM
/*
 * super_operations->destroy_inode: tear down per-inode btrfs state and
 * schedule the slab object for RCU freeing.  The WARN_ONs document the
 * invariants that must already hold at eviction time (no dentries, no
 * cached pages, no outstanding reservations or delalloc accounting).
 */
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!hlist_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(BTRFS_I(inode)->outstanding_extents);
	WARN_ON(BTRFS_I(inode)->reserved_extents);
	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
	WARN_ON(BTRFS_I(inode)->csum_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	/* still having an orphan item here indicates cleanup was missed */
	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
		     &BTRFS_I(inode)->runtime_flags)) {
		btrfs_info(root->fs_info, "inode %llu still on the orphan list",
			(unsigned long long)btrfs_ino(inode));
		atomic_dec(&root->orphan_inodes);
	}

	/* drain any ordered extents that were never completed */
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
				(unsigned long long)ordered->file_offset,
				(unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
			/*
			 * two puts: one for the lookup reference above and
			 * one for the extent's base reference
			 */
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	btrfs_remove_delayed_node(inode);
	/* defer the slab free past an RCU grace period (lockless pathwalk) */
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}
7980
45321ac5 7981int btrfs_drop_inode(struct inode *inode)
76dda93c
YZ
7982{
7983 struct btrfs_root *root = BTRFS_I(inode)->root;
45321ac5 7984
fa6ac876 7985 /* the snap/subvol tree is on deleting */
0af3d00b 7986 if (btrfs_root_refs(&root->root_item) == 0 &&
fa6ac876 7987 root != root->fs_info->tree_root)
45321ac5 7988 return 1;
76dda93c 7989 else
45321ac5 7990 return generic_drop_inode(inode);
76dda93c
YZ
7991}
7992
0ee0fda0 7993static void init_once(void *foo)
39279cc3
CM
7994{
7995 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
7996
7997 inode_init_once(&ei->vfs_inode);
7998}
7999
8000void btrfs_destroy_cachep(void)
8001{
8c0a8537
KS
8002 /*
8003 * Make sure all delayed rcu free inodes are flushed before we
8004 * destroy cache.
8005 */
8006 rcu_barrier();
39279cc3
CM
8007 if (btrfs_inode_cachep)
8008 kmem_cache_destroy(btrfs_inode_cachep);
8009 if (btrfs_trans_handle_cachep)
8010 kmem_cache_destroy(btrfs_trans_handle_cachep);
8011 if (btrfs_transaction_cachep)
8012 kmem_cache_destroy(btrfs_transaction_cachep);
39279cc3
CM
8013 if (btrfs_path_cachep)
8014 kmem_cache_destroy(btrfs_path_cachep);
dc89e982
JB
8015 if (btrfs_free_space_cachep)
8016 kmem_cache_destroy(btrfs_free_space_cachep);
8ccf6f19
MX
8017 if (btrfs_delalloc_work_cachep)
8018 kmem_cache_destroy(btrfs_delalloc_work_cachep);
39279cc3
CM
8019}
8020
/*
 * Create the slab caches btrfs uses for its core objects.  On any
 * failure all caches created so far are destroyed and -ENOMEM is
 * returned.  Only the inode cache needs a constructor (init_once), since
 * the embedded VFS inode must be initialized exactly once per object.
 */
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
			sizeof(struct btrfs_delalloc_work), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
			NULL);
	if (!btrfs_delalloc_work_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
8065
8066static int btrfs_getattr(struct vfsmount *mnt,
8067 struct dentry *dentry, struct kstat *stat)
8068{
df0af1a5 8069 u64 delalloc_bytes;
39279cc3 8070 struct inode *inode = dentry->d_inode;
fadc0d8b
DS
8071 u32 blocksize = inode->i_sb->s_blocksize;
8072
39279cc3 8073 generic_fillattr(inode, stat);
0ee5dc67 8074 stat->dev = BTRFS_I(inode)->root->anon_dev;
d6667462 8075 stat->blksize = PAGE_CACHE_SIZE;
df0af1a5
MX
8076
8077 spin_lock(&BTRFS_I(inode)->lock);
8078 delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
8079 spin_unlock(&BTRFS_I(inode)->lock);
fadc0d8b 8080 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
df0af1a5 8081 ALIGN(delalloc_bytes, blocksize)) >> 9;
39279cc3
CM
8082 return 0;
8083}
8084
d397712b
CM
/*
 * inode_operations->rename: move old_dentry from old_dir to new_dentry in
 * new_dir, unlinking whatever new_dentry pointed at.  Subvolume roots get
 * special handling (btrfs_unlink_subvol, forced full log commit); plain
 * inodes pin the log transaction so a crash always sees either the old
 * name or the new one.
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;
	u64 old_ino = btrfs_ino(old_inode);

	/* cannot rename into the placeholder dir of a deleted subvolume */
	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;


	/* check for collisions, even if the name isn't there */
	ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len);

	if (ret) {
		if (ret == -EEXIST) {
			/* we shouldn't get
			 * eexist without a new_inode */
			if (!new_inode) {
				WARN_ON(1);
				return ret;
			}
		} else {
			/* maybe -EOVERFLOW */
			return ret;
		}
	}
	ret = 0;

	/*
	 * we're using rename to replace one file with another.
	 * and the replacement file is large.  Start IO on it now so
	 * we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items.  So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume their normal
	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 11);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(new_dir), index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one.  pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
		btrfs_add_ordered_operation(trans, root, old_inode);

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	/* remove the old name */
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
	} else {
		ret = __btrfs_unlink_inode(trans, root, old_dir,
					old_dentry->d_inode,
					old_dentry->d_name.name,
					old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
	}
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_fail;
	}

	/* unlink whatever currently occupies the target name */
	if (new_inode) {
		inode_inc_iversion(new_inode);
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(btrfs_ino(new_inode) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						root_objectid,
						new_dentry->d_name.name,
						new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		if (!ret && new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_fail;
		}
	}

	/* finally, add the new name pointing at old_inode */
	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_fail;
	}

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct dentry *parent = new_dentry->d_parent;
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
		/* balances the btrfs_pin_log_trans() above */
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction(trans, root);
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	return ret;
}
8271
8ccf6f19
MX
8272static void btrfs_run_delalloc_work(struct btrfs_work *work)
8273{
8274 struct btrfs_delalloc_work *delalloc_work;
8275
8276 delalloc_work = container_of(work, struct btrfs_delalloc_work,
8277 work);
8278 if (delalloc_work->wait)
8279 btrfs_wait_ordered_range(delalloc_work->inode, 0, (u64)-1);
8280 else
8281 filemap_flush(delalloc_work->inode->i_mapping);
8282
8283 if (delalloc_work->delay_iput)
8284 btrfs_add_delayed_iput(delalloc_work->inode);
8285 else
8286 iput(delalloc_work->inode);
8287 complete(&delalloc_work->completion);
8288}
8289
8290struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
8291 int wait, int delay_iput)
8292{
8293 struct btrfs_delalloc_work *work;
8294
8295 work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
8296 if (!work)
8297 return NULL;
8298
8299 init_completion(&work->completion);
8300 INIT_LIST_HEAD(&work->list);
8301 work->inode = inode;
8302 work->wait = wait;
8303 work->delay_iput = delay_iput;
8304 work->work.func = btrfs_run_delalloc_work;
8305
8306 return work;
8307}
8308
8309void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
8310{
8311 wait_for_completion(&work->completion);
8312 kmem_cache_free(btrfs_delalloc_work_cachep, work);
8313}
8314
d352ac68
CM
8315/*
8316 * some fairly slow code that needs optimization. This walks the list
8317 * of all the inodes with pending delalloc and forces them to disk.
8318 */
24bbcf04 8319int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
ea8c2819 8320{
ea8c2819 8321 struct btrfs_inode *binode;
5b21f2ed 8322 struct inode *inode;
8ccf6f19
MX
8323 struct btrfs_delalloc_work *work, *next;
8324 struct list_head works;
1eafa6c7 8325 struct list_head splice;
8ccf6f19 8326 int ret = 0;
ea8c2819 8327
c146afad
YZ
8328 if (root->fs_info->sb->s_flags & MS_RDONLY)
8329 return -EROFS;
8330
8ccf6f19 8331 INIT_LIST_HEAD(&works);
1eafa6c7 8332 INIT_LIST_HEAD(&splice);
63607cc8 8333
75eff68e 8334 spin_lock(&root->fs_info->delalloc_lock);
1eafa6c7
MX
8335 list_splice_init(&root->fs_info->delalloc_inodes, &splice);
8336 while (!list_empty(&splice)) {
8337 binode = list_entry(splice.next, struct btrfs_inode,
ea8c2819 8338 delalloc_inodes);
1eafa6c7
MX
8339
8340 list_del_init(&binode->delalloc_inodes);
8341
5b21f2ed 8342 inode = igrab(&binode->vfs_inode);
df0af1a5
MX
8343 if (!inode) {
8344 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
8345 &binode->runtime_flags);
1eafa6c7 8346 continue;
df0af1a5 8347 }
1eafa6c7
MX
8348
8349 list_add_tail(&binode->delalloc_inodes,
8350 &root->fs_info->delalloc_inodes);
75eff68e 8351 spin_unlock(&root->fs_info->delalloc_lock);
1eafa6c7
MX
8352
8353 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
8354 if (unlikely(!work)) {
8355 ret = -ENOMEM;
8356 goto out;
5b21f2ed 8357 }
1eafa6c7
MX
8358 list_add_tail(&work->list, &works);
8359 btrfs_queue_worker(&root->fs_info->flush_workers,
8360 &work->work);
8361
5b21f2ed 8362 cond_resched();
75eff68e 8363 spin_lock(&root->fs_info->delalloc_lock);
ea8c2819 8364 }
75eff68e 8365 spin_unlock(&root->fs_info->delalloc_lock);
8c8bee1d 8366
1eafa6c7
MX
8367 list_for_each_entry_safe(work, next, &works, list) {
8368 list_del_init(&work->list);
8369 btrfs_wait_and_free_delalloc_work(work);
8370 }
8371
8c8bee1d
CM
8372 /* the filemap_flush will queue IO into the worker threads, but
8373 * we have to make sure the IO is actually started and that
8374 * ordered extents get created before we return
8375 */
8376 atomic_inc(&root->fs_info->async_submit_draining);
d397712b 8377 while (atomic_read(&root->fs_info->nr_async_submits) ||
771ed689 8378 atomic_read(&root->fs_info->async_delalloc_pages)) {
8c8bee1d 8379 wait_event(root->fs_info->async_submit_wait,
771ed689
CM
8380 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
8381 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
8c8bee1d
CM
8382 }
8383 atomic_dec(&root->fs_info->async_submit_draining);
1eafa6c7 8384 return 0;
8ccf6f19
MX
8385out:
8386 list_for_each_entry_safe(work, next, &works, list) {
8387 list_del_init(&work->list);
8388 btrfs_wait_and_free_delalloc_work(work);
8389 }
1eafa6c7
MX
8390
8391 if (!list_empty_careful(&splice)) {
8392 spin_lock(&root->fs_info->delalloc_lock);
8393 list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
8394 spin_unlock(&root->fs_info->delalloc_lock);
8395 }
8ccf6f19 8396 return ret;
ea8c2819
CM
8397}
8398
39279cc3
CM
/*
 * inode_operations->symlink: create a symlink whose target string is
 * stored as an inline file extent item.
 *
 * name_len counts the terminating NUL (strlen + 1): the inline extent
 * stores the NUL, while i_size is set to name_len - 1 (target length
 * without the NUL).
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname) + 1;
	/* the whole target (incl. NUL) must fit in one inline extent */
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		drop_inode = 1;
		goto out_unlock;
	}
	/* insert the inline EXTENT_DATA item holding the target string */
	key.objectid = btrfs_ino(inode);
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		btrfs_free_path(path);
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/* switch to symlink ops now that the target data exists */
	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	if (!err)
		d_instantiate(dentry, inode);
	btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root);
	return err;
}
16432985 8521
0af3d00b
JB
8522static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8523 u64 start, u64 num_bytes, u64 min_size,
8524 loff_t actual_len, u64 *alloc_hint,
8525 struct btrfs_trans_handle *trans)
d899e052 8526{
5dc562c5
JB
8527 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
8528 struct extent_map *em;
d899e052
YZ
8529 struct btrfs_root *root = BTRFS_I(inode)->root;
8530 struct btrfs_key ins;
d899e052 8531 u64 cur_offset = start;
55a61d1d 8532 u64 i_size;
154ea289 8533 u64 cur_bytes;
d899e052 8534 int ret = 0;
0af3d00b 8535 bool own_trans = true;
d899e052 8536
0af3d00b
JB
8537 if (trans)
8538 own_trans = false;
d899e052 8539 while (num_bytes > 0) {
0af3d00b
JB
8540 if (own_trans) {
8541 trans = btrfs_start_transaction(root, 3);
8542 if (IS_ERR(trans)) {
8543 ret = PTR_ERR(trans);
8544 break;
8545 }
5a303d5d
YZ
8546 }
8547
154ea289
CM
8548 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
8549 cur_bytes = max(cur_bytes, min_size);
8550 ret = btrfs_reserve_extent(trans, root, cur_bytes,
24542bf7 8551 min_size, 0, *alloc_hint, &ins, 1);
5a303d5d 8552 if (ret) {
0af3d00b
JB
8553 if (own_trans)
8554 btrfs_end_transaction(trans, root);
a22285a6 8555 break;
d899e052 8556 }
5a303d5d 8557
d899e052
YZ
8558 ret = insert_reserved_file_extent(trans, inode,
8559 cur_offset, ins.objectid,
8560 ins.offset, ins.offset,
920bbbfb 8561 ins.offset, 0, 0, 0,
d899e052 8562 BTRFS_FILE_EXTENT_PREALLOC);
79787eaa
JM
8563 if (ret) {
8564 btrfs_abort_transaction(trans, root, ret);
8565 if (own_trans)
8566 btrfs_end_transaction(trans, root);
8567 break;
8568 }
a1ed835e
CM
8569 btrfs_drop_extent_cache(inode, cur_offset,
8570 cur_offset + ins.offset -1, 0);
5a303d5d 8571
5dc562c5
JB
8572 em = alloc_extent_map();
8573 if (!em) {
8574 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
8575 &BTRFS_I(inode)->runtime_flags);
8576 goto next;
8577 }
8578
8579 em->start = cur_offset;
8580 em->orig_start = cur_offset;
8581 em->len = ins.offset;
8582 em->block_start = ins.objectid;
8583 em->block_len = ins.offset;
b4939680 8584 em->orig_block_len = ins.offset;
cc95bef6 8585 em->ram_bytes = ins.offset;
5dc562c5
JB
8586 em->bdev = root->fs_info->fs_devices->latest_bdev;
8587 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
8588 em->generation = trans->transid;
8589
8590 while (1) {
8591 write_lock(&em_tree->lock);
09a2a8f9 8592 ret = add_extent_mapping(em_tree, em, 1);
5dc562c5
JB
8593 write_unlock(&em_tree->lock);
8594 if (ret != -EEXIST)
8595 break;
8596 btrfs_drop_extent_cache(inode, cur_offset,
8597 cur_offset + ins.offset - 1,
8598 0);
8599 }
8600 free_extent_map(em);
8601next:
d899e052
YZ
8602 num_bytes -= ins.offset;
8603 cur_offset += ins.offset;
efa56464 8604 *alloc_hint = ins.objectid + ins.offset;
5a303d5d 8605
0c4d2d95 8606 inode_inc_iversion(inode);
d899e052 8607 inode->i_ctime = CURRENT_TIME;
6cbff00f 8608 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
d899e052 8609 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
efa56464
YZ
8610 (actual_len > inode->i_size) &&
8611 (cur_offset > inode->i_size)) {
d1ea6a61 8612 if (cur_offset > actual_len)
55a61d1d 8613 i_size = actual_len;
d1ea6a61 8614 else
55a61d1d
JB
8615 i_size = cur_offset;
8616 i_size_write(inode, i_size);
8617 btrfs_ordered_update_i_size(inode, i_size, NULL);
5a303d5d
YZ
8618 }
8619
d899e052 8620 ret = btrfs_update_inode(trans, root, inode);
79787eaa
JM
8621
8622 if (ret) {
8623 btrfs_abort_transaction(trans, root, ret);
8624 if (own_trans)
8625 btrfs_end_transaction(trans, root);
8626 break;
8627 }
d899e052 8628
0af3d00b
JB
8629 if (own_trans)
8630 btrfs_end_transaction(trans, root);
5a303d5d 8631 }
d899e052
YZ
8632 return ret;
8633}
8634
0af3d00b
JB
8635int btrfs_prealloc_file_range(struct inode *inode, int mode,
8636 u64 start, u64 num_bytes, u64 min_size,
8637 loff_t actual_len, u64 *alloc_hint)
8638{
8639 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8640 min_size, actual_len, alloc_hint,
8641 NULL);
8642}
8643
/*
 * Same as btrfs_prealloc_file_range(), but runs inside the caller's
 * already-started transaction @trans; __btrfs_prealloc_file_range()
 * will neither start nor end a transaction of its own.
 */
int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}
8652
e6dcd2dc
CM
/*
 * .set_page_dirty for btrfs address spaces: delegate to the generic
 * helper that dirties the page without touching buffer heads.
 */
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
8657
10556cb2 8658static int btrfs_permission(struct inode *inode, int mask)
fdebe2bd 8659{
b83cc969 8660 struct btrfs_root *root = BTRFS_I(inode)->root;
cb6db4e5 8661 umode_t mode = inode->i_mode;
b83cc969 8662
cb6db4e5
JM
8663 if (mask & MAY_WRITE &&
8664 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
8665 if (btrfs_root_readonly(root))
8666 return -EROFS;
8667 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
8668 return -EACCES;
8669 }
2830ba7f 8670 return generic_permission(inode, mask);
fdebe2bd 8671}
39279cc3 8672
6e1d5dcc 8673static const struct inode_operations btrfs_dir_inode_operations = {
3394e160 8674 .getattr = btrfs_getattr,
39279cc3
CM
8675 .lookup = btrfs_lookup,
8676 .create = btrfs_create,
8677 .unlink = btrfs_unlink,
8678 .link = btrfs_link,
8679 .mkdir = btrfs_mkdir,
8680 .rmdir = btrfs_rmdir,
8681 .rename = btrfs_rename,
8682 .symlink = btrfs_symlink,
8683 .setattr = btrfs_setattr,
618e21d5 8684 .mknod = btrfs_mknod,
95819c05
CH
8685 .setxattr = btrfs_setxattr,
8686 .getxattr = btrfs_getxattr,
5103e947 8687 .listxattr = btrfs_listxattr,
95819c05 8688 .removexattr = btrfs_removexattr,
fdebe2bd 8689 .permission = btrfs_permission,
4e34e719 8690 .get_acl = btrfs_get_acl,
39279cc3 8691};
6e1d5dcc 8692static const struct inode_operations btrfs_dir_ro_inode_operations = {
39279cc3 8693 .lookup = btrfs_lookup,
fdebe2bd 8694 .permission = btrfs_permission,
4e34e719 8695 .get_acl = btrfs_get_acl,
39279cc3 8696};
76dda93c 8697
828c0950 8698static const struct file_operations btrfs_dir_file_operations = {
39279cc3
CM
8699 .llseek = generic_file_llseek,
8700 .read = generic_read_dir,
cbdf5a24 8701 .readdir = btrfs_real_readdir,
34287aa3 8702 .unlocked_ioctl = btrfs_ioctl,
39279cc3 8703#ifdef CONFIG_COMPAT
34287aa3 8704 .compat_ioctl = btrfs_ioctl,
39279cc3 8705#endif
6bf13c0c 8706 .release = btrfs_release_file,
e02119d5 8707 .fsync = btrfs_sync_file,
39279cc3
CM
8708};
8709
d1310b2e 8710static struct extent_io_ops btrfs_extent_io_ops = {
07157aac 8711 .fill_delalloc = run_delalloc_range,
065631f6 8712 .submit_bio_hook = btrfs_submit_bio_hook,
239b14b3 8713 .merge_bio_hook = btrfs_merge_bio_hook,
07157aac 8714 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
e6dcd2dc 8715 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
247e743c 8716 .writepage_start_hook = btrfs_writepage_start_hook,
b0c68f8b
CM
8717 .set_bit_hook = btrfs_set_bit_hook,
8718 .clear_bit_hook = btrfs_clear_bit_hook,
9ed74f2d
JB
8719 .merge_extent_hook = btrfs_merge_extent_hook,
8720 .split_extent_hook = btrfs_split_extent_hook,
07157aac
CM
8721};
8722
35054394
CM
8723/*
8724 * btrfs doesn't support the bmap operation because swapfiles
8725 * use bmap to make a mapping of extents in the file. They assume
8726 * these extents won't change over the life of the file and they
8727 * use the bmap result to do IO directly to the drive.
8728 *
8729 * the btrfs bmap call would return logical addresses that aren't
8730 * suitable for IO and they also will change frequently as COW
8731 * operations happen. So, swapfile + btrfs == corruption.
8732 *
8733 * For now we're avoiding this by dropping bmap.
8734 */
7f09410b 8735static const struct address_space_operations btrfs_aops = {
39279cc3
CM
8736 .readpage = btrfs_readpage,
8737 .writepage = btrfs_writepage,
b293f02e 8738 .writepages = btrfs_writepages,
3ab2fb5a 8739 .readpages = btrfs_readpages,
16432985 8740 .direct_IO = btrfs_direct_IO,
a52d9a80
CM
8741 .invalidatepage = btrfs_invalidatepage,
8742 .releasepage = btrfs_releasepage,
e6dcd2dc 8743 .set_page_dirty = btrfs_set_page_dirty,
465fdd97 8744 .error_remove_page = generic_error_remove_page,
39279cc3
CM
8745};
8746
7f09410b 8747static const struct address_space_operations btrfs_symlink_aops = {
39279cc3
CM
8748 .readpage = btrfs_readpage,
8749 .writepage = btrfs_writepage,
2bf5a725
CM
8750 .invalidatepage = btrfs_invalidatepage,
8751 .releasepage = btrfs_releasepage,
39279cc3
CM
8752};
8753
6e1d5dcc 8754static const struct inode_operations btrfs_file_inode_operations = {
39279cc3
CM
8755 .getattr = btrfs_getattr,
8756 .setattr = btrfs_setattr,
95819c05
CH
8757 .setxattr = btrfs_setxattr,
8758 .getxattr = btrfs_getxattr,
5103e947 8759 .listxattr = btrfs_listxattr,
95819c05 8760 .removexattr = btrfs_removexattr,
fdebe2bd 8761 .permission = btrfs_permission,
1506fcc8 8762 .fiemap = btrfs_fiemap,
4e34e719 8763 .get_acl = btrfs_get_acl,
e41f941a 8764 .update_time = btrfs_update_time,
39279cc3 8765};
6e1d5dcc 8766static const struct inode_operations btrfs_special_inode_operations = {
618e21d5
JB
8767 .getattr = btrfs_getattr,
8768 .setattr = btrfs_setattr,
fdebe2bd 8769 .permission = btrfs_permission,
95819c05
CH
8770 .setxattr = btrfs_setxattr,
8771 .getxattr = btrfs_getxattr,
33268eaf 8772 .listxattr = btrfs_listxattr,
95819c05 8773 .removexattr = btrfs_removexattr,
4e34e719 8774 .get_acl = btrfs_get_acl,
e41f941a 8775 .update_time = btrfs_update_time,
618e21d5 8776};
6e1d5dcc 8777static const struct inode_operations btrfs_symlink_inode_operations = {
39279cc3
CM
8778 .readlink = generic_readlink,
8779 .follow_link = page_follow_link_light,
8780 .put_link = page_put_link,
f209561a 8781 .getattr = btrfs_getattr,
22c44fe6 8782 .setattr = btrfs_setattr,
fdebe2bd 8783 .permission = btrfs_permission,
0279b4cd
JO
8784 .setxattr = btrfs_setxattr,
8785 .getxattr = btrfs_getxattr,
8786 .listxattr = btrfs_listxattr,
8787 .removexattr = btrfs_removexattr,
4e34e719 8788 .get_acl = btrfs_get_acl,
e41f941a 8789 .update_time = btrfs_update_time,
39279cc3 8790};
76dda93c 8791
82d339d9 8792const struct dentry_operations btrfs_dentry_operations = {
76dda93c 8793 .d_delete = btrfs_dentry_delete,
b4aff1f8 8794 .d_release = btrfs_dentry_release,
76dda93c 8795};