/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/aio.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"

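/* args used by the iget5 helpers to find an in-memory inode by ino + root */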
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

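/* map the S_IFMT mode bits (shifted down by S_SHIFT) to the BTRFS_FT_*
 * file type stored in directory items
 */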
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
					   u64 len, u64 orig_start,
					   u64 block_start, u64 block_len,
					   u64 orig_block_len, u64 ram_bytes,
					   int type);

static int btrfs_dirty_inode(struct inode *inode);

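/*
 * set up the ACLs and security xattrs for a newly created inode
 */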
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, int extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	inode_add_bytes(inode, size);

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = start;
		btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
					  struct inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, root->sectorsize);
	u64 data_len = inline_len;
	int ret;
	struct btrfs_path *path;
	int extent_inserted = 0;
	u32 extent_item_size;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (compressed_size && compressed_pages)
		extent_item_size = btrfs_file_extent_calc_inline_size(
		   compressed_size);
	else
		extent_item_size = btrfs_file_extent_calc_inline_size(
		    inline_len);

	ret = __btrfs_drop_extents(trans, root, inode, path,
				   start, aligned_end, NULL,
				   1, 1, extent_item_size, &extent_inserted);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, path, extent_inserted,
				   root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
	btrfs_free_path(path);
	btrfs_end_transaction(trans, root);
	return ret;
}

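/*
 * an async_extent describes one range prepared by compress_file_range
 * (compressed or left plain); an async_cow carries the whole delalloc
 * range handed to the worker threads along with the list of those extents.
 */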
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

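/*
 * queue a range on the async_cow list for phase two; pages is NULL when
 * the range stays uncompressed
 */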
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 */
		extent_range_clear_dirty_for_io(inode, start, end);
		redirty = 1;
		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(root, inode, start, end,
						    0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(root, inode, start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DEFRAG;
			clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode, start, end, NULL,
						     clear_flags, PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     PAGE_END_WRITEBACK);
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		if (redirty)
			extent_range_redirty_for_io(inode, start, end);
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		ret = btrfs_reserve_extent(root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
		if (ret) {
			int i;

			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);
				goto retry;
			}
			goto out_free;
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_free_reserve;
		}
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = async_extent->ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		if (ret)
			goto out_free_reserve;

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret)
			goto out_free_reserve;

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		if (ret)
			goto out;
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	kfree(async_extent);
	goto again;
}

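/*
 * pick an allocation hint for this range, preferring the disk location
 * of an existing extent mapping nearby
 */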
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(root, inode, start, end, 0, 0,
					    NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode, start, end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0)
			goto out_unlock;

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_reserve;
		}
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}
		if (ret)
			goto out_reserve;

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret)
			goto out_reserve;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret)
				goto out_reserve;
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? PAGE_UNLOCK : 0;
		op |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start,
					     start + ram_size - 1, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_unlock:
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
				     EXTENT_DELALLOC | EXTENT_DEFRAG,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	goto out;
}

/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

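/*
 * split the delalloc range into chunks of at most 512k and queue each
 * chunk as async_cow work; compression and IO submission then happen
 * from the delalloc worker threads
 */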
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}

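/*
 * returns 1 if any csums exist for the given disk byte range,
 * 0 if there are none
 */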
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * called back for nocow writeback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page,
					     cow_start, found_key.offset - 1,
					     page_started, nr_written, 1);
			if (ret)
				goto error;
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = found_key.offset - extent_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->orig_block_len = disk_num_bytes;
			em->ram_bytes = ram_bytes;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			em->mod_start = em->start;
			em->mod_len = em->len;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			set_bit(EXTENT_FLAG_FILLING, &em->flags);
			em->generation = -1;
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em, 1);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret)
				goto error;
		}

		extent_clear_unlock_delalloc(inode, cur_offset,
					     cur_offset + num_bytes - 1,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC, PAGE_UNLOCK |
					     PAGE_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret)
			goto error;
	}

error:
	err = btrfs_end_transaction(trans, root);
	if (!ret)
		ret = err;

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!btrfs_test_opt(root, COMPRESS) &&
		   !(BTRFS_I(inode)->force_compress) &&
		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}

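/*
 * extent_io.c split_extent_hook: splitting a delalloc extent state
 * means there is one more outstanding extent to account for
 */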
static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}

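/*
 * helpers to keep an inode on (or take it off) its root's list of
 * inodes with pending delalloc, and that root on the fs-wide list of
 * roots with delalloc inodes
 */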
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct inode *inode)
{
	spin_lock(&root->delalloc_lock);
	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
			      &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			&BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&root->fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &root->fs_info->delalloc_roots);
			spin_unlock(&root->fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct inode *inode)
{
	spin_lock(&root->delalloc_lock);
	if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			spin_lock(&root->fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&root->fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, unsigned long *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}

d352ac68
CM
1540/*
1541 * extent_io.c clear_bit_hook, see set_bit_hook for why
1542 */
1bf85046 1543static void btrfs_clear_bit_hook(struct inode *inode,
41074888
DS
1544 struct extent_state *state,
1545 unsigned long *bits)
291d673e 1546{
75eff68e
CM
1547 /*
1548 * set_bit and clear bit hooks normally require _irqsave/restore
27160b6b 1549 * but in this case, we are only testing for the DELALLOC
75eff68e
CM
1550 * bit, which is only set or cleared with irqs on
1551 */
0ca1f7ce 1552 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
291d673e 1553 struct btrfs_root *root = BTRFS_I(inode)->root;
0ca1f7ce 1554 u64 len = state->end + 1 - state->start;
83eea1f1 1555 bool do_list = !btrfs_is_free_space_inode(inode);
bcbfce8a 1556
9e0baf60 1557 if (*bits & EXTENT_FIRST_DELALLOC) {
0ca1f7ce 1558 *bits &= ~EXTENT_FIRST_DELALLOC;
9e0baf60
JB
1559 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1560 spin_lock(&BTRFS_I(inode)->lock);
1561 BTRFS_I(inode)->outstanding_extents--;
1562 spin_unlock(&BTRFS_I(inode)->lock);
1563 }
0ca1f7ce 1564
b6d08f06
JB
1565 /*
1566 * We don't reserve metadata space for space cache inodes so we
1567 * don't need to call dellalloc_release_metadata if there is an
1568 * error.
1569 */
1570 if (*bits & EXTENT_DO_ACCOUNTING &&
1571 root != root->fs_info->tree_root)
0ca1f7ce
YZ
1572 btrfs_delalloc_release_metadata(inode, len);
1573
0cb59c99 1574 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
7ee9e440 1575 && do_list && !(state->state & EXTENT_NORESERVE))
0ca1f7ce 1576 btrfs_free_reserved_data_space(inode, len);
9ed74f2d 1577
963d678b
MX
1578 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1579 root->fs_info->delalloc_batch);
df0af1a5 1580 spin_lock(&BTRFS_I(inode)->lock);
0ca1f7ce 1581 BTRFS_I(inode)->delalloc_bytes -= len;
0cb59c99 1582 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
df0af1a5 1583 test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
eb73c1b7
MX
1584 &BTRFS_I(inode)->runtime_flags))
1585 btrfs_del_delalloc_inode(root, inode);
df0af1a5 1586 spin_unlock(&BTRFS_I(inode)->lock);
291d673e 1587 }
291d673e
CM
1588}
1589
d352ac68
CM
1590/*
1591 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1592 * we don't create bios that span stripes or chunks
1593 */
64a16701 1594int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
c8b97818
CM
1595 size_t size, struct bio *bio,
1596 unsigned long bio_flags)
239b14b3
CM
1597{
1598 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
a62b9401 1599 u64 logical = (u64)bio->bi_sector << 9;
239b14b3
CM
1600 u64 length = 0;
1601 u64 map_length;
239b14b3
CM
1602 int ret;
1603
771ed689
CM
1604 if (bio_flags & EXTENT_BIO_COMPRESSED)
1605 return 0;
1606
f2d8d74d 1607 length = bio->bi_size;
239b14b3 1608 map_length = length;
64a16701 1609 ret = btrfs_map_block(root->fs_info, rw, logical,
f188591e 1610 &map_length, NULL, 0);
3ec706c8 1611 /* Will always return 0 with map_multi == NULL */
3444a972 1612 BUG_ON(ret < 0);
d397712b 1613 if (map_length < length + size)
239b14b3 1614 return 1;
3444a972 1615 return 0;
239b14b3
CM
1616}
1617
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time. All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time. All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
	if (ret)
		bio_endio(bio, ret);
	return ret;
}

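/*
 * A note on the pair above (a reading of the code, not new mechanics):
 * for async writes, btrfs_wq_submit_bio() runs __btrfs_submit_bio_start()
 * in a worker thread so the CPU-heavy checksumming happens off the
 * submitter's context, and only afterwards calls
 * __btrfs_submit_bio_done() to hand the bio to the block layer through
 * btrfs_map_bio().
 */
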
/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;
	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(inode))
		metadata = 2;

	if (!(rw & REQ_WRITE)) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
		if (ret)
			goto out;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			ret = btrfs_submit_compressed_read(inode, bio,
							   mirror_num,
							   bio_flags);
			goto out;
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				goto out;
		}
		goto mapit;
	} else if (async && !skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					  inode, rw, bio, mirror_num,
					  bio_flags, bio_offset,
					  __btrfs_submit_bio_start,
					  __btrfs_submit_bio_done);
		goto out;
	} else if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
		if (ret)
			goto out;
	}

mapit:
	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);

out:
	if (ret < 0)
		bio_endio(bio, ret);
	return ret;
}

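/*
 * The routing above, summarized (a hedged reading of this function, not
 * new behaviour): reads first get an end_io workqueue attached, then
 * either take the compressed-read path, look up their expected csums,
 * or map directly when NODATASUM is set. Writes are checksummed
 * asynchronously through the bio_start/bio_done pair when no one is in
 * the middle of a synchronous write to the inode, synchronously
 * otherwise, and not at all for NODATASUM inodes or the reloc tree,
 * whose csum items were already cloned.
 */
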
/*
 * given a list of ordered sums record them in the inode. This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 file_offset,
				      struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = 1;
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
		trans->adding_csums = 0;
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea. This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly set up for IO. We kick off an async process
 * to fix it up. The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EBUSY;
}

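/*
 * A short walk-through of the fixup handshake above, as this reader
 * understands it: PageChecked marks "fixup queued" so each page is only
 * queued once, the extra page reference keeps the page alive until the
 * worker runs, and the -EBUSY return tells the writepage path to leave
 * the page dirty and skip it for now; the worker then re-establishes
 * delalloc state so a later writeback pass can write the page under the
 * normal ordered rules.
 */
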
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	int extent_inserted = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
				   file_pos + num_bytes, NULL, 0,
				   1, sizeof(*fi), &extent_inserted);
	if (ret)
		goto out;

	if (!extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					       root->root_key.objectid,
					       btrfs_ino(inode), file_pos, &ins);
out:
	btrfs_free_path(path);

	return ret;
}

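/*
 * Illustration with made-up numbers: for an 8KiB ordered extent that
 * compressed down to 4KiB on disk at bytenr 12582912, the item written
 * above would carry disk_bytenr = 12582912, disk_num_bytes = 4096,
 * num_bytes = ram_bytes = 8192, offset = 0 and
 * compression = BTRFS_COMPRESS_ZLIB, and the follow-up call to
 * btrfs_alloc_reserved_file_extent() converts the allocator's
 * reservation into a real extent item referenced by this file.
 */
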
/* snapshot-aware defrag */
struct sa_defrag_extent_backref {
	struct rb_node node;
	struct old_sa_defrag_extent *old;
	u64 root_id;
	u64 inum;
	u64 file_pos;
	u64 extent_offset;
	u64 num_bytes;
	u64 generation;
};

struct old_sa_defrag_extent {
	struct list_head list;
	struct new_sa_defrag_extent *new;

	u64 extent_offset;
	u64 bytenr;
	u64 offset;
	u64 len;
	int count;
};

struct new_sa_defrag_extent {
	struct rb_root root;
	struct list_head head;
	struct btrfs_path *path;
	struct inode *inode;
	u64 file_pos;
	u64 len;
	u64 bytenr;
	u64 disk_len;
	u8 compress_type;
};

static int backref_comp(struct sa_defrag_extent_backref *b1,
			struct sa_defrag_extent_backref *b2)
{
	if (b1->root_id < b2->root_id)
		return -1;
	else if (b1->root_id > b2->root_id)
		return 1;

	if (b1->inum < b2->inum)
		return -1;
	else if (b1->inum > b2->inum)
		return 1;

	if (b1->file_pos < b2->file_pos)
		return -1;
	else if (b1->file_pos > b2->file_pos)
		return 1;

	/*
	 * [------------------------------] ===> (a range of space)
	 *    |<--->|   |<---->| =============> (fs/file tree A)
	 * |<---------------------------->| ===> (fs/file tree B)
	 *
	 * A range of space can refer to two file extents in one tree while
	 * referring to only one file extent in another tree.
	 *
	 * So we may process the same disk offset more than once (two extents
	 * in A) that resolves to the same extent (one extent in B), and then
	 * insert two identical backrefs (both referring to the extent in B).
	 */
	return 0;
}

static void backref_insert(struct rb_root *root,
			   struct sa_defrag_extent_backref *backref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct sa_defrag_extent_backref *entry;
	int ret;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct sa_defrag_extent_backref, node);

		ret = backref_comp(backref, entry);
		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&backref->node, parent, p);
	rb_insert_color(&backref->node, root);
}

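/*
 * A minimal illustration of the ordering backref_comp() imposes; the
 * values are invented and this stays a comment so it adds nothing to
 * the kernel image:
 *
 *	struct sa_defrag_extent_backref a = { .root_id = 5, .inum = 257,
 *					      .file_pos = 0 };
 *	struct sa_defrag_extent_backref b = { .root_id = 5, .inum = 257,
 *					      .file_pos = 4096 };
 *
 * backref_comp(&a, &b) returns -1, so 'a' descends to the left of 'b';
 * comparing 'b' with an identical node returns 0, which backref_insert()
 * sends to the right, so duplicate keys are kept rather than replaced
 * (see the comment in backref_comp() about two extents in tree A
 * resolving to one extent in tree B).
 */
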
2018/*
2019 * Note the backref might has changed, and in this case we just return 0.
2020 */
2021static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2022 void *ctx)
2023{
2024 struct btrfs_file_extent_item *extent;
2025 struct btrfs_fs_info *fs_info;
2026 struct old_sa_defrag_extent *old = ctx;
2027 struct new_sa_defrag_extent *new = old->new;
2028 struct btrfs_path *path = new->path;
2029 struct btrfs_key key;
2030 struct btrfs_root *root;
2031 struct sa_defrag_extent_backref *backref;
2032 struct extent_buffer *leaf;
2033 struct inode *inode = new->inode;
2034 int slot;
2035 int ret;
2036 u64 extent_offset;
2037 u64 num_bytes;
2038
2039 if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2040 inum == btrfs_ino(inode))
2041 return 0;
2042
2043 key.objectid = root_id;
2044 key.type = BTRFS_ROOT_ITEM_KEY;
2045 key.offset = (u64)-1;
2046
2047 fs_info = BTRFS_I(inode)->root->fs_info;
2048 root = btrfs_read_fs_root_no_name(fs_info, &key);
2049 if (IS_ERR(root)) {
2050 if (PTR_ERR(root) == -ENOENT)
2051 return 0;
2052 WARN_ON(1);
2053 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2054 inum, offset, root_id);
2055 return PTR_ERR(root);
2056 }
2057
2058 key.objectid = inum;
2059 key.type = BTRFS_EXTENT_DATA_KEY;
2060 if (offset > (u64)-1 << 32)
2061 key.offset = 0;
2062 else
2063 key.offset = offset;
2064
2065 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
fae7f21c 2066 if (WARN_ON(ret < 0))
38c227d8 2067 return ret;
50f1319c 2068 ret = 0;
38c227d8
LB
2069
2070 while (1) {
2071 cond_resched();
2072
2073 leaf = path->nodes[0];
2074 slot = path->slots[0];
2075
2076 if (slot >= btrfs_header_nritems(leaf)) {
2077 ret = btrfs_next_leaf(root, path);
2078 if (ret < 0) {
2079 goto out;
2080 } else if (ret > 0) {
2081 ret = 0;
2082 goto out;
2083 }
2084 continue;
2085 }
2086
2087 path->slots[0]++;
2088
2089 btrfs_item_key_to_cpu(leaf, &key, slot);
2090
2091 if (key.objectid > inum)
2092 goto out;
2093
2094 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2095 continue;
2096
2097 extent = btrfs_item_ptr(leaf, slot,
2098 struct btrfs_file_extent_item);
2099
2100 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2101 continue;
2102
e68afa49
LB
2103 /*
2104 * 'offset' refers to the exact key.offset,
2105 * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2106 * (key.offset - extent_offset).
2107 */
2108 if (key.offset != offset)
38c227d8
LB
2109 continue;
2110
e68afa49 2111 extent_offset = btrfs_file_extent_offset(leaf, extent);
38c227d8 2112 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
e68afa49 2113
38c227d8
LB
2114 if (extent_offset >= old->extent_offset + old->offset +
2115 old->len || extent_offset + num_bytes <=
2116 old->extent_offset + old->offset)
2117 continue;
38c227d8
LB
2118 break;
2119 }
2120
2121 backref = kmalloc(sizeof(*backref), GFP_NOFS);
2122 if (!backref) {
2123 ret = -ENOENT;
2124 goto out;
2125 }
2126
2127 backref->root_id = root_id;
2128 backref->inum = inum;
e68afa49 2129 backref->file_pos = offset;
38c227d8
LB
2130 backref->num_bytes = num_bytes;
2131 backref->extent_offset = extent_offset;
2132 backref->generation = btrfs_file_extent_generation(leaf, extent);
2133 backref->old = old;
2134 backref_insert(&new->root, backref);
2135 old->count++;
2136out:
2137 btrfs_release_path(path);
2138 WARN_ON(ret);
2139 return ret;
2140}
2141
static noinline bool record_extent_backrefs(struct btrfs_path *path,
				   struct new_sa_defrag_extent *new)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
	struct old_sa_defrag_extent *old, *tmp;
	int ret;

	new->path = path;

	list_for_each_entry_safe(old, tmp, &new->head, list) {
		ret = iterate_inodes_from_logical(old->bytenr +
						  old->extent_offset, fs_info,
						  path, record_one_backref,
						  old);
		if (ret < 0 && ret != -ENOENT)
			return false;

		/* no backref to be processed for this extent */
		if (!old->count) {
			list_del(&old->list);
			kfree(old);
		}
	}

	if (list_empty(&new->head))
		return false;

	return true;
}

static int relink_is_mergable(struct extent_buffer *leaf,
			      struct btrfs_file_extent_item *fi,
			      struct new_sa_defrag_extent *new)
{
	if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
		return 0;

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
		return 0;

	if (btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	return 1;
}

2192/*
2193 * Note the backref might has changed, and in this case we just return 0.
2194 */
2195static noinline int relink_extent_backref(struct btrfs_path *path,
2196 struct sa_defrag_extent_backref *prev,
2197 struct sa_defrag_extent_backref *backref)
2198{
2199 struct btrfs_file_extent_item *extent;
2200 struct btrfs_file_extent_item *item;
2201 struct btrfs_ordered_extent *ordered;
2202 struct btrfs_trans_handle *trans;
2203 struct btrfs_fs_info *fs_info;
2204 struct btrfs_root *root;
2205 struct btrfs_key key;
2206 struct extent_buffer *leaf;
2207 struct old_sa_defrag_extent *old = backref->old;
2208 struct new_sa_defrag_extent *new = old->new;
2209 struct inode *src_inode = new->inode;
2210 struct inode *inode;
2211 struct extent_state *cached = NULL;
2212 int ret = 0;
2213 u64 start;
2214 u64 len;
2215 u64 lock_start;
2216 u64 lock_end;
2217 bool merge = false;
2218 int index;
2219
2220 if (prev && prev->root_id == backref->root_id &&
2221 prev->inum == backref->inum &&
2222 prev->file_pos + prev->num_bytes == backref->file_pos)
2223 merge = true;
2224
2225 /* step 1: get root */
2226 key.objectid = backref->root_id;
2227 key.type = BTRFS_ROOT_ITEM_KEY;
2228 key.offset = (u64)-1;
2229
2230 fs_info = BTRFS_I(src_inode)->root->fs_info;
2231 index = srcu_read_lock(&fs_info->subvol_srcu);
2232
2233 root = btrfs_read_fs_root_no_name(fs_info, &key);
2234 if (IS_ERR(root)) {
2235 srcu_read_unlock(&fs_info->subvol_srcu, index);
2236 if (PTR_ERR(root) == -ENOENT)
2237 return 0;
2238 return PTR_ERR(root);
2239 }
38c227d8
LB
2240
2241 /* step 2: get inode */
2242 key.objectid = backref->inum;
2243 key.type = BTRFS_INODE_ITEM_KEY;
2244 key.offset = 0;
2245
2246 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2247 if (IS_ERR(inode)) {
2248 srcu_read_unlock(&fs_info->subvol_srcu, index);
2249 return 0;
2250 }
2251
2252 srcu_read_unlock(&fs_info->subvol_srcu, index);
2253
2254 /* step 3: relink backref */
2255 lock_start = backref->file_pos;
2256 lock_end = backref->file_pos + backref->num_bytes - 1;
2257 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2258 0, &cached);
2259
2260 ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2261 if (ordered) {
2262 btrfs_put_ordered_extent(ordered);
2263 goto out_unlock;
2264 }
2265
2266 trans = btrfs_join_transaction(root);
2267 if (IS_ERR(trans)) {
2268 ret = PTR_ERR(trans);
2269 goto out_unlock;
2270 }
2271
2272 key.objectid = backref->inum;
2273 key.type = BTRFS_EXTENT_DATA_KEY;
2274 key.offset = backref->file_pos;
2275
2276 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2277 if (ret < 0) {
2278 goto out_free_path;
2279 } else if (ret > 0) {
2280 ret = 0;
2281 goto out_free_path;
2282 }
2283
2284 extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2285 struct btrfs_file_extent_item);
2286
2287 if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2288 backref->generation)
2289 goto out_free_path;
2290
2291 btrfs_release_path(path);
2292
2293 start = backref->file_pos;
2294 if (backref->extent_offset < old->extent_offset + old->offset)
2295 start += old->extent_offset + old->offset -
2296 backref->extent_offset;
2297
2298 len = min(backref->extent_offset + backref->num_bytes,
2299 old->extent_offset + old->offset + old->len);
2300 len -= max(backref->extent_offset, old->extent_offset + old->offset);
2301
2302 ret = btrfs_drop_extents(trans, root, inode, start,
2303 start + len, 1);
2304 if (ret)
2305 goto out_free_path;
2306again:
2307 key.objectid = btrfs_ino(inode);
2308 key.type = BTRFS_EXTENT_DATA_KEY;
2309 key.offset = start;
2310
a09a0a70 2311 path->leave_spinning = 1;
38c227d8
LB
2312 if (merge) {
2313 struct btrfs_file_extent_item *fi;
2314 u64 extent_len;
2315 struct btrfs_key found_key;
2316
2317 ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
2318 if (ret < 0)
2319 goto out_free_path;
2320
2321 path->slots[0]--;
2322 leaf = path->nodes[0];
2323 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2324
2325 fi = btrfs_item_ptr(leaf, path->slots[0],
2326 struct btrfs_file_extent_item);
2327 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2328
116e0024
LB
2329 if (extent_len + found_key.offset == start &&
2330 relink_is_mergable(leaf, fi, new)) {
38c227d8
LB
2331 btrfs_set_file_extent_num_bytes(leaf, fi,
2332 extent_len + len);
2333 btrfs_mark_buffer_dirty(leaf);
2334 inode_add_bytes(inode, len);
2335
2336 ret = 1;
2337 goto out_free_path;
2338 } else {
2339 merge = false;
2340 btrfs_release_path(path);
2341 goto again;
2342 }
2343 }
2344
2345 ret = btrfs_insert_empty_item(trans, root, path, &key,
2346 sizeof(*extent));
2347 if (ret) {
2348 btrfs_abort_transaction(trans, root, ret);
2349 goto out_free_path;
2350 }
2351
2352 leaf = path->nodes[0];
2353 item = btrfs_item_ptr(leaf, path->slots[0],
2354 struct btrfs_file_extent_item);
2355 btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2356 btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2357 btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2358 btrfs_set_file_extent_num_bytes(leaf, item, len);
2359 btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2360 btrfs_set_file_extent_generation(leaf, item, trans->transid);
2361 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2362 btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2363 btrfs_set_file_extent_encryption(leaf, item, 0);
2364 btrfs_set_file_extent_other_encoding(leaf, item, 0);
2365
2366 btrfs_mark_buffer_dirty(leaf);
2367 inode_add_bytes(inode, len);
a09a0a70 2368 btrfs_release_path(path);
38c227d8
LB
2369
2370 ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2371 new->disk_len, 0,
2372 backref->root_id, backref->inum,
2373 new->file_pos, 0); /* start - extent_offset */
2374 if (ret) {
2375 btrfs_abort_transaction(trans, root, ret);
2376 goto out_free_path;
2377 }
2378
2379 ret = 1;
2380out_free_path:
2381 btrfs_release_path(path);
a09a0a70 2382 path->leave_spinning = 0;
38c227d8
LB
2383 btrfs_end_transaction(trans, root);
2384out_unlock:
2385 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2386 &cached, GFP_NOFS);
2387 iput(inode);
2388 return ret;
2389}
2390
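/*
 * Worked example for the start/len clamping in relink_extent_backref(),
 * with illustrative values: old->extent_offset = 0, old->offset = 8192,
 * old->len = 16384, backref->file_pos = 0,
 * backref->extent_offset = 4096, backref->num_bytes = 32768.
 *
 *	start = 0 + (0 + 8192 - 4096) = 4096
 *	len   = min(4096 + 32768, 0 + 8192 + 16384)
 *		- max(4096, 0 + 8192)
 *	      = 24576 - 8192 = 16384
 *
 * i.e. only the part of the backref that overlaps the defragged range
 * of the old extent is dropped and relinked to the new extent.
 */
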
static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
{
	struct old_sa_defrag_extent *old, *tmp;

	if (!new)
		return;

	list_for_each_entry_safe(old, tmp, &new->head, list) {
		list_del(&old->list);
		kfree(old);
	}
	kfree(new);
}

static void relink_file_extents(struct new_sa_defrag_extent *new)
{
	struct btrfs_path *path;
	struct sa_defrag_extent_backref *backref;
	struct sa_defrag_extent_backref *prev = NULL;
	struct inode *inode;
	struct btrfs_root *root;
	struct rb_node *node;
	int ret;

	inode = new->inode;
	root = BTRFS_I(inode)->root;

	path = btrfs_alloc_path();
	if (!path)
		return;

	if (!record_extent_backrefs(path, new)) {
		btrfs_free_path(path);
		goto out;
	}
	btrfs_release_path(path);

	while (1) {
		node = rb_first(&new->root);
		if (!node)
			break;
		rb_erase(node, &new->root);

		backref = rb_entry(node, struct sa_defrag_extent_backref, node);

		ret = relink_extent_backref(path, prev, backref);
		WARN_ON(ret < 0);

		kfree(prev);

		if (ret == 1)
			prev = backref;
		else
			prev = NULL;
		cond_resched();
	}
	kfree(prev);

	btrfs_free_path(path);
out:
	free_sa_defrag_extent(new);

	atomic_dec(&root->fs_info->defrag_running);
	wake_up(&root->fs_info->transaction_wait);
}

static struct new_sa_defrag_extent *
record_old_file_extents(struct inode *inode,
			struct btrfs_ordered_extent *ordered)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct old_sa_defrag_extent *old;
	struct new_sa_defrag_extent *new;
	int ret;

	new = kmalloc(sizeof(*new), GFP_NOFS);
	if (!new)
		return NULL;

	new->inode = inode;
	new->file_pos = ordered->file_offset;
	new->len = ordered->len;
	new->bytenr = ordered->start;
	new->disk_len = ordered->disk_len;
	new->compress_type = ordered->compress_type;
	new->root = RB_ROOT;
	INIT_LIST_HEAD(&new->head);

	path = btrfs_alloc_path();
	if (!path)
		goto out_kfree;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = new->file_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free_path;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	/* find out all the old extents for the file range */
	while (1) {
		struct btrfs_file_extent_item *extent;
		struct extent_buffer *l;
		int slot;
		u64 num_bytes;
		u64 offset;
		u64 end;
		u64 disk_bytenr;
		u64 extent_offset;

		l = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out_free_path;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid != btrfs_ino(inode))
			break;
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			break;
		if (key.offset >= new->file_pos + new->len)
			break;

		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);

		num_bytes = btrfs_file_extent_num_bytes(l, extent);
		if (key.offset + num_bytes < new->file_pos)
			goto next;

		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
		if (!disk_bytenr)
			goto next;

		extent_offset = btrfs_file_extent_offset(l, extent);

		old = kmalloc(sizeof(*old), GFP_NOFS);
		if (!old)
			goto out_free_path;

		offset = max(new->file_pos, key.offset);
		end = min(new->file_pos + new->len, key.offset + num_bytes);

		old->bytenr = disk_bytenr;
		old->extent_offset = extent_offset;
		old->offset = offset - key.offset;
		old->len = end - offset;
		old->new = new;
		old->count = 0;
		list_add_tail(&old->list, &new->head);
next:
		path->slots[0]++;
		cond_resched();
	}

	btrfs_free_path(path);
	atomic_inc(&root->fs_info->defrag_running);

	return new;

out_free_path:
	btrfs_free_path(path);
out_kfree:
	free_sa_defrag_extent(new);
	return NULL;
}

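/*
 * Worked example of the clamping above (invented numbers): for a
 * defragged range new->file_pos = 16384, new->len = 8192 overlapping an
 * old extent item at key.offset = 12288 with num_bytes = 16384, we get
 * offset = max(16384, 12288) = 16384 and end = min(24576, 28672) = 24576,
 * so old->offset = 16384 - 12288 = 4096 and old->len = 8192: only the
 * overlapping tail of the old extent is recorded for relinking.
 */
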
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
	struct inode *inode = ordered_extent->inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct new_sa_defrag_extent *new = NULL;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->len;
	bool nolock;
	bool truncated = false;

	nolock = btrfs_is_free_space_inode(inode);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (nolock)
			trans = btrfs_join_transaction_nolock(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 0, &cached_state);

	ret = test_range_bit(io_tree, ordered_extent->file_offset,
			ordered_extent->file_offset + ordered_extent->len - 1,
			EXTENT_DEFRAG, 1, cached_state);
	if (ret) {
		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
		if (last_snapshot >= BTRFS_I(inode)->generation)
			/* the inode is shared */
			new = record_old_file_extents(inode, ordered_extent);

		clear_extent_bit(io_tree, ordered_extent->file_offset,
			ordered_extent->file_offset + ordered_extent->len - 1,
			EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
	}

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_unlock;
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
	} else {
		BUG_ON(root == root->fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						logical_len, logical_len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
	}
	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
			   ordered_extent->file_offset, ordered_extent->len,
			   trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_unlock;
	}

	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, root, ret);
		goto out_unlock;
	}
	ret = 0;
out_unlock:
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
out:
	if (root != root->fs_info->tree_root)
		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
	if (trans)
		btrfs_end_transaction(trans, root);

	if (ret || truncated) {
		u64 start, end;

		if (truncated)
			start = ordered_extent->file_offset + logical_len;
		else
			start = ordered_extent->file_offset;
		end = ordered_extent->file_offset + ordered_extent->len - 1;
		clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);

		/* Drop the cache for the part of the extent we didn't write. */
		btrfs_drop_extent_cache(inode, start, end, 0);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator.  We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 */
		if ((ret || !logical_len) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
			btrfs_free_reserved_extent(root, ordered_extent->start,
						   ordered_extent->disk_len);
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* for snapshot-aware defrag */
	if (new) {
		if (ret) {
			free_sa_defrag_extent(new);
			atomic_dec(&root->fs_info->defrag_running);
		} else {
			relink_file_extents(new);
		}
	}

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}

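/*
 * Summarizing the happy path above (a reading of the code, not new
 * behaviour): once every page of the ordered extent is on disk we join
 * a transaction, record the new file extent item (or mark a
 * preallocated extent as written), insert the checksums accumulated at
 * submission time, and push the new i_size into the inode item; only
 * then is the ordered extent removed, so waiters such as
 * btrfs_wait_ordered_range() see the range fully accounted for.
 */
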
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;
	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct btrfs_workers *workers;

	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);

	ClearPagePrivate2(page);
	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					    end - start + 1, uptodate))
		return 0;

	ordered_extent->work.func = finish_ordered_fn;
	ordered_extent->work.flags = 0;

	if (btrfs_is_free_space_inode(inode))
		workers = &root->fs_info->endio_freespace_worker;
	else
		workers = &root->fs_info->endio_write_workers;
	btrfs_queue_worker(workers, &ordered_extent->work);

	return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, the code in
 * extent_io.c will try to find good copies for us.
 */
static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	size_t offset = start - page_offset(page);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum_expected;
	u32 csum = ~(u32)0;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		goto good;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	phy_offset >>= inode->i_sb->s_blocksize_bits;
	csum_expected = *(((u32 *)io_bio->csum) + phy_offset);

	kaddr = kmap_atomic(page);
	csum = btrfs_csum_data(kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != csum_expected)
		goto zeroit;

	kunmap_atomic(kaddr);
good:
	return 0;

zeroit:
	if (__ratelimit(&_rs))
		btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
			btrfs_ino(page->mapping->host), start, csum, csum_expected);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	if (csum_expected == 0)
		return 0;
	return -EIO;
}

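/*
 * A worked example of the csum lookup above, assuming 4KiB blocks
 * (s_blocksize_bits == 12) and 4-byte crc32c sums: a fragment whose
 * phy_offset is 8192 bytes into the bio covers the third block of the
 * bio, so phy_offset >> 12 == 2 and the expected checksum is the third
 * u32 in io_bio->csum. The freshly computed btrfs_csum_data() result
 * must match it, otherwise the range is poisoned and -EIO is returned
 * so the repair code in extent_io.c can try another mirror.
 */
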
struct delayed_iput {
	struct list_head list;
	struct inode *inode;
};

/* JDM: If this is fs-wide, why can't we add a pointer to
 * btrfs_inode instead and avoid the allocation? */
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *delayed;

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
	delayed->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}

void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
}

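/*
 * Note on the atomic_add_unless() fast path above: it decrements
 * i_count unless the count is already 1, i.e. unless we hold the final
 * reference. Only the final reference is deferred to the list, because
 * only the final iput() can kick off eviction work that is unsafe to
 * run from the contexts that call btrfs_add_delayed_iput().
 */
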
/*
 * This is called at transaction commit time. If there are no orphan
 * files in the subvolume, it removes the orphan item and frees the
 * block_rsv structure.
 */
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (atomic_read(&root->orphan_inodes) ||
	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
		return;

	spin_lock(&root->orphan_lock);
	if (atomic_read(&root->orphan_inodes)) {
		spin_unlock(&root->orphan_lock);
		return;
	}

	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
		spin_unlock(&root->orphan_lock);
		return;
	}

	block_rsv = root->orphan_block_rsv;
	root->orphan_block_rsv = NULL;
	spin_unlock(&root->orphan_lock);

	if (root->orphan_item_inserted &&
	    btrfs_root_refs(&root->root_item) > 0) {
		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
					    root->root_key.objectid);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
		else
			root->orphan_item_inserted = 0;
	}

	if (block_rsv) {
		WARN_ON(block_rsv->size > 0);
		btrfs_free_block_rsv(root, block_rsv);
	}
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 *
 * NOTE: caller of this function should reserve 5 units of metadata for
 * this function.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = NULL;
	int reserve = 0;
	int insert = 0;
	int ret;

	if (!root->orphan_block_rsv) {
		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
		if (!block_rsv)
			return -ENOMEM;
	}

	spin_lock(&root->orphan_lock);
	if (!root->orphan_block_rsv) {
		root->orphan_block_rsv = block_rsv;
	} else if (block_rsv) {
		btrfs_free_block_rsv(root, block_rsv);
		block_rsv = NULL;
	}

	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
			      &BTRFS_I(inode)->runtime_flags)) {
#if 0
		/*
		 * For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting. But this introduces backward
		 * compatibility issue.
		 */
		if (!xchg(&root->orphan_item_inserted, 1))
			insert = 2;
		else
			insert = 1;
#endif
		insert = 1;
		atomic_inc(&root->orphan_inodes);
	}

	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
			      &BTRFS_I(inode)->runtime_flags))
		reserve = 1;
	spin_unlock(&root->orphan_lock);

	/* grab metadata reservation from transaction handle */
	if (reserve) {
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
	}

	/* insert an orphan item to track this unlinked/truncated file */
	if (insert >= 1) {
		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
		if (ret) {
			atomic_dec(&root->orphan_inodes);
			if (reserve) {
				clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
					  &BTRFS_I(inode)->runtime_flags);
				btrfs_orphan_release_metadata(inode);
			}
			if (ret != -EEXIST) {
				clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
					  &BTRFS_I(inode)->runtime_flags);
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
		}
		ret = 0;
	}

	/* insert an orphan item to track that the subvolume contains orphans */
	if (insert >= 2) {
		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
					       root->root_key.objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}
	}
	return 0;
}

3024/*
3025 * We have done the truncate/delete so we can go ahead and remove the orphan
3026 * item for this particular inode.
3027 */
48a3b636
ES
3028static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3029 struct inode *inode)
7b128766
JB
3030{
3031 struct btrfs_root *root = BTRFS_I(inode)->root;
d68fc57b
YZ
3032 int delete_item = 0;
3033 int release_rsv = 0;
7b128766
JB
3034 int ret = 0;
3035
d68fc57b 3036 spin_lock(&root->orphan_lock);
8a35d95f
JB
3037 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3038 &BTRFS_I(inode)->runtime_flags))
d68fc57b 3039 delete_item = 1;
7b128766 3040
72ac3c0d
JB
3041 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3042 &BTRFS_I(inode)->runtime_flags))
d68fc57b 3043 release_rsv = 1;
d68fc57b 3044 spin_unlock(&root->orphan_lock);
7b128766 3045
703c88e0 3046 if (delete_item) {
8a35d95f 3047 atomic_dec(&root->orphan_inodes);
703c88e0
FDBM
3048 if (trans)
3049 ret = btrfs_del_orphan_item(trans, root,
3050 btrfs_ino(inode));
8a35d95f 3051 }
7b128766 3052
703c88e0
FDBM
3053 if (release_rsv)
3054 btrfs_orphan_release_metadata(inode);
3055
4ef31a45 3056 return ret;
7b128766
JB
3057}
3058
3059/*
3060 * this cleans up any orphans that may be left on the list from the last use
3061 * of this root.
3062 */
66b4ffd1 3063int btrfs_orphan_cleanup(struct btrfs_root *root)
7b128766
JB
3064{
3065 struct btrfs_path *path;
3066 struct extent_buffer *leaf;
7b128766
JB
3067 struct btrfs_key key, found_key;
3068 struct btrfs_trans_handle *trans;
3069 struct inode *inode;
8f6d7f4f 3070 u64 last_objectid = 0;
7b128766
JB
3071 int ret = 0, nr_unlink = 0, nr_truncate = 0;
3072
d68fc57b 3073 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
66b4ffd1 3074 return 0;
c71bf099
YZ
3075
3076 path = btrfs_alloc_path();
66b4ffd1
JB
3077 if (!path) {
3078 ret = -ENOMEM;
3079 goto out;
3080 }
7b128766
JB
3081 path->reada = -1;
3082
3083 key.objectid = BTRFS_ORPHAN_OBJECTID;
3084 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
3085 key.offset = (u64)-1;
3086
7b128766
JB
3087 while (1) {
3088 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
66b4ffd1
JB
3089 if (ret < 0)
3090 goto out;
7b128766
JB
3091
3092 /*
3093 * if ret == 0 means we found what we were searching for, which
25985edc 3094 * is weird, but possible, so only screw with path if we didn't
7b128766
JB
3095 * find the key and see if we have stuff that matches
3096 */
3097 if (ret > 0) {
66b4ffd1 3098 ret = 0;
7b128766
JB
3099 if (path->slots[0] == 0)
3100 break;
3101 path->slots[0]--;
3102 }
3103
3104 /* pull out the item */
3105 leaf = path->nodes[0];
7b128766
JB
3106 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3107
3108 /* make sure the item matches what we want */
3109 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3110 break;
3111 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
3112 break;
3113
3114 /* release the path since we're done with it */
b3b4aa74 3115 btrfs_release_path(path);
7b128766
JB
3116
3117 /*
3118 * this is where we are basically btrfs_lookup, without the
3119 * crossing root thing. we store the inode number in the
3120 * offset of the orphan item.
3121 */
8f6d7f4f
JB
3122
3123 if (found_key.offset == last_objectid) {
c2cf52eb
SK
3124 btrfs_err(root->fs_info,
3125 "Error removing orphan entry, stopping orphan cleanup");
8f6d7f4f
JB
3126 ret = -EINVAL;
3127 goto out;
3128 }
3129
3130 last_objectid = found_key.offset;
3131
5d4f98a2
YZ
3132 found_key.objectid = found_key.offset;
3133 found_key.type = BTRFS_INODE_ITEM_KEY;
3134 found_key.offset = 0;
73f73415 3135 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
8c6ffba0 3136 ret = PTR_ERR_OR_ZERO(inode);
a8c9e576 3137 if (ret && ret != -ESTALE)
66b4ffd1 3138 goto out;
7b128766 3139
f8e9e0b0
AJ
3140 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3141 struct btrfs_root *dead_root;
3142 struct btrfs_fs_info *fs_info = root->fs_info;
3143 int is_dead_root = 0;
3144
3145 /*
3146 * this is an orphan in the tree root. Currently these
3147 * could come from 2 sources:
3148 * a) a snapshot deletion in progress
3149 * b) a free space cache inode
3150 * We need to distinguish those two, as the snapshot
3151 * orphan must not get deleted.
3152 * find_dead_roots already ran before us, so if this
3153 * is a snapshot deletion, we should find the root
3154 * in the dead_roots list
3155 */
3156 spin_lock(&fs_info->trans_lock);
3157 list_for_each_entry(dead_root, &fs_info->dead_roots,
3158 root_list) {
3159 if (dead_root->root_key.objectid ==
3160 found_key.objectid) {
3161 is_dead_root = 1;
3162 break;
3163 }
3164 }
3165 spin_unlock(&fs_info->trans_lock);
3166 if (is_dead_root) {
3167 /* prevent this orphan from being found again */
3168 key.offset = found_key.objectid - 1;
3169 continue;
3170 }
3171 }
7b128766 3172 /*
a8c9e576
JB
3173 * Inode is already gone but the orphan item is still there,
3174 * kill the orphan item.
7b128766 3175 */
a8c9e576
JB
3176 if (ret == -ESTALE) {
3177 trans = btrfs_start_transaction(root, 1);
66b4ffd1
JB
3178 if (IS_ERR(trans)) {
3179 ret = PTR_ERR(trans);
3180 goto out;
3181 }
c2cf52eb
SK
3182 btrfs_debug(root->fs_info, "auto deleting %Lu",
3183 found_key.objectid);
a8c9e576
JB
3184 ret = btrfs_del_orphan_item(trans, root,
3185 found_key.objectid);
5b21f2ed 3186 btrfs_end_transaction(trans, root);
4ef31a45
JB
3187 if (ret)
3188 goto out;
7b128766
JB
3189 continue;
3190 }
3191
a8c9e576
JB
3192 /*
3193 * add this inode to the orphan list so btrfs_orphan_del does
3194 * the proper thing when we hit it
3195 */
8a35d95f
JB
3196 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3197 &BTRFS_I(inode)->runtime_flags);
925396ec 3198 atomic_inc(&root->orphan_inodes);
a8c9e576 3199
7b128766
JB
3200 /* if we have links, this was a truncate, lets do that */
3201 if (inode->i_nlink) {
fae7f21c 3202 if (WARN_ON(!S_ISREG(inode->i_mode))) {
a41ad394
JB
3203 iput(inode);
3204 continue;
3205 }
7b128766 3206 nr_truncate++;
f3fe820c
JB
3207
3208 /* 1 for the orphan item deletion. */
3209 trans = btrfs_start_transaction(root, 1);
3210 if (IS_ERR(trans)) {
c69b26b0 3211 iput(inode);
f3fe820c
JB
3212 ret = PTR_ERR(trans);
3213 goto out;
3214 }
3215 ret = btrfs_orphan_add(trans, inode);
3216 btrfs_end_transaction(trans, root);
c69b26b0
JB
3217 if (ret) {
3218 iput(inode);
f3fe820c 3219 goto out;
c69b26b0 3220 }
f3fe820c 3221
66b4ffd1 3222 ret = btrfs_truncate(inode);
4a7d0f68
JB
3223 if (ret)
3224 btrfs_orphan_del(NULL, inode);
7b128766
JB
3225 } else {
3226 nr_unlink++;
3227 }
3228
3229 /* this will do delete_inode and everything for us */
3230 iput(inode);
66b4ffd1
JB
3231 if (ret)
3232 goto out;
7b128766 3233 }
3254c876
MX
3234 /* release the path since we're done with it */
3235 btrfs_release_path(path);
3236
d68fc57b
YZ
3237 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3238
3239 if (root->orphan_block_rsv)
3240 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3241 (u64)-1);
3242
3243 if (root->orphan_block_rsv || root->orphan_item_inserted) {
7a7eaa40 3244 trans = btrfs_join_transaction(root);
66b4ffd1
JB
3245 if (!IS_ERR(trans))
3246 btrfs_end_transaction(trans, root);
d68fc57b 3247 }
7b128766
JB
3248
3249 if (nr_unlink)
4884b476 3250 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
7b128766 3251 if (nr_truncate)
4884b476 3252 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
66b4ffd1
JB
3253
3254out:
3255 if (ret)
c2cf52eb
SK
3256 btrfs_crit(root->fs_info,
3257 "could not do orphan cleanup %d", ret);
66b4ffd1
JB
3258 btrfs_free_path(path);
3259 return ret;
7b128766
JB
3260}
3261
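/*
 * For orientation (a reading of the key layout, assuming the usual
 * definition of BTRFS_ORPHAN_OBJECTID as -5ULL in this era's headers):
 * orphan items live under (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY,
 * inode-number), so an unlinked-but-open inode 257 is tracked as key
 * (-5ULL, ORPHAN_ITEM, 257). Searching from offset (u64)-1 and stepping
 * the slot back therefore walks orphans in descending inode order,
 * which is why btrfs_orphan_cleanup() tracks last_objectid to detect a
 * failure to delete an entry.
 */
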
/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
					strlen(POSIX_ACL_XATTR_ACCESS));
		xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
					strlen(POSIX_ACL_XATTR_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}

d352ac68
CM
3331/*
3332 * read an inode from the btree into the in-memory inode
3333 */
5d4f98a2 3334static void btrfs_read_locked_inode(struct inode *inode)
39279cc3
CM
3335{
3336 struct btrfs_path *path;
5f39d397 3337 struct extent_buffer *leaf;
39279cc3 3338 struct btrfs_inode_item *inode_item;
0b86a832 3339 struct btrfs_timespec *tspec;
39279cc3
CM
3340 struct btrfs_root *root = BTRFS_I(inode)->root;
3341 struct btrfs_key location;
67de1176 3342 unsigned long ptr;
46a53cca 3343 int maybe_acls;
618e21d5 3344 u32 rdev;
39279cc3 3345 int ret;
2f7e33d4 3346 bool filled = false;
63541927 3347 int first_xattr_slot;
2f7e33d4
MX
3348
3349 ret = btrfs_fill_inode(inode, &rdev);
3350 if (!ret)
3351 filled = true;
39279cc3
CM
3352
3353 path = btrfs_alloc_path();
1748f843
MF
3354 if (!path)
3355 goto make_bad;
3356
39279cc3 3357 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
dc17ff8f 3358
39279cc3 3359 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
5f39d397 3360 if (ret)
39279cc3 3361 goto make_bad;
39279cc3 3362
5f39d397 3363 leaf = path->nodes[0];
2f7e33d4
MX
3364
3365 if (filled)
67de1176 3366 goto cache_index;
2f7e33d4 3367
5f39d397
CM
3368 inode_item = btrfs_item_ptr(leaf, path->slots[0],
3369 struct btrfs_inode_item);
5f39d397 3370 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
bfe86848 3371 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
2f2f43d3
EB
3372 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3373 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
dbe674a9 3374 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
5f39d397
CM
3375
3376 tspec = btrfs_inode_atime(inode_item);
3377 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3378 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3379
3380 tspec = btrfs_inode_mtime(inode_item);
3381 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3382 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3383
3384 tspec = btrfs_inode_ctime(inode_item);
3385 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3386 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3387
a76a3cd4 3388 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
e02119d5 3389 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
5dc562c5
JB
3390 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3391
3392 /*
3393 * If we were modified in the current generation and evicted from memory
3394 * and then re-read we need to do a full sync since we don't have any
3395 * idea about which extents were modified before we were evicted from
3396 * cache.
3397 */
3398 if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3399 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3400 &BTRFS_I(inode)->runtime_flags);
3401
0c4d2d95 3402 inode->i_version = btrfs_inode_sequence(leaf, inode_item);
e02119d5 3403 inode->i_generation = BTRFS_I(inode)->generation;
618e21d5 3404 inode->i_rdev = 0;
5f39d397
CM
3405 rdev = btrfs_inode_rdev(leaf, inode_item);
3406
aec7477b 3407 BTRFS_I(inode)->index_cnt = (u64)-1;
d2fb3437 3408 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
67de1176
MX
3409
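	/*
	 * The inode item is typically followed immediately by its first
	 * inode ref (or extref) in the same leaf, so peek at the next
	 * slot to cache the directory index without a second lookup.
	 */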
cache_index:
	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(inode))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(inode), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(root->fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(inode),
				  root->root_key.objectid, ret);
	}
	btrfs_free_path(path);

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_update_iflags(inode);
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

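	/*
	 * The map token caches the last mapped extent buffer page across
	 * the setters below, so each of these small writes into the inode
	 * item doesn't have to re-map the page.
	 */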
	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
				   &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);
	btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
					 &token);
	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}

/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
				 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}

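/*
 * btrfs_delayed_update_inode() can fail with -ENOSPC when it cannot reserve
 * metadata space for the delayed item; callers that must make progress
 * anyway fall back to updating the inode item in place.
 */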
noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, root, inode);
	return ret;
}

/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *dir, struct inode *inode,
				const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have dir index, we have to get it by looking up
	 * the inode ref, since we get the inode ref, remove it directly,
	 * it is unnecessary to do delayed deletion.
	 *
	 * But if we have dir index, needn't search inode ref to get it.
	 * Since the inode ref is close to the inode item, it is better
	 * that we delay to delete it, and just do this deletion when
	 * we update the inode item.
	 */
	if (BTRFS_I(inode)->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = BTRFS_I(inode)->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		btrfs_info(root->fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name_len, name, ino, dir_ino);
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}
skip_backref:
	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir_ino);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	if (ret == -ENOENT)
		ret = 0;
	else if (ret)
		btrfs_abort_transaction(trans, root, ret);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode_inc_iversion(inode);
	inode_inc_iversion(dir);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
out:
	return ret;
}

int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int ret;
	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (!ret) {
		drop_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	}
	return ret;
}

/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try and see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret;

	/*
	 * 1 for the possible orphan item
	 * 1 for the dir item
	 * 1 for the dir index
	 * 1 for the inode ref
	 * 1 for the inode
	 */
	trans = btrfs_start_transaction(root, 5);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	if (PTR_ERR(trans) == -ENOSPC) {
		u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return trans;
		ret = btrfs_cond_migrate_bytes(root->fs_info,
					       &root->fs_info->trans_block_rsv,
					       num_bytes, 5);
		if (ret) {
			btrfs_end_transaction(trans, root);
			return ERR_PTR(ret);
		}
		trans->block_rsv = &root->fs_info->trans_block_rsv;
		trans->bytes_reserved = num_bytes;
	}
	return trans;
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);

	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret)
		goto out;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret)
			goto out;
	}

out:
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
	return ret;
}

int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		if (!di)
			ret = -ENOENT;
		else
			ret = PTR_ERR(di);
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir_ino, &index, name, name_len);
	if (ret < 0) {
		if (ret != -ENOENT) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
		index = key.offset;
	}
	btrfs_release_path(path);

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode_inc_iversion(dir);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, root, dir);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
		return -EPERM;

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto out;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);
out:
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);

	return err;
}

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u64 last_size = (u64)-1;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	int ret;
	int err = 0;
	u64 ino = btrfs_ino(inode);

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = -1;

	/*
	 * We want to drop from the next block forward in case this new size is
	 * not block aligned since we will be keeping the last block of the
	 * extent just the way it is.
	 */
	if (root->ref_cows || root == root->fs_info->tree_root)
		btrfs_drop_extent_cache(inode, ALIGN(new_size,
					root->sectorsize), (u64)-1, 0);

	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the logged items.  So we shouldn't kill the
	 * delayed items.
	 */
	if (min_type == 0 && root == BTRFS_I(inode)->root)
		btrfs_kill_delayed_inode_items(inode);

	key.objectid = ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

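	/*
	 * Walk the items backwards from the highest possible key for this
	 * ino, batching runs of adjacent slots into a single
	 * btrfs_del_items() call (pending_del_slot/pending_del_nr) so we
	 * don't rewrite the leaf once per item.
	 */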
search_again:
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret > 0) {
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);

		if (found_key.objectid != ino)
			break;

		if (found_type < min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (del_item)
			last_size = found_key.offset;
		else
			last_size = new_size;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = ALIGN(new_size -
						found_key.offset,
						root->sectorsize);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (root->ref_cows && extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0) {
					found_extent = 1;
					if (root->ref_cows)
						inode_sub_bytes(inode, num_dec);
				}
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
				btrfs_truncate_item(root, path, size, 1);
			} else if (root->ref_cows) {
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
			}
		}
delete:
		if (del_item) {
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				BUG();
			}
		} else {
			break;
		}
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
			btrfs_set_path_blocking(path);
			ret = btrfs_free_extent(trans, root, extent_start,
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						ino, extent_offset, 0);
			BUG_ON(ret);
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				if (ret) {
					btrfs_abort_transaction(trans,
								root, ret);
					goto error;
				}
				pending_del_nr = 0;
			}
			btrfs_release_path(path);
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}
error:
	if (last_size != (u64)-1)
		btrfs_ordered_update_i_size(inode, last_size, NULL);
	btrfs_free_path(path);
	return err;
}

/*
 * btrfs_truncate_page - read, zero a chunk and write a page
 * @inode - inode that we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range respective to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the page for the "from" offset and cow the page and zero the
 * part we want to zero.  This is used with truncate and hole punching.
 */
int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
			int front)
{
	struct address_space *mapping = inode->i_mapping;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	int ret = 0;
	u64 page_start;
	u64 page_end;

	if ((offset & (blocksize - 1)) == 0 &&
	    (!len || ((len & (blocksize - 1)) == 0)))
		goto out;
	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret)
		goto out;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
		ret = -ENOMEM;
		goto out;
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
	set_page_extent_mapped(page);

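	/*
	 * If an ordered extent is still in flight over this page, wait for
	 * it to complete and retry; the zeroing must not race with
	 * writeback finishing against the old extents.
	 */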
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			 EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
					&cached_state);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	if (offset != PAGE_CACHE_SIZE) {
		if (!len)
			len = PAGE_CACHE_SIZE - offset;
		kaddr = kmap(page);
		if (front)
			memset(kaddr, 0, offset);
		else
			memset(kaddr + offset, 0, len);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
			     u64 offset, u64 len)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * Still need to make sure the inode looks like it's been updated so
	 * that any holes get logged if we fsync.
	 */
	if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
		BTRFS_I(inode)->last_trans = root->fs_info->generation;
		BTRFS_I(inode)->last_sub_trans = root->log_transid;
		BTRFS_I(inode)->last_log_commit = root->last_log_commit;
		return 0;
	}

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_end_transaction(trans, root);
		return ret;
	}

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, len, 0, len, 0, 0, 0);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	else
		btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	return ret;
}

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
 * for the range between oldsize and size
 */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 hole_start = ALIGN(oldsize, root->sectorsize);
	u64 block_end = ALIGN(size, root->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/*
	 * If our size started in the middle of a page we need to zero out the
	 * rest of the page before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	err = btrfs_truncate_page(inode, oldsize, 0, 0);
	if (err)
		return err;

	if (size <= hole_start)
		return 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, hole_start,
						     block_end - hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset, 0);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, root->sectorsize);
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			struct extent_map *hole_em;
			hole_size = last_byte - cur_offset;

			err = maybe_insert_hole(root, inode, cur_offset,
						hole_size);
			if (err)
				break;
			btrfs_drop_extent_cache(inode, cur_offset,
						cur_offset + hole_size - 1, 0);
			hole_em = alloc_extent_map();
			if (!hole_em) {
				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					&BTRFS_I(inode)->runtime_flags);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
			hole_em->compress_type = BTRFS_COMPRESS_NONE;
			hole_em->generation = root->fs_info->generation;

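			/*
			 * Another task may race an overlapping extent map
			 * into the tree; on -EEXIST drop the cached range
			 * and retry the insert until it sticks.
			 */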
			while (1) {
				write_lock(&em_tree->lock);
				err = add_extent_mapping(em_tree, hole_em, 1);
				write_unlock(&em_tree->lock);
				if (err != -EEXIST)
					break;
				btrfs_drop_extent_cache(inode, cur_offset,
							cur_offset +
							hole_size - 1, 0);
			}
			free_extent_map(hole_em);
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}

static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS sets these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);

	if (newsize > oldsize) {
		truncate_pagecache(inode, newsize);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret)
			return ret;

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		ret = btrfs_update_inode(trans, root, inode);
		btrfs_end_transaction(trans, root);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero.  Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		/*
		 * 1 for the orphan item we're going to add
		 * 1 for the orphan item deletion.
		 */
		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		/*
		 * We need to do this in case we fail at _any_ point during the
		 * actual truncate.  Once we do the truncate_setsize we could
		 * invalidate pages which forces any outstanding ordered io to
		 * be instantly completed which will give us extents that need
		 * to be truncated.  If we fail to get an orphan inode down we
		 * could have left over extents that were never meant to live,
		 * so we need to guarantee from this point on that everything
		 * will be consistent.
		 */
		ret = btrfs_orphan_add(trans, inode);
		btrfs_end_transaction(trans, root);
		if (ret)
			return ret;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);

		/* Disable nonlocked read DIO to avoid the endless truncate */
		btrfs_inode_block_unlocked_dio(inode);
		inode_dio_wait(inode);
		btrfs_inode_resume_unlocked_dio(inode);

		ret = btrfs_truncate(inode);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * failed to truncate, disk_i_size is only adjusted down
			 * as we remove extents, so it should represent the true
			 * size of the inode, so reset the in memory size and
			 * delete our orphan entry.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans)) {
				btrfs_orphan_del(NULL, inode);
				return ret;
			}
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
			err = btrfs_orphan_del(trans, inode);
			if (err)
				btrfs_abort_transaction(trans, root, err);
			btrfs_end_transaction(trans, root);
		}
	}

	return ret;
}

static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(inode);

		if (!err && attr->ia_valid & ATTR_MODE)
			err = btrfs_acl_chmod(inode);
	}

	return err;
}

/*
 * While truncating the inode pages during eviction, we get the VFS calling
 * btrfs_invalidatepage() against each page of the inode.  This is slow because
 * the calls to btrfs_invalidatepage() result in a huge amount of calls to
 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
 * extent_state structures over and over, wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
 * those expensive operations on a per page basis and do only the ordered io
 * finishing, while we release here the extent_map and extent_state structures,
 * without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages(&inode->i_data, 0);

	write_lock(&map_tree->lock);
	while (!RB_EMPTY_ROOT(&map_tree->map)) {
		struct extent_map *em;

		node = rb_first(&map_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(map_tree, em);
		free_extent_map(em);
	}
	write_unlock(&map_tree->lock);

	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		atomic_inc(&state->refs);
		spin_unlock(&io_tree->lock);

		lock_extent_bits(io_tree, state->start, state->end,
				 0, &cached_state);
		clear_extent_bit(io_tree, state->start, state->end,
				 EXTENT_LOCKED | EXTENT_DIRTY |
				 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
				 EXTENT_DEFRAG, 1, 1,
				 &cached_state, GFP_NOFS);
		free_extent_state(state);

		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}

void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv, *global_rsv;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	int ret;

	trace_btrfs_inode_evict(inode);

	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(inode)))
		goto no_delete;

	if (is_bad_inode(inode)) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	if (root->fs_info->log_root_recovering) {
		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
				 &BTRFS_I(inode)->runtime_flags));
		goto no_delete;
	}

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
		goto no_delete;
	}

	ret = btrfs_commit_inode_delayed_inode(inode);
	if (ret) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		btrfs_orphan_del(NULL, inode);
		goto no_delete;
	}
	rsv->size = min_size;
	rsv->failfast = 1;
	global_rsv = &root->fs_info->global_block_rsv;

	btrfs_i_size_write(inode, 0);

	/*
	 * This is a bit simpler than btrfs_truncate since we've already
	 * reserved our space for our orphan item in the unlink, so we just
	 * need to reserve some slack space in case we add bytes and update
	 * inode item when doing the truncate.
	 */
	while (1) {
		ret = btrfs_block_rsv_refill(root, rsv, min_size,
					     BTRFS_RESERVE_FLUSH_LIMIT);

		/*
		 * Try and steal from the global reserve since we will
		 * likely not use this space anyway, we want to try as
		 * hard as possible to get this to work.
		 */
		if (ret)
			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);

		if (ret) {
			btrfs_warn(root->fs_info,
				"Could not get space for a delete, will truncate on mount %d",
				ret);
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_orphan_del(NULL, inode);
			btrfs_free_block_rsv(root, rsv);
			goto no_delete;
		}

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;
		btrfs_end_transaction(trans, root);
		trans = NULL;
		btrfs_btree_balance_dirty(root);
	}

	btrfs_free_block_rsv(root, rsv);

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items
	 * in the tree.  They will be cleaned up on the next mount.
	 */
	if (ret == 0) {
		trans->block_rsv = root->orphan_block_rsv;
		btrfs_orphan_del(trans, inode);
	} else {
		btrfs_orphan_del(NULL, inode);
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	if (!(root == root->fs_info->tree_root ||
	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
		btrfs_return_ino(root, btrfs_ino(inode));

	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
no_delete:
	btrfs_remove_delayed_node(inode);
	clear_inode(inode);
	return;
}

/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
				   namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);

	if (IS_ERR_OR_NULL(di))
		goto out_err;

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
out_err:
	location->objectid = 0;
	goto out;
}

/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_root *root,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
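	/*
	 * Root refs live in the tree root, keyed as (parent subvol id,
	 * BTRFS_ROOT_REF_KEY, child subvol id); the item body carries the
	 * parent directory's ino and the entry name, both verified below.
	 */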
	ret = btrfs_find_item(root->fs_info->tree_root, path,
			      BTRFS_I(dir)->root->root_key.objectid,
			      location->objectid, BTRFS_ROOT_REF_KEY, NULL);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}

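/*
 * Each root keeps an rb-tree of its in-memory inodes, keyed by inode
 * number, so btrfs_invalidate_inodes() can find and drop them all when
 * the root itself goes away.
 */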
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	struct rb_node *new = &BTRFS_I(inode)->rb_node;
	u64 ino = btrfs_ino(inode);

	if (inode_unhashed(inode))
		return;
	parent = NULL;
	spin_lock(&root->inode_lock);
	p = &root->inode_tree.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_replace_node(parent, new, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			return;
		}
	}
	rb_link_node(new, parent, p);
	rb_insert_color(new, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}

static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

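/*
 * Drop every in-memory inode of the given (dead) root.  inode_lock has to
 * be released around iput(), so the walk restarts from the last seen
 * objectid whenever the tree may have changed underneath us.
 */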
void btrfs_invalidate_inodes(struct btrfs_root *root)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(&entry->vfs_inode))
			node = node->rb_left;
		else if (objectid > btrfs_ino(&entry->vfs_inode))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(&entry->vfs_inode) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from
			 * the inode cache when its usage count
			 * hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}

static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->ino;
	BTRFS_I(inode)->root = args->root;
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;
	return args->ino == btrfs_ino(inode) &&
		args->root == BTRFS_I(inode)->root;
}

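/*
 * Different subvolumes reuse the same inode numbers, so the icache hash
 * mixes the root into the key (btrfs_inode_hash) and the actor matches
 * on both the inode number and the root.
 */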
static struct inode *btrfs_iget_locked(struct super_block *s,
				       u64 objectid,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(objectid, root);

	args.ino = objectid;
	args.root = root;

	inode = iget5_locked(s, hashval, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}

/* Get an inode object given its location and corresponding root.
 * Returns in *is_new if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
		if (!is_bad_inode(inode)) {
			inode_tree_add(inode);
			unlock_new_inode(inode);
			if (new)
				*new = 1;
		} else {
			unlock_new_inode(inode);
			iput(inode);
			inode = ERR_PTR(-ESTALE);
		}
	}

	return inode;
}

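/*
 * Build an in-memory-only stub directory for a subvolume reference that
 * cannot be resolved (the root ref is gone); it is marked BTRFS_INODE_DUMMY
 * and is never written back to disk.
 */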
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &btrfs_dir_ro_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}

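/*
 * Resolve a directory entry to an inode.  Plain entries are read from
 * the current root; entries pointing at a subvolume root are chased
 * into the target root, running orphan cleanup on it first when the
 * filesystem is writable.
 */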
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	int index;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(dir, dentry, &location);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.objectid == 0)
		return ERR_PTR(-ENOENT);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

	index = srcu_read_lock(&root->fs_info->subvol_srcu);
	ret = fixup_tree_root_location(root, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
	}
	srcu_read_unlock(&root->fs_info->subvol_srcu, index);

	if (!IS_ERR(inode) && root != sub_root) {
		down_read(&root->fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&root->fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}

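/*
 * Tell the dcache to unhash dentries that belong to a root with no
 * remaining references or to the dummy empty-subvolume directory, so
 * stale names do not linger once the subvolume is gone.
 */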
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = dentry->d_inode;

	if (!inode && !IS_ROOT(dentry))
		inode = dentry->d_parent->d_inode;

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}

static void btrfs_dentry_release(struct dentry *dentry)
{
	if (dentry->d_fsdata)
		kfree(dentry->d_fsdata);
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode;

	inode = btrfs_lookup_dentry(dir, dentry);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOENT)
			inode = NULL;
		else
			return ERR_CAST(inode);
	}

	return d_splice_alias(inode, dentry);
}

unsigned char btrfs_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};

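/*
 * readdir: walk the DIR_INDEX items (DIR_ITEM for the tree root),
 * merging in directory entries that still sit in the delayed-inode
 * lists and skipping entries that are queued there for deletion.
 */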
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
	char tmp_name[32];
	char *name_ptr;
	int name_len;
	int is_curr = 0;	/* ctx->pos points to the current index? */

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;

	if (key_type == BTRFS_DIR_INDEX_KEY) {
		INIT_LIST_HEAD(&ins_list);
		INIT_LIST_HEAD(&del_list);
		btrfs_get_delayed_items(inode, &ins_list, &del_list);
	}

	btrfs_set_key_type(&key, key_type);
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(inode);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		item = btrfs_item_nr(slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != key_type)
			break;
		if (found_key.offset < ctx->pos)
			goto next;
		if (key_type == BTRFS_DIR_INDEX_KEY &&
		    btrfs_should_delete_dir_index(&del_list,
						  found_key.offset))
			goto next;

		ctx->pos = found_key.offset;
		is_curr = 1;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
		di_total = btrfs_item_size(leaf, item);

		while (di_cur < di_total) {
			struct btrfs_key location;

			if (verify_dir_item(root, leaf, di))
				break;

			name_len = btrfs_dir_name_len(leaf, di);
			if (name_len <= sizeof(tmp_name)) {
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);

			/* is this a reference to our own snapshot? If so
			 * skip it.
			 *
			 * In contrast to old kernels, we insert the snapshot's
			 * dir item and dir index after it has been created, so
			 * we won't find a reference to our own snapshot. We
			 * still keep the following code for backward
			 * compatibility.
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
			over = !dir_emit(ctx, name_ptr, name_len,
					 location.objectid, d_type);

skip:
			if (name_ptr != tmp_name)
				kfree(name_ptr);

			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
next:
		path->slots[0]++;
	}

	if (key_type == BTRFS_DIR_INDEX_KEY) {
		if (is_curr)
			ctx->pos++;
		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
		if (ret)
			goto nopos;
	}

	/* Reached end of directory/root. Bump pos past the last item. */
	ctx->pos++;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset.  This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir.  Until we re-use freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (key_type == BTRFS_DIR_INDEX_KEY) {
		if (ctx->pos >= INT_MAX)
			ctx->pos = LLONG_MAX;
		else
			ctx->pos = INT_MAX;
	}
nopos:
	ret = 0;
err:
	if (key_type == BTRFS_DIR_INDEX_KEY)
		btrfs_put_delayed_items(&ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}

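/*
 * Write out a dirty inode for the VFS.  Only data-integrity syncs do
 * real work here: they commit the running transaction (joining nolock
 * for free-space inodes while the filesystem is closing).
 */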
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret = 0;
	bool nolock = false;

	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
		nolock = true;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		if (nolock)
			trans = btrfs_join_transaction_nolock(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		ret = btrfs_commit_transaction(trans, root);
	}
	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
static int btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret == -ENOSPC) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans, root);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans, root);
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(root);

	return ret;
}

5396
5397/*
5398 * This is a copy of file_update_time. We need this so we can return error on
5399 * ENOSPC for updating the inode in the case of file write and mmap writes.
5400 */
e41f941a
JB
5401static int btrfs_update_time(struct inode *inode, struct timespec *now,
5402 int flags)
22c44fe6 5403{
2bc55652
AB
5404 struct btrfs_root *root = BTRFS_I(inode)->root;
5405
5406 if (btrfs_root_readonly(root))
5407 return -EROFS;
5408
e41f941a 5409 if (flags & S_VERSION)
22c44fe6 5410 inode_inc_iversion(inode);
e41f941a
JB
5411 if (flags & S_CTIME)
5412 inode->i_ctime = *now;
5413 if (flags & S_MTIME)
5414 inode->i_mtime = *now;
5415 if (flags & S_ATIME)
5416 inode->i_atime = *now;
5417 return btrfs_dirty_inode(inode);
39279cc3
CM
5418}
5419
/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
	int ret = 0;

	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = BTRFS_I(dir)->index_cnt;
	BTRFS_I(dir)->index_cnt++;

	return ret;
}

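/*
 * Create a new inode: insert the inode item and the first inode ref
 * (pointing back at @ref_objectid) in a single btree insertion, then
 * inherit flags and properties from the parent directory.
 */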
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid,
				     umode_t mode, u64 *index)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_set_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->dir_index = *index;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);

	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	/*
	 * Start new inodes with an inode_ref. This is slightly more
	 * efficient for small numbers of hard links since they will
	 * be packed into one item. Extended refs will kick in if we
	 * add more hard links than can fit in the ref item.
	 */
	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	btrfs_insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	btrfs_update_root_times(trans, root);

	ret = btrfs_inode_inherit_props(trans, inode, dir);
	if (ret)
		btrfs_err(root->fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(inode), root->root_key.objectid, ret);

	return inode;
fail:
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}

/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_ino, index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, root, name, name_len,
				    parent_inode, &key,
				    btrfs_inode_type(inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
			   name_len * 2);
	inode_inc_iversion(parent_inode);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;
		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_ino, &local_index, name, name_len);

	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, parent_ino, &local_index);
	}
	return ret;
}

static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
			    struct inode *dir, struct dentry *dentry,
			    struct inode *inode, int backref, u64 index)
{
	int err = btrfs_add_link(trans, dir, inode,
				 dentry->d_name.name, dentry->d_name.len,
				 backref, index);
	if (err > 0)
		err = -EEXIST;
	return err;
}

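/*
 * The create-style entry points below (mknod, create, link, mkdir)
 * follow a common pattern: reserve a five-item transaction up front
 * (see the item breakdown in each function), create or grab the
 * inode, then wire up the directory entry with btrfs_add_nondir() or
 * btrfs_add_link().
 */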
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
		       umode_t mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */

	inode->i_op = &btrfs_special_inode_operations;
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		init_special_inode(inode, inode->i_mode, rdev);
		btrfs_update_inode(trans, root, inode);
		d_instantiate(dentry, inode);
	}
out_unlock:
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;
}

static int btrfs_create(struct inode *dir, struct dentry *dentry,
			umode_t mode, bool excl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int drop_inode_on_err = 0;
	int err;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}
	drop_inode_on_err = 1;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock;

	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_unlock;

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		goto out_unlock;

	inode->i_mapping->a_ops = &btrfs_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	d_instantiate(dentry, inode);

out_unlock:
	btrfs_end_transaction(trans, root);
	if (err && drop_inode_on_err) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root);
	return err;
}

static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = old_dentry->d_inode;
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->objectid != BTRFS_I(inode)->root->objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = btrfs_set_inode_index(dir, &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = CURRENT_TIME;
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;
		err = btrfs_update_inode(trans, root, inode);
		if (err)
			goto fail;
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, inode, NULL, parent);
	}

	btrfs_end_transaction(trans, root);
fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root);
	return err;
}

static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
	u64 objectid = 0;
	u64 index = 0;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFDIR | mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}

	drop_on_err = 1;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_fail;

	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	btrfs_i_size_write(inode, 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;

	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	drop_on_err = 0;

out_fail:
	btrfs_end_transaction(trans, root);
	if (drop_on_err)
		iput(inode);
	btrfs_btree_balance_dirty(root);
	return err;
}

/* helper for btrfs_get_extent.  Given an existing extent in the tree,
 * and an extent that you want to insert, deal with overlap and insert
 * the new extent into the tree.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start, u64 map_len)
{
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	start_diff = map_start - em->start;
	em->start = map_start;
	em->len = map_len;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
	return add_extent_mapping(em_tree, em, 0);
}

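/*
 * Decompress an inline extent into @page.  Note that a decompression
 * error is not propagated: the affected part of the page is zeroed
 * and success is returned.
 */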
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		char *kaddr = kmap_atomic(page);
		unsigned long copy_size = min_t(u64,
				  PAGE_CACHE_SIZE - pg_offset,
				  max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr);
	}
	kfree(tmp);
	return 0;
}

/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 */
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 len,
				    int create)
{
	int ret;
	int err = 0;
	u64 bytenr;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	u32 found_type;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_trans_handle *trans = NULL;
	int compress_type;

again:
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * Chances are we'll be called again, so go ahead and do
		 * readahead
		 */
		path->reada = 1;
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we backup past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	compress_type = btrfs_file_extent_compression(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		extent_end = ALIGN(extent_start + size, root->sectorsize);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf,
								      item);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			goto insert;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		em->block_start = EXTENT_MAP_INLINE;
		if (!page || create) {
			em->start = extent_start;
			em->len = extent_end - extent_start;
			goto out;
		}

		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
				  size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = ALIGN(copy_size, root->sectorsize);
		em->orig_block_len = em->len;
		em->orig_start = em->start;
		if (compress_type) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret); /* -ENOMEM */
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			BUG();
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;

				btrfs_release_path(path);
				trans = btrfs_join_transaction(root);

				if (IS_ERR(trans))
					return ERR_CAST(trans);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	} else {
		WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type);
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
			em->start, em->len, start, len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:

	trace_btrfs_get_extent(root, em);

	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	BUG_ON(!em); /* Error is always set */
	return em;
}

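/*
 * fiemap flavour of btrfs_get_extent: when the plain lookup yields a
 * hole or a prealloc extent, also scan the io_tree for delalloc bytes
 * behind it and report those as an EXTENT_MAP_DELALLOC mapping.
 */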
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to
		 * -  a hole or
		 * -  a pre-alloc extent,
		 * there might actually be delalloc bytes behind it.
		 */
		if (em->block_start != EXTENT_MAP_HOLE &&
		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start, range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		} else {
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}

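/*
 * Allocate a fresh data extent for direct I/O: reserve the extent,
 * pin an extent map for it and queue the ordered extent that tracks
 * the write; the reservation is released again on any failure.
 */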
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
				   alloc_hint, &ins, 1);
	if (ret)
		return ERR_PTR(ret);

	em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
			      ins.offset, ins.offset, ins.offset, 0);
	if (IS_ERR(em)) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		return em;
	}

	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	return em;
}

/*
 * returns 1 when the nocow is safe, < 0 on error, 0 if the
 * block must be cow'd
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;
	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}

	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end <= offset)
		goto out;

	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	if (disk_bytenr == 0)
		goto out;

	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	backref_offset = btrfs_file_extent_offset(leaf, fi);

	if (orig_start) {
		*orig_start = key.offset - backref_offset;
		*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	}

	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;
	btrfs_release_path(path);

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = 0;
		goto out;
	}

	ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
				    key.offset - backref_offset, disk_bytenr);
	btrfs_end_transaction(trans, root);
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + *len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	*len = num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}

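/*
 * Lock the extent range for direct I/O, looping until no ordered
 * extents and (for writes) no uptodate buffered pages remain in the
 * range.  A nonzero return means the page cache could not be written
 * out or invalidated and the caller must fall back to buffered I/O.
 */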
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state, int writing)
{
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered && (!writing ||
		    !test_range_bit(&BTRFS_I(inode)->io_tree,
				    lockstart, lockend, EXTENT_UPTODATE, 0,
				    *cached_state)))
			break;

		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     cached_state, GFP_NOFS);

		if (ordered) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * mmap may have dirtied pages behind our back;
			 * write them out so the invalidate below can
			 * succeed.
			 */
			ret = filemap_write_and_wait_range(inode->i_mapping,
							   lockstart,
							   lockend);
			if (ret)
				break;

			/*
			 * If we found a page that couldn't be invalidated just
			 * fall back to buffered.
			 */
			ret = invalidate_inode_pages2_range(inode->i_mapping,
					lockstart >> PAGE_CACHE_SHIFT,
					lockend >> PAGE_CACHE_SHIFT);
			if (ret)
				break;
		}

		cond_resched();
	}

	return ret;
}

6720
69ffb543
JB
6721static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
6722 u64 len, u64 orig_start,
6723 u64 block_start, u64 block_len,
cc95bef6
JB
6724 u64 orig_block_len, u64 ram_bytes,
6725 int type)
69ffb543
JB
6726{
6727 struct extent_map_tree *em_tree;
6728 struct extent_map *em;
6729 struct btrfs_root *root = BTRFS_I(inode)->root;
6730 int ret;
6731
6732 em_tree = &BTRFS_I(inode)->extent_tree;
6733 em = alloc_extent_map();
6734 if (!em)
6735 return ERR_PTR(-ENOMEM);
6736
6737 em->start = start;
6738 em->orig_start = orig_start;
2ab28f32
JB
6739 em->mod_start = start;
6740 em->mod_len = len;
69ffb543
JB
6741 em->len = len;
6742 em->block_len = block_len;
6743 em->block_start = block_start;
6744 em->bdev = root->fs_info->fs_devices->latest_bdev;
b4939680 6745 em->orig_block_len = orig_block_len;
cc95bef6 6746 em->ram_bytes = ram_bytes;
70c8a91c 6747 em->generation = -1;
69ffb543
JB
6748 set_bit(EXTENT_FLAG_PINNED, &em->flags);
6749 if (type == BTRFS_ORDERED_PREALLOC)
b11e234d 6750 set_bit(EXTENT_FLAG_FILLING, &em->flags);
69ffb543
JB
6751
6752 do {
6753 btrfs_drop_extent_cache(inode, em->start,
6754 em->start + em->len - 1, 0);
6755 write_lock(&em_tree->lock);
09a2a8f9 6756 ret = add_extent_mapping(em_tree, em, 1);
69ffb543
JB
6757 write_unlock(&em_tree->lock);
6758 } while (ret == -EEXIST);
6759
6760 if (ret) {
6761 free_extent_map(em);
6762 return ERR_PTR(ret);
6763 }
6764
6765 return em;
6766}
6767
6768
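/*
 * get_block_t callback for direct I/O: map the range starting at
 * @iblock to an extent, reusing an existing NOCOW or prealloc extent
 * for writes when can_nocow_extent() says it is safe.
 */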
4b46fce2
JB
6769static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
6770 struct buffer_head *bh_result, int create)
6771{
6772 struct extent_map *em;
6773 struct btrfs_root *root = BTRFS_I(inode)->root;
eb838e73 6774 struct extent_state *cached_state = NULL;
4b46fce2 6775 u64 start = iblock << inode->i_blkbits;
eb838e73 6776 u64 lockstart, lockend;
4b46fce2 6777 u64 len = bh_result->b_size;
eb838e73 6778 int unlock_bits = EXTENT_LOCKED;
0934856d 6779 int ret = 0;
eb838e73 6780
172a5049 6781 if (create)
eb838e73 6782 unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
172a5049 6783 else
c329861d 6784 len = min_t(u64, len, root->sectorsize);
eb838e73 6785
c329861d
JB
6786 lockstart = start;
6787 lockend = start + len - 1;
6788
eb838e73
JB
6789 /*
6790 * If this errors out it's because we couldn't invalidate pagecache for
6791	 * this range and we need to fall back to buffered IO.
6792 */
6793 if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
6794 return -ENOTBLK;
6795
4b46fce2 6796 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
eb838e73
JB
6797 if (IS_ERR(em)) {
6798 ret = PTR_ERR(em);
6799 goto unlock_err;
6800 }
4b46fce2
JB
6801
6802 /*
6803	 * Ok, for INLINE and COMPRESSED extents we need to fall back on buffered
6804	 * io.  INLINE is special, and we could probably kludge it in here, but
6805	 * it's still buffered so for safety let's just fall back to the generic
6806	 * buffered path.
6807	 *
6808	 * For COMPRESSED we _have_ to read the entire extent in so we can
6809	 * decompress it, so there will be buffering required no matter what we
6810	 * do, so go ahead and fall back to buffered.
6811	 *
6812	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
6813	 * to buffered IO.  Don't blame me, this is the price we pay for using
6814 * the generic code.
6815 */
6816 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
6817 em->block_start == EXTENT_MAP_INLINE) {
6818 free_extent_map(em);
eb838e73
JB
6819 ret = -ENOTBLK;
6820 goto unlock_err;
4b46fce2
JB
6821 }
6822
6823	/* Just a good old-fashioned hole, return */
6824 if (!create && (em->block_start == EXTENT_MAP_HOLE ||
6825 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
6826 free_extent_map(em);
eb838e73 6827 goto unlock_err;
4b46fce2
JB
6828 }
6829
6830 /*
6831	 * We don't allocate a new extent in the following cases:
6832 *
6833 * 1) The inode is marked as NODATACOW. In this case we'll just use the
6834 * existing extent.
6835 * 2) The extent is marked as PREALLOC. We're good to go here and can
6836 * just use the extent.
6837 *
6838 */
46bfbb5c 6839 if (!create) {
eb838e73
JB
6840 len = min(len, em->len - (start - em->start));
6841 lockstart = start + len;
6842 goto unlock;
46bfbb5c 6843 }
4b46fce2
JB
6844
6845 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
6846 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
6847 em->block_start != EXTENT_MAP_HOLE)) {
4b46fce2
JB
6848 int type;
6849 int ret;
eb384b55 6850 u64 block_start, orig_start, orig_block_len, ram_bytes;
4b46fce2
JB
6851
6852 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6853 type = BTRFS_ORDERED_PREALLOC;
6854 else
6855 type = BTRFS_ORDERED_NOCOW;
46bfbb5c 6856 len = min(len, em->len - (start - em->start));
4b46fce2 6857 block_start = em->block_start + (start - em->start);
46bfbb5c 6858
00361589 6859 if (can_nocow_extent(inode, start, &len, &orig_start,
7ee9e440 6860 &orig_block_len, &ram_bytes) == 1) {
69ffb543
JB
6861 if (type == BTRFS_ORDERED_PREALLOC) {
6862 free_extent_map(em);
6863 em = create_pinned_em(inode, start, len,
6864 orig_start,
b4939680 6865 block_start, len,
cc95bef6
JB
6866 orig_block_len,
6867 ram_bytes, type);
00361589 6868 if (IS_ERR(em))
69ffb543 6869 goto unlock_err;
69ffb543
JB
6870 }
6871
46bfbb5c
CM
6872 ret = btrfs_add_ordered_extent_dio(inode, start,
6873 block_start, len, len, type);
46bfbb5c
CM
6874 if (ret) {
6875 free_extent_map(em);
eb838e73 6876 goto unlock_err;
46bfbb5c
CM
6877 }
6878 goto unlock;
4b46fce2 6879 }
4b46fce2 6880 }
00361589 6881
46bfbb5c
CM
6882 /*
6883	 * This will COW the extent; reset the len in case we changed
6884	 * it above.
6885 */
6886 len = bh_result->b_size;
70c8a91c
JB
6887 free_extent_map(em);
6888 em = btrfs_new_extent_direct(inode, start, len);
eb838e73
JB
6889 if (IS_ERR(em)) {
6890 ret = PTR_ERR(em);
6891 goto unlock_err;
6892 }
46bfbb5c
CM
6893 len = min(len, em->len - (start - em->start));
6894unlock:
4b46fce2
JB
6895 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
6896 inode->i_blkbits;
46bfbb5c 6897 bh_result->b_size = len;
4b46fce2
JB
6898 bh_result->b_bdev = em->bdev;
6899 set_buffer_mapped(bh_result);
c3473e83
JB
6900 if (create) {
6901 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6902 set_buffer_new(bh_result);
6903
6904 /*
6905 * Need to update the i_size under the extent lock so buffered
6906 * readers will get the updated i_size when we unlock.
6907 */
6908 if (start + len > i_size_read(inode))
6909 i_size_write(inode, start + len);
0934856d 6910
172a5049
MX
6911 spin_lock(&BTRFS_I(inode)->lock);
6912 BTRFS_I(inode)->outstanding_extents++;
6913 spin_unlock(&BTRFS_I(inode)->lock);
6914
0934856d
MX
6915 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6916 lockstart + len - 1, EXTENT_DELALLOC, NULL,
6917 &cached_state, GFP_NOFS);
6918 BUG_ON(ret);
c3473e83 6919 }
4b46fce2 6920
eb838e73
JB
6921 /*
6922 * In the case of write we need to clear and unlock the entire range,
6923 * in the case of read we need to unlock only the end area that we
6924	 * aren't using if there is any leftover space.
6925 */
24c03fa5 6926 if (lockstart < lockend) {
0934856d
MX
6927 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6928 lockend, unlock_bits, 1, 0,
6929 &cached_state, GFP_NOFS);
24c03fa5 6930 } else {
eb838e73 6931 free_extent_state(cached_state);
24c03fa5 6932 }
eb838e73 6933
4b46fce2
JB
6934 free_extent_map(em);
6935
6936 return 0;
eb838e73
JB
6937
6938unlock_err:
eb838e73
JB
6939 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6940 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6941 return ret;
4b46fce2
JB
6942}
6943
4b46fce2
JB
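/*
 * Read completion handler for direct IO.  Unless the inode is NODATASUM,
 * verify the checksum of every bio_vec against the values stashed in the
 * dio_private at submit time, flag -EIO on a mismatch, then unlock the
 * extent range and complete the original dio_bio.
 */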
6944static void btrfs_endio_direct_read(struct bio *bio, int err)
6945{
e65e1535 6946 struct btrfs_dio_private *dip = bio->bi_private;
4b46fce2
JB
6947 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
6948 struct bio_vec *bvec = bio->bi_io_vec;
4b46fce2
JB
6949 struct inode *inode = dip->inode;
6950 struct btrfs_root *root = BTRFS_I(inode)->root;
9be3395b 6951 struct bio *dio_bio;
facc8a22
MX
6952 u32 *csums = (u32 *)dip->csum;
6953 int index = 0;
4b46fce2 6954 u64 start;
4b46fce2
JB
6955
6956 start = dip->logical_offset;
6957 do {
6958 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6959 struct page *page = bvec->bv_page;
6960 char *kaddr;
6961 u32 csum = ~(u32)0;
6962 unsigned long flags;
6963
6964 local_irq_save(flags);
7ac687d9 6965 kaddr = kmap_atomic(page);
b0496686 6966 csum = btrfs_csum_data(kaddr + bvec->bv_offset,
4b46fce2
JB
6967 csum, bvec->bv_len);
6968 btrfs_csum_final(csum, (char *)&csum);
7ac687d9 6969 kunmap_atomic(kaddr);
4b46fce2
JB
6970 local_irq_restore(flags);
6971
6972 flush_dcache_page(bvec->bv_page);
facc8a22
MX
6973 if (csum != csums[index]) {
6974 btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
c1c9ff7c
GU
6975 btrfs_ino(inode), start, csum,
6976 csums[index]);
4b46fce2
JB
6977 err = -EIO;
6978 }
6979 }
6980
6981 start += bvec->bv_len;
4b46fce2 6982 bvec++;
facc8a22 6983 index++;
4b46fce2
JB
6984 } while (bvec <= bvec_end);
6985
6986 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
d0082371 6987 dip->logical_offset + dip->bytes - 1);
9be3395b 6988 dio_bio = dip->dio_bio;
4b46fce2 6989
4b46fce2 6990 kfree(dip);
c0da7aa1
JB
6991
6992 /* If we had a csum failure make sure to clear the uptodate flag */
6993 if (err)
9be3395b
CM
6994 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
6995 dio_end_io(dio_bio, err);
6996 bio_put(bio);
4b46fce2
JB
6997}
6998
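/*
 * Write completion handler for direct IO.  The bio may span several ordered
 * extents, so keep calling btrfs_dec_test_first_ordered_pending() until the
 * whole range has been accounted, queueing finish_ordered_fn for every
 * ordered extent that completes along the way.
 */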
6999static void btrfs_endio_direct_write(struct bio *bio, int err)
7000{
7001 struct btrfs_dio_private *dip = bio->bi_private;
7002 struct inode *inode = dip->inode;
7003 struct btrfs_root *root = BTRFS_I(inode)->root;
4b46fce2 7004 struct btrfs_ordered_extent *ordered = NULL;
163cf09c
CM
7005 u64 ordered_offset = dip->logical_offset;
7006 u64 ordered_bytes = dip->bytes;
9be3395b 7007 struct bio *dio_bio;
4b46fce2
JB
7008 int ret;
7009
7010 if (err)
7011 goto out_done;
163cf09c
CM
7012again:
7013 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
7014 &ordered_offset,
5fd02043 7015 ordered_bytes, !err);
4b46fce2 7016 if (!ret)
163cf09c 7017 goto out_test;
4b46fce2 7018
5fd02043
JB
7019 ordered->work.func = finish_ordered_fn;
7020 ordered->work.flags = 0;
7021 btrfs_queue_worker(&root->fs_info->endio_write_workers,
7022 &ordered->work);
163cf09c
CM
7023out_test:
7024 /*
7025	 * Our bio might span multiple ordered extents.  If we haven't
7026	 * completed the accounting for the whole dio, go back and try again.
7027 */
7028 if (ordered_offset < dip->logical_offset + dip->bytes) {
7029 ordered_bytes = dip->logical_offset + dip->bytes -
7030 ordered_offset;
5fd02043 7031 ordered = NULL;
163cf09c
CM
7032 goto again;
7033 }
4b46fce2 7034out_done:
9be3395b 7035 dio_bio = dip->dio_bio;
4b46fce2 7036
4b46fce2 7037 kfree(dip);
c0da7aa1
JB
7038
7039 /* If we had an error make sure to clear the uptodate flag */
7040 if (err)
9be3395b
CM
7041 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
7042 dio_end_io(dio_bio, err);
7043 bio_put(bio);
4b46fce2
JB
7044}
7045
eaf25d93
CM
7046static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
7047 struct bio *bio, int mirror_num,
7048 unsigned long bio_flags, u64 offset)
7049{
7050 int ret;
7051 struct btrfs_root *root = BTRFS_I(inode)->root;
7052 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
79787eaa 7053 BUG_ON(ret); /* -ENOMEM */
eaf25d93
CM
7054 return 0;
7055}
7056
e65e1535
MX
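/*
 * Completion handler for one piece of a split direct IO bio: record any
 * error in the dio_private, and when the last pending piece finishes either
 * fail or complete the original bio.
 */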
7057static void btrfs_end_dio_bio(struct bio *bio, int err)
7058{
7059 struct btrfs_dio_private *dip = bio->bi_private;
7060
7061 if (err) {
efe120a0
FH
7062 btrfs_err(BTRFS_I(dip->inode)->root->fs_info,
7063 "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
c1c9ff7c 7064 btrfs_ino(dip->inode), bio->bi_rw,
3dd1462e 7065 (unsigned long long)bio->bi_sector, bio->bi_size, err);
e65e1535
MX
7066 dip->errors = 1;
7067
7068 /*
7069		 * before the atomic variable goes to zero, we must make sure
7070		 * dip->errors is perceived to be set.
7071 */
7072 smp_mb__before_atomic_dec();
7073 }
7074
7075 /* if there are more bios still pending for this dio, just exit */
7076 if (!atomic_dec_and_test(&dip->pending_bios))
7077 goto out;
7078
9be3395b 7079 if (dip->errors) {
e65e1535 7080 bio_io_error(dip->orig_bio);
9be3395b
CM
7081 } else {
7082 set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
e65e1535
MX
7083 bio_endio(dip->orig_bio, 0);
7084 }
7085out:
7086 bio_put(bio);
7087}
7088
7089static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
7090 u64 first_sector, gfp_t gfp_flags)
7091{
7092 int nr_vecs = bio_get_nr_vecs(bdev);
7093 return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
7094}
7095
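/*
 * Submit one piece of a direct IO bio.  Reads register a workqueue end_io
 * handler and look up their checksums; writes either checksum the bio here
 * or hand it to the async submit workers, which checksum before mapping.
 * Everything ends up in btrfs_map_bio() to reach the underlying devices.
 */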
7096static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
7097 int rw, u64 file_offset, int skip_sum,
c329861d 7098 int async_submit)
e65e1535 7099{
facc8a22 7100 struct btrfs_dio_private *dip = bio->bi_private;
e65e1535
MX
7101 int write = rw & REQ_WRITE;
7102 struct btrfs_root *root = BTRFS_I(inode)->root;
7103 int ret;
7104
b812ce28
JB
7105 if (async_submit)
7106 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
7107
e65e1535 7108 bio_get(bio);
5fd02043
JB
7109
7110 if (!write) {
7111 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
7112 if (ret)
7113 goto err;
7114 }
e65e1535 7115
1ae39938
JB
7116 if (skip_sum)
7117 goto map;
7118
7119 if (write && async_submit) {
e65e1535
MX
7120 ret = btrfs_wq_submit_bio(root->fs_info,
7121 inode, rw, bio, 0, 0,
7122 file_offset,
7123 __btrfs_submit_bio_start_direct_io,
7124 __btrfs_submit_bio_done);
7125 goto err;
1ae39938
JB
7126 } else if (write) {
7127 /*
7128 * If we aren't doing async submit, calculate the csum of the
7129 * bio now.
7130 */
7131 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
7132 if (ret)
7133 goto err;
c2db1073 7134 } else if (!skip_sum) {
facc8a22
MX
7135 ret = btrfs_lookup_bio_sums_dio(root, inode, dip, bio,
7136 file_offset);
c2db1073
TI
7137 if (ret)
7138 goto err;
7139 }
e65e1535 7140
1ae39938
JB
7141map:
7142 ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
e65e1535
MX
7143err:
7144 bio_put(bio);
7145 return ret;
7146}
7147
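/*
 * Split the original direct IO bio so that no piece crosses a chunk-stripe
 * boundary: btrfs_map_block() reports how far the current mapping extends,
 * and a new bio is started whenever adding the next page would overrun
 * map_length.  Async checksumming is disabled for RAID5/6 because it makes
 * collecting full stripe writes difficult.
 */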
7148static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7149 int skip_sum)
7150{
7151 struct inode *inode = dip->inode;
7152 struct btrfs_root *root = BTRFS_I(inode)->root;
e65e1535
MX
7153 struct bio *bio;
7154 struct bio *orig_bio = dip->orig_bio;
7155 struct bio_vec *bvec = orig_bio->bi_io_vec;
7156 u64 start_sector = orig_bio->bi_sector;
7157 u64 file_offset = dip->logical_offset;
7158 u64 submit_len = 0;
7159 u64 map_length;
7160 int nr_pages = 0;
e65e1535 7161 int ret = 0;
1ae39938 7162 int async_submit = 0;
e65e1535 7163
e65e1535 7164 map_length = orig_bio->bi_size;
53b381b3 7165 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
e65e1535
MX
7166 &map_length, NULL, 0);
7167 if (ret) {
64728bbb 7168 bio_put(orig_bio);
e65e1535
MX
7169 return -EIO;
7170 }
facc8a22 7171
02f57c7a
JB
7172 if (map_length >= orig_bio->bi_size) {
7173 bio = orig_bio;
7174 goto submit;
7175 }
7176
53b381b3
DW
7177 /* async crcs make it difficult to collect full stripe writes. */
7178 if (btrfs_get_alloc_profile(root, 1) &
7179 (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
7180 async_submit = 0;
7181 else
7182 async_submit = 1;
7183
02f57c7a
JB
7184 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
7185 if (!bio)
7186 return -ENOMEM;
7187 bio->bi_private = dip;
7188 bio->bi_end_io = btrfs_end_dio_bio;
7189 atomic_inc(&dip->pending_bios);
7190
e65e1535
MX
7191 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
7192 if (unlikely(map_length < submit_len + bvec->bv_len ||
7193 bio_add_page(bio, bvec->bv_page, bvec->bv_len,
7194 bvec->bv_offset) < bvec->bv_len)) {
7195 /*
7196			 * inc the count before we submit the bio so
7197			 * we know the end IO handler won't run before
7198			 * the count is incremented.  Otherwise, the dip might
7199			 * get freed before we're done setting it up.
7200 */
7201 atomic_inc(&dip->pending_bios);
7202 ret = __btrfs_submit_dio_bio(bio, inode, rw,
7203 file_offset, skip_sum,
c329861d 7204 async_submit);
e65e1535
MX
7205 if (ret) {
7206 bio_put(bio);
7207 atomic_dec(&dip->pending_bios);
7208 goto out_err;
7209 }
7210
e65e1535
MX
7211 start_sector += submit_len >> 9;
7212 file_offset += submit_len;
7213
7214 submit_len = 0;
7215 nr_pages = 0;
7216
7217 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
7218 start_sector, GFP_NOFS);
7219 if (!bio)
7220 goto out_err;
7221 bio->bi_private = dip;
7222 bio->bi_end_io = btrfs_end_dio_bio;
7223
7224 map_length = orig_bio->bi_size;
53b381b3 7225 ret = btrfs_map_block(root->fs_info, rw,
3ec706c8 7226 start_sector << 9,
e65e1535
MX
7227 &map_length, NULL, 0);
7228 if (ret) {
7229 bio_put(bio);
7230 goto out_err;
7231 }
7232 } else {
7233 submit_len += bvec->bv_len;
67871254 7234 nr_pages++;
e65e1535
MX
7235 bvec++;
7236 }
7237 }
7238
02f57c7a 7239submit:
e65e1535 7240 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
c329861d 7241 async_submit);
e65e1535
MX
7242 if (!ret)
7243 return 0;
7244
7245 bio_put(bio);
7246out_err:
7247 dip->errors = 1;
7248 /*
7249	 * before the atomic variable goes to zero, we must
7250 * make sure dip->errors is perceived to be set.
7251 */
7252 smp_mb__before_atomic_dec();
7253 if (atomic_dec_and_test(&dip->pending_bios))
7254 bio_io_error(dip->orig_bio);
7255
7256 /* bio_end_io() will handle error, so we needn't return it */
7257 return 0;
7258}
7259
9be3395b
CM
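/*
 * Entry point handed to __blockdev_direct_IO.  Clone the dio bio, allocate
 * a dio_private (with room for the read checksums), hook up the read or
 * write completion handler and submit through btrfs_submit_direct_hook();
 * on failure, a write's ordered extent and reserved extent are cleaned up.
 */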
7260static void btrfs_submit_direct(int rw, struct bio *dio_bio,
7261 struct inode *inode, loff_t file_offset)
4b46fce2
JB
7262{
7263 struct btrfs_root *root = BTRFS_I(inode)->root;
7264 struct btrfs_dio_private *dip;
9be3395b 7265 struct bio *io_bio;
4b46fce2 7266 int skip_sum;
facc8a22 7267 int sum_len;
7b6d91da 7268 int write = rw & REQ_WRITE;
4b46fce2 7269 int ret = 0;
facc8a22 7270 u16 csum_size;
4b46fce2
JB
7271
7272 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7273
9be3395b 7274 io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
9be3395b
CM
7275 if (!io_bio) {
7276 ret = -ENOMEM;
7277 goto free_ordered;
7278 }
7279
facc8a22
MX
7280 if (!skip_sum && !write) {
7281 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
7282 sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits;
7283 sum_len *= csum_size;
7284 } else {
7285 sum_len = 0;
7286 }
7287
7288 dip = kmalloc(sizeof(*dip) + sum_len, GFP_NOFS);
4b46fce2
JB
7289 if (!dip) {
7290 ret = -ENOMEM;
9be3395b 7291 goto free_io_bio;
4b46fce2 7292 }
4b46fce2 7293
9be3395b 7294 dip->private = dio_bio->bi_private;
4b46fce2
JB
7295 dip->inode = inode;
7296 dip->logical_offset = file_offset;
e6da5d2e 7297 dip->bytes = dio_bio->bi_size;
9be3395b
CM
7298 dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
7299 io_bio->bi_private = dip;
e65e1535 7300 dip->errors = 0;
9be3395b
CM
7301 dip->orig_bio = io_bio;
7302 dip->dio_bio = dio_bio;
e65e1535 7303 atomic_set(&dip->pending_bios, 0);
4b46fce2
JB
7304
7305 if (write)
9be3395b 7306 io_bio->bi_end_io = btrfs_endio_direct_write;
4b46fce2 7307 else
9be3395b 7308 io_bio->bi_end_io = btrfs_endio_direct_read;
4b46fce2 7309
e65e1535
MX
7310 ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
7311 if (!ret)
eaf25d93 7312 return;
9be3395b
CM
7313
7314free_io_bio:
7315 bio_put(io_bio);
7316
4b46fce2
JB
7317free_ordered:
7318 /*
7319 * If this is a write, we need to clean up the reserved space and kill
7320 * the ordered extent.
7321 */
7322 if (write) {
7323 struct btrfs_ordered_extent *ordered;
955256f2 7324 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
4b46fce2
JB
7325 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
7326 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
7327 btrfs_free_reserved_extent(root, ordered->start,
7328 ordered->disk_len);
7329 btrfs_put_ordered_extent(ordered);
7330 btrfs_put_ordered_extent(ordered);
7331 }
9be3395b 7332 bio_endio(dio_bio, ret);
4b46fce2
JB
7333}
7334
5a5f79b5
CM
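/*
 * Validate O_DIRECT alignment: the file offset and every iovec base and
 * length must be sector aligned.  Reads additionally may not repeat an
 * iov_base, since that would trip checksum verification on completion.
 */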
7335static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
7336 const struct iovec *iov, loff_t offset,
7337 unsigned long nr_segs)
7338{
7339 int seg;
a1b75f7d 7340 int i;
5a5f79b5
CM
7341 size_t size;
7342 unsigned long addr;
7343 unsigned blocksize_mask = root->sectorsize - 1;
7344 ssize_t retval = -EINVAL;
7345 loff_t end = offset;
7346
7347 if (offset & blocksize_mask)
7348 goto out;
7349
7350 /* Check the memory alignment. Blocks cannot straddle pages */
7351 for (seg = 0; seg < nr_segs; seg++) {
7352 addr = (unsigned long)iov[seg].iov_base;
7353 size = iov[seg].iov_len;
7354 end += size;
a1b75f7d 7355 if ((addr & blocksize_mask) || (size & blocksize_mask))
5a5f79b5 7356 goto out;
a1b75f7d
JB
7357
7358 /* If this is a write we don't need to check anymore */
7359 if (rw & WRITE)
7360 continue;
7361
7362 /*
7363 * Check to make sure we don't have duplicate iov_base's in this
7364		 * iovec; if so return -EINVAL, otherwise we'll get csum errors
7365 * when reading back.
7366 */
7367 for (i = seg + 1; i < nr_segs; i++) {
7368 if (iov[seg].iov_base == iov[i].iov_base)
7369 goto out;
7370 }
5a5f79b5
CM
7371 }
7372 retval = 0;
7373out:
7374 return retval;
7375}
eb838e73 7376
16432985
CM
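/*
 * Top level O_DIRECT entry point.  Wait out ordered extents (the generic
 * flush doesn't cover compressed writeback), reserve delalloc space for
 * writes, then hand off to __blockdev_direct_IO; whatever part of the
 * reservation a short write didn't consume is released afterwards.
 */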
7377static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
7378 const struct iovec *iov, loff_t offset,
7379 unsigned long nr_segs)
7380{
4b46fce2
JB
7381 struct file *file = iocb->ki_filp;
7382 struct inode *inode = file->f_mapping->host;
0934856d 7383 size_t count = 0;
2e60a51e 7384 int flags = 0;
38851cc1
MX
7385 bool wakeup = true;
7386 bool relock = false;
0934856d 7387 ssize_t ret;
4b46fce2 7388
5a5f79b5 7389 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
eb838e73 7390 offset, nr_segs))
5a5f79b5 7391 return 0;
3f7c579c 7392
38851cc1
MX
7393 atomic_inc(&inode->i_dio_count);
7394 smp_mb__after_atomic_inc();
7395
0e267c44
JB
7396 /*
7397 * The generic stuff only does filemap_write_and_wait_range, which isn't
7398 * enough if we've written compressed pages to this area, so we need to
7399 * call btrfs_wait_ordered_range to make absolutely sure that any
7400 * outstanding dirty pages are on disk.
7401 */
7402 count = iov_length(iov, nr_segs);
0ef8b726
JB
7403 ret = btrfs_wait_ordered_range(inode, offset, count);
7404 if (ret)
7405 return ret;
0e267c44 7406
0934856d 7407 if (rw & WRITE) {
38851cc1
MX
7408 /*
7409		 * If the write DIO is beyond the EOF, we need to update
7410		 * the isize, but it is protected by i_mutex.  So we can
7411		 * not unlock the i_mutex in this case.
7412 */
7413 if (offset + count <= inode->i_size) {
7414 mutex_unlock(&inode->i_mutex);
7415 relock = true;
7416 }
0934856d
MX
7417 ret = btrfs_delalloc_reserve_space(inode, count);
7418 if (ret)
38851cc1
MX
7419 goto out;
7420 } else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
7421 &BTRFS_I(inode)->runtime_flags))) {
7422 inode_dio_done(inode);
7423 flags = DIO_LOCKING | DIO_SKIP_HOLES;
7424 wakeup = false;
0934856d
MX
7425 }
7426
7427 ret = __blockdev_direct_IO(rw, iocb, inode,
7428 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
7429 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
2e60a51e 7430 btrfs_submit_direct, flags);
0934856d
MX
7431 if (rw & WRITE) {
7432 if (ret < 0 && ret != -EIOCBQUEUED)
7433 btrfs_delalloc_release_space(inode, count);
172a5049 7434 else if (ret >= 0 && (size_t)ret < count)
0934856d
MX
7435 btrfs_delalloc_release_space(inode,
7436 count - (size_t)ret);
172a5049
MX
7437 else
7438 btrfs_delalloc_release_metadata(inode, 0);
0934856d 7439 }
38851cc1 7440out:
2e60a51e
MX
7441 if (wakeup)
7442 inode_dio_done(inode);
38851cc1
MX
7443 if (relock)
7444 mutex_lock(&inode->i_mutex);
0934856d
MX
7445
7446 return ret;
16432985
CM
7447}
7448
05dadc09
TI
7449#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
7450
1506fcc8
YS
7451static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7452 __u64 start, __u64 len)
7453{
05dadc09
TI
7454 int ret;
7455
7456 ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
7457 if (ret)
7458 return ret;
7459
ec29ed5b 7460 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
1506fcc8
YS
7461}
7462
a52d9a80 7463int btrfs_readpage(struct file *file, struct page *page)
9ebefb18 7464{
d1310b2e
CM
7465 struct extent_io_tree *tree;
7466 tree = &BTRFS_I(page->mapping->host)->io_tree;
8ddc7d9c 7467 return extent_read_full_page(tree, page, btrfs_get_extent, 0);
9ebefb18 7468}
1832a6d5 7469
a52d9a80 7470static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
39279cc3 7471{
d1310b2e 7472 struct extent_io_tree *tree;
b888db2b
CM
7473
7474
7475 if (current->flags & PF_MEMALLOC) {
7476 redirty_page_for_writepage(wbc, page);
7477 unlock_page(page);
7478 return 0;
7479 }
d1310b2e 7480 tree = &BTRFS_I(page->mapping->host)->io_tree;
a52d9a80 7481 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9ebefb18
CM
7482}
7483
48a3b636
ES
7484static int btrfs_writepages(struct address_space *mapping,
7485 struct writeback_control *wbc)
b293f02e 7486{
d1310b2e 7487 struct extent_io_tree *tree;
771ed689 7488
d1310b2e 7489 tree = &BTRFS_I(mapping->host)->io_tree;
b293f02e
CM
7490 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
7491}
7492
3ab2fb5a
CM
7493static int
7494btrfs_readpages(struct file *file, struct address_space *mapping,
7495 struct list_head *pages, unsigned nr_pages)
7496{
d1310b2e
CM
7497 struct extent_io_tree *tree;
7498 tree = &BTRFS_I(mapping->host)->io_tree;
3ab2fb5a
CM
7499 return extent_readpages(tree, mapping, pages, nr_pages,
7500 btrfs_get_extent);
7501}
e6dcd2dc 7502static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
9ebefb18 7503{
d1310b2e
CM
7504 struct extent_io_tree *tree;
7505 struct extent_map_tree *map;
a52d9a80 7506 int ret;
8c2383c3 7507
d1310b2e
CM
7508 tree = &BTRFS_I(page->mapping->host)->io_tree;
7509 map = &BTRFS_I(page->mapping->host)->extent_tree;
70dec807 7510 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
a52d9a80
CM
7511 if (ret == 1) {
7512 ClearPagePrivate(page);
7513 set_page_private(page, 0);
7514 page_cache_release(page);
39279cc3 7515 }
a52d9a80 7516 return ret;
39279cc3
CM
7517}
7518
e6dcd2dc
CM
7519static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7520{
98509cfc
CM
7521 if (PageWriteback(page) || PageDirty(page))
7522 return 0;
b335b003 7523 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
e6dcd2dc
CM
7524}
7525
d47992f8
LC
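/*
 * Called when a page is being dropped from the page cache.  IO on the page
 * will never start, so any ordered extent covering it has to be accounted
 * here; the extent state is only touched when the inode isn't already being
 * evicted.
 */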
7526static void btrfs_invalidatepage(struct page *page, unsigned int offset,
7527 unsigned int length)
39279cc3 7528{
5fd02043 7529 struct inode *inode = page->mapping->host;
d1310b2e 7530 struct extent_io_tree *tree;
e6dcd2dc 7531 struct btrfs_ordered_extent *ordered;
2ac55d41 7532 struct extent_state *cached_state = NULL;
e6dcd2dc
CM
7533 u64 page_start = page_offset(page);
7534 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
131e404a 7535 int inode_evicting = inode->i_state & I_FREEING;
39279cc3 7536
8b62b72b
CM
7537 /*
7538 * we have the page locked, so new writeback can't start,
7539 * and the dirty bit won't be cleared while we are here.
7540 *
7541 * Wait for IO on this page so that we can safely clear
7542 * the PagePrivate2 bit and do ordered accounting
7543 */
e6dcd2dc 7544 wait_on_page_writeback(page);
8b62b72b 7545
5fd02043 7546 tree = &BTRFS_I(inode)->io_tree;
e6dcd2dc
CM
7547 if (offset) {
7548 btrfs_releasepage(page, GFP_NOFS);
7549 return;
7550 }
131e404a
FDBM
7551
7552 if (!inode_evicting)
7553 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7554 ordered = btrfs_lookup_ordered_extent(inode, page_start);
e6dcd2dc 7555 if (ordered) {
eb84ae03
CM
7556 /*
7557 * IO on this page will never be started, so we need
7558 * to account for any ordered extents now
7559 */
131e404a
FDBM
7560 if (!inode_evicting)
7561 clear_extent_bit(tree, page_start, page_end,
7562 EXTENT_DIRTY | EXTENT_DELALLOC |
7563 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7564 EXTENT_DEFRAG, 1, 0, &cached_state,
7565 GFP_NOFS);
8b62b72b
CM
7566 /*
7567 * whoever cleared the private bit is responsible
7568 * for the finish_ordered_io
7569 */
77cef2ec
JB
7570 if (TestClearPagePrivate2(page)) {
7571 struct btrfs_ordered_inode_tree *tree;
7572 u64 new_len;
7573
7574 tree = &BTRFS_I(inode)->ordered_tree;
7575
7576 spin_lock_irq(&tree->lock);
7577 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
7578 new_len = page_start - ordered->file_offset;
7579 if (new_len < ordered->truncated_len)
7580 ordered->truncated_len = new_len;
7581 spin_unlock_irq(&tree->lock);
7582
7583 if (btrfs_dec_test_ordered_pending(inode, &ordered,
7584 page_start,
7585 PAGE_CACHE_SIZE, 1))
7586 btrfs_finish_ordered_io(ordered);
8b62b72b 7587 }
e6dcd2dc 7588 btrfs_put_ordered_extent(ordered);
131e404a
FDBM
7589 if (!inode_evicting) {
7590 cached_state = NULL;
7591 lock_extent_bits(tree, page_start, page_end, 0,
7592 &cached_state);
7593 }
7594 }
7595
7596 if (!inode_evicting) {
7597 clear_extent_bit(tree, page_start, page_end,
7598 EXTENT_LOCKED | EXTENT_DIRTY |
7599 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
7600 EXTENT_DEFRAG, 1, 1,
7601 &cached_state, GFP_NOFS);
7602
7603 __btrfs_releasepage(page, GFP_NOFS);
e6dcd2dc 7604 }
e6dcd2dc 7605
4a096752 7606 ClearPageChecked(page);
9ad6b7bc 7607 if (PagePrivate(page)) {
9ad6b7bc
CM
7608 ClearPagePrivate(page);
7609 set_page_private(page, 0);
7610 page_cache_release(page);
7611 }
39279cc3
CM
7612}
7613
9ebefb18
CM
7614/*
7615 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
7616 * called from a page fault handler when a page is first dirtied. Hence we must
7617 * be careful to check for EOF conditions here. We set the page up correctly
7618 * for a written page which means we get ENOSPC checking when writing into
7619 * holes and correct delalloc and unwritten extent mapping on filesystems that
7620 * support these features.
7621 *
7622 * We are not allowed to take the i_mutex here so we have to play games to
7623 * protect against truncate races as the page could now be beyond EOF. Because
7624 * vmtruncate() writes the inode size before removing pages, once we have the
7625 * page lock we can determine safely if the page is beyond EOF. If it is not
7626 * beyond EOF, then the page is guaranteed safe against truncation until we
7627 * unlock the page.
7628 */
c2ec175c 7629int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
9ebefb18 7630{
c2ec175c 7631 struct page *page = vmf->page;
496ad9aa 7632 struct inode *inode = file_inode(vma->vm_file);
1832a6d5 7633 struct btrfs_root *root = BTRFS_I(inode)->root;
e6dcd2dc
CM
7634 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7635 struct btrfs_ordered_extent *ordered;
2ac55d41 7636 struct extent_state *cached_state = NULL;
e6dcd2dc
CM
7637 char *kaddr;
7638 unsigned long zero_start;
9ebefb18 7639 loff_t size;
1832a6d5 7640 int ret;
9998eb70 7641 int reserved = 0;
a52d9a80 7642 u64 page_start;
e6dcd2dc 7643 u64 page_end;
9ebefb18 7644
b2b5ef5c 7645 sb_start_pagefault(inode->i_sb);
0ca1f7ce 7646 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
9998eb70 7647 if (!ret) {
e41f941a 7648 ret = file_update_time(vma->vm_file);
9998eb70
CM
7649 reserved = 1;
7650 }
56a76f82
NP
7651 if (ret) {
7652 if (ret == -ENOMEM)
7653 ret = VM_FAULT_OOM;
7654 else /* -ENOSPC, -EIO, etc */
7655 ret = VM_FAULT_SIGBUS;
9998eb70
CM
7656 if (reserved)
7657 goto out;
7658 goto out_noreserve;
56a76f82 7659 }
1832a6d5 7660
56a76f82 7661 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
e6dcd2dc 7662again:
9ebefb18 7663 lock_page(page);
9ebefb18 7664 size = i_size_read(inode);
e6dcd2dc
CM
7665 page_start = page_offset(page);
7666 page_end = page_start + PAGE_CACHE_SIZE - 1;
a52d9a80 7667
9ebefb18 7668 if ((page->mapping != inode->i_mapping) ||
e6dcd2dc 7669 (page_start >= size)) {
9ebefb18
CM
7670 /* page got truncated out from underneath us */
7671 goto out_unlock;
7672 }
e6dcd2dc
CM
7673 wait_on_page_writeback(page);
7674
d0082371 7675 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
e6dcd2dc
CM
7676 set_page_extent_mapped(page);
7677
eb84ae03
CM
7678 /*
7679 * we can't set the delalloc bits if there are pending ordered
7680 * extents. Drop our locks and wait for them to finish
7681 */
e6dcd2dc
CM
7682 ordered = btrfs_lookup_ordered_extent(inode, page_start);
7683 if (ordered) {
2ac55d41
JB
7684 unlock_extent_cached(io_tree, page_start, page_end,
7685 &cached_state, GFP_NOFS);
e6dcd2dc 7686 unlock_page(page);
eb84ae03 7687 btrfs_start_ordered_extent(inode, ordered, 1);
e6dcd2dc
CM
7688 btrfs_put_ordered_extent(ordered);
7689 goto again;
7690 }
7691
fbf19087
JB
7692 /*
7693 * XXX - page_mkwrite gets called every time the page is dirtied, even
7694 * if it was already dirty, so for space accounting reasons we need to
7695 * clear any delalloc bits for the range we are fixing to save. There
7696 * is probably a better way to do this, but for now keep consistent with
7697 * prepare_pages in the normal write path.
7698 */
2ac55d41 7699 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
9e8a4a8b
LB
7700 EXTENT_DIRTY | EXTENT_DELALLOC |
7701 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
2ac55d41 7702 0, 0, &cached_state, GFP_NOFS);
fbf19087 7703
2ac55d41
JB
7704 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
7705 &cached_state);
9ed74f2d 7706 if (ret) {
2ac55d41
JB
7707 unlock_extent_cached(io_tree, page_start, page_end,
7708 &cached_state, GFP_NOFS);
9ed74f2d
JB
7709 ret = VM_FAULT_SIGBUS;
7710 goto out_unlock;
7711 }
e6dcd2dc 7712 ret = 0;
9ebefb18
CM
7713
7714 /* page is wholly or partially inside EOF */
a52d9a80 7715 if (page_start + PAGE_CACHE_SIZE > size)
e6dcd2dc 7716 zero_start = size & ~PAGE_CACHE_MASK;
9ebefb18 7717 else
e6dcd2dc 7718 zero_start = PAGE_CACHE_SIZE;
9ebefb18 7719
e6dcd2dc
CM
7720 if (zero_start != PAGE_CACHE_SIZE) {
7721 kaddr = kmap(page);
7722 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
7723 flush_dcache_page(page);
7724 kunmap(page);
7725 }
247e743c 7726 ClearPageChecked(page);
e6dcd2dc 7727 set_page_dirty(page);
50a9b214 7728 SetPageUptodate(page);
5a3f23d5 7729
257c62e1
CM
7730 BTRFS_I(inode)->last_trans = root->fs_info->generation;
7731 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
46d8bc34 7732 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
257c62e1 7733
2ac55d41 7734 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
9ebefb18
CM
7735
7736out_unlock:
b2b5ef5c
JK
7737 if (!ret) {
7738 sb_end_pagefault(inode->i_sb);
50a9b214 7739 return VM_FAULT_LOCKED;
b2b5ef5c 7740 }
9ebefb18 7741 unlock_page(page);
1832a6d5 7742out:
ec39e180 7743 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
9998eb70 7744out_noreserve:
b2b5ef5c 7745 sb_end_pagefault(inode->i_sb);
9ebefb18
CM
7746 return ret;
7747}
7748
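/*
 * Truncate the inode down to its current i_size, dropping the file extent
 * items past the new end.  This runs as a series of small transactions,
 * refilling a temporary block reservation each round; see the reservation
 * discussion in the function body.
 */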
a41ad394 7749static int btrfs_truncate(struct inode *inode)
39279cc3
CM
7750{
7751 struct btrfs_root *root = BTRFS_I(inode)->root;
fcb80c2a 7752 struct btrfs_block_rsv *rsv;
a71754fc 7753 int ret = 0;
3893e33b 7754 int err = 0;
39279cc3 7755 struct btrfs_trans_handle *trans;
dbe674a9 7756 u64 mask = root->sectorsize - 1;
07127184 7757 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
39279cc3 7758
0ef8b726
JB
7759 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
7760 (u64)-1);
7761 if (ret)
7762 return ret;
39279cc3 7763
fcb80c2a
JB
7764 /*
7765	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
7766	 * 3 things going on here:
7767	 *
7768	 * 1) We need to reserve space for our orphan item and the space to
7769	 * delete our orphan item.  Lord knows we don't want to have a dangling
7770	 * orphan item because we didn't reserve space to remove it.
7771	 *
7772	 * 2) We need to reserve space to update our inode.
7773	 *
7774	 * 3) We need to have something to cache all the space that is going to
7775	 * be freed up by the truncate operation, but also have some slack
7776	 * space reserved in case it uses space during the truncate (thank you
7777	 * very much snapshotting).
7778	 *
7779	 * And we need these to all be separate.  The fact is we can use a lot of
7780	 * space doing the truncate, and we have no earthly idea how much space
7781	 * we will use, so we need the truncate reservation to be separate so it
7782	 * doesn't end up using space reserved for updating the inode or
7783	 * removing the orphan item.  We also need to be able to stop the
7784	 * transaction and start a new one, which means we need to be able to
7785	 * update the inode several times, and we have no way of knowing how
7786	 * many times that will be, so we can't just reserve 1 item for the
7787	 * entirety of the operation, so that has to be done separately as well.
7788 * Then there is the orphan item, which does indeed need to be held on
7789 * to for the whole operation, and we need nobody to touch this reserved
7790 * space except the orphan code.
7791 *
7792 * So that leaves us with
7793 *
7794 * 1) root->orphan_block_rsv - for the orphan deletion.
7795 * 2) rsv - for the truncate reservation, which we will steal from the
7796 * transaction reservation.
7797	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
7798 * updating the inode.
7799 */
66d8f3dd 7800 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
fcb80c2a
JB
7801 if (!rsv)
7802 return -ENOMEM;
4a338542 7803 rsv->size = min_size;
ca7e70f5 7804 rsv->failfast = 1;
f0cd846e 7805
907cbceb 7806 /*
07127184 7807 * 1 for the truncate slack space
907cbceb
JB
7808 * 1 for updating the inode.
7809 */
f3fe820c 7810 trans = btrfs_start_transaction(root, 2);
fcb80c2a
JB
7811 if (IS_ERR(trans)) {
7812 err = PTR_ERR(trans);
7813 goto out;
7814 }
f0cd846e 7815
907cbceb
JB
7816 /* Migrate the slack space for the truncate to our reserve */
7817 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
7818 min_size);
fcb80c2a 7819 BUG_ON(ret);
f0cd846e 7820
5a3f23d5
CM
7821 /*
7822 * setattr is responsible for setting the ordered_data_close flag,
7823 * but that is only tested during the last file release. That
7824 * could happen well after the next commit, leaving a great big
7825 * window where new writes may get lost if someone chooses to write
7826 * to this file after truncating to zero
7827 *
7828 * The inode doesn't have any dirty data here, and so if we commit
7829 * this is a noop. If someone immediately starts writing to the inode
7830 * it is very likely we'll catch some of their writes in this
7831 * transaction, and the commit will find this file on the ordered
7832 * data list with good things to send down.
7833 *
7834 * This is a best effort solution, there is still a window where
7835 * using truncate to replace the contents of the file will
7836 * end up with a zero length file after a crash.
7837 */
72ac3c0d
JB
7838 if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
7839 &BTRFS_I(inode)->runtime_flags))
5a3f23d5
CM
7840 btrfs_add_ordered_operation(trans, root, inode);
7841
5dc562c5
JB
7842 /*
7843 * So if we truncate and then write and fsync we normally would just
7844 * write the extents that changed, which is a problem if we need to
7845 * first truncate that entire inode. So set this flag so we write out
7846 * all of the extents in the inode to the sync log so we're completely
7847 * safe.
7848 */
7849 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
ca7e70f5 7850 trans->block_rsv = rsv;
907cbceb 7851
8082510e
YZ
7852 while (1) {
7853 ret = btrfs_truncate_inode_items(trans, root, inode,
7854 inode->i_size,
7855 BTRFS_EXTENT_DATA_KEY);
ca7e70f5 7856 if (ret != -ENOSPC) {
3893e33b 7857 err = ret;
8082510e 7858 break;
3893e33b 7859 }
39279cc3 7860
fcb80c2a 7861 trans->block_rsv = &root->fs_info->trans_block_rsv;
8082510e 7862 ret = btrfs_update_inode(trans, root, inode);
3893e33b
JB
7863 if (ret) {
7864 err = ret;
7865 break;
7866 }
ca7e70f5 7867
8082510e 7868 btrfs_end_transaction(trans, root);
b53d3f5d 7869 btrfs_btree_balance_dirty(root);
ca7e70f5
JB
7870
7871 trans = btrfs_start_transaction(root, 2);
7872 if (IS_ERR(trans)) {
7873 ret = err = PTR_ERR(trans);
7874 trans = NULL;
7875 break;
7876 }
7877
7878 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
7879 rsv, min_size);
7880 BUG_ON(ret); /* shouldn't happen */
7881 trans->block_rsv = rsv;
8082510e
YZ
7882 }
7883
7884 if (ret == 0 && inode->i_nlink > 0) {
fcb80c2a 7885 trans->block_rsv = root->orphan_block_rsv;
8082510e 7886 ret = btrfs_orphan_del(trans, inode);
3893e33b
JB
7887 if (ret)
7888 err = ret;
8082510e
YZ
7889 }
7890
917c16b2
CM
7891 if (trans) {
7892 trans->block_rsv = &root->fs_info->trans_block_rsv;
7893 ret = btrfs_update_inode(trans, root, inode);
7894 if (ret && !err)
7895 err = ret;
7b128766 7896
7ad85bb7 7897 ret = btrfs_end_transaction(trans, root);
b53d3f5d 7898 btrfs_btree_balance_dirty(root);
917c16b2 7899 }
fcb80c2a
JB
7900
7901out:
7902 btrfs_free_block_rsv(root, rsv);
7903
3893e33b
JB
7904 if (ret && !err)
7905 err = ret;
a41ad394 7906
3893e33b 7907 return err;
39279cc3
CM
7908}
7909
d352ac68
CM
7910/*
7911 * create a new subvolume directory/inode (helper for the ioctl).
7912 */
d2fb3437 7913int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
63541927
FDBM
7914 struct btrfs_root *new_root,
7915 struct btrfs_root *parent_root,
7916 u64 new_dirid)
39279cc3 7917{
39279cc3 7918 struct inode *inode;
76dda93c 7919 int err;
00e4e6b3 7920 u64 index = 0;
39279cc3 7921
12fc9d09
FA
7922 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
7923 new_dirid, new_dirid,
7924 S_IFDIR | (~current_umask() & S_IRWXUGO),
7925 &index);
54aa1f4d 7926 if (IS_ERR(inode))
f46b5a66 7927 return PTR_ERR(inode);
39279cc3
CM
7928 inode->i_op = &btrfs_dir_inode_operations;
7929 inode->i_fop = &btrfs_dir_file_operations;
7930
bfe86848 7931 set_nlink(inode, 1);
dbe674a9 7932 btrfs_i_size_write(inode, 0);
3b96362c 7933
63541927
FDBM
7934 err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
7935 if (err)
7936 btrfs_err(new_root->fs_info,
7937 "error inheriting subvolume %llu properties: %d\n",
7938 new_root->root_key.objectid, err);
7939
76dda93c 7940 err = btrfs_update_inode(trans, new_root, inode);
cb8e7090 7941
76dda93c 7942 iput(inode);
ce598979 7943 return err;
39279cc3
CM
7944}
7945
39279cc3
CM
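/*
 * Allocate a btrfs_inode from the slab and initialize the embedded extent
 * map and io trees, the ordered tree, lists and locks before handing the
 * VFS inode back to the caller.
 */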
7946struct inode *btrfs_alloc_inode(struct super_block *sb)
7947{
7948 struct btrfs_inode *ei;
2ead6ae7 7949 struct inode *inode;
39279cc3
CM
7950
7951 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
7952 if (!ei)
7953 return NULL;
2ead6ae7
YZ
7954
7955 ei->root = NULL;
2ead6ae7 7956 ei->generation = 0;
15ee9bc7 7957 ei->last_trans = 0;
257c62e1 7958 ei->last_sub_trans = 0;
e02119d5 7959 ei->logged_trans = 0;
2ead6ae7 7960 ei->delalloc_bytes = 0;
2ead6ae7
YZ
7961 ei->disk_i_size = 0;
7962 ei->flags = 0;
7709cde3 7963 ei->csum_bytes = 0;
2ead6ae7 7964 ei->index_cnt = (u64)-1;
67de1176 7965 ei->dir_index = 0;
2ead6ae7 7966 ei->last_unlink_trans = 0;
46d8bc34 7967 ei->last_log_commit = 0;
2ead6ae7 7968
9e0baf60
JB
7969 spin_lock_init(&ei->lock);
7970 ei->outstanding_extents = 0;
7971 ei->reserved_extents = 0;
2ead6ae7 7972
72ac3c0d 7973 ei->runtime_flags = 0;
261507a0 7974 ei->force_compress = BTRFS_COMPRESS_NONE;
2ead6ae7 7975
16cdcec7
MX
7976 ei->delayed_node = NULL;
7977
2ead6ae7 7978 inode = &ei->vfs_inode;
a8067e02 7979 extent_map_tree_init(&ei->extent_tree);
f993c883
DS
7980 extent_io_tree_init(&ei->io_tree, &inode->i_data);
7981 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
0b32f4bb
JB
7982 ei->io_tree.track_uptodate = 1;
7983 ei->io_failure_tree.track_uptodate = 1;
b812ce28 7984 atomic_set(&ei->sync_writers, 0);
2ead6ae7 7985 mutex_init(&ei->log_mutex);
f248679e 7986 mutex_init(&ei->delalloc_mutex);
e6dcd2dc 7987 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
2ead6ae7 7988 INIT_LIST_HEAD(&ei->delalloc_inodes);
5a3f23d5 7989 INIT_LIST_HEAD(&ei->ordered_operations);
2ead6ae7
YZ
7990 RB_CLEAR_NODE(&ei->rb_node);
7991
7992 return inode;
39279cc3
CM
7993}
7994
aaedb55b
JB
7995#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
7996void btrfs_test_destroy_inode(struct inode *inode)
7997{
7998 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7999 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8000}
8001#endif
8002
fa0d7e3d
NP
8003static void btrfs_i_callback(struct rcu_head *head)
8004{
8005 struct inode *inode = container_of(head, struct inode, i_rcu);
fa0d7e3d
NP
8006 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8007}
8008
39279cc3
CM
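/*
 * Final teardown of an inode: warn about leaked reservations, drop the
 * inode from the ordered operations list, complain about (and remove) any
 * ordered extents that survived eviction, then free the inode via RCU.
 */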
8009void btrfs_destroy_inode(struct inode *inode)
8010{
e6dcd2dc 8011 struct btrfs_ordered_extent *ordered;
5a3f23d5
CM
8012 struct btrfs_root *root = BTRFS_I(inode)->root;
8013
b3d9b7a3 8014 WARN_ON(!hlist_empty(&inode->i_dentry));
39279cc3 8015 WARN_ON(inode->i_data.nrpages);
9e0baf60
JB
8016 WARN_ON(BTRFS_I(inode)->outstanding_extents);
8017 WARN_ON(BTRFS_I(inode)->reserved_extents);
7709cde3
JB
8018 WARN_ON(BTRFS_I(inode)->delalloc_bytes);
8019 WARN_ON(BTRFS_I(inode)->csum_bytes);
39279cc3 8020
a6dbd429
JB
8021 /*
8022	 * This can happen when we create an inode, but somebody else also
8023 * created the same inode and we need to destroy the one we already
8024 * created.
8025 */
8026 if (!root)
8027 goto free;
8028
5a3f23d5
CM
8029 /*
8030 * Make sure we're properly removed from the ordered operation
8031 * lists.
8032 */
8033 smp_mb();
8034 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
199c2a9c 8035 spin_lock(&root->fs_info->ordered_root_lock);
5a3f23d5 8036 list_del_init(&BTRFS_I(inode)->ordered_operations);
199c2a9c 8037 spin_unlock(&root->fs_info->ordered_root_lock);
5a3f23d5
CM
8038 }
8039
8a35d95f
JB
8040 if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
8041 &BTRFS_I(inode)->runtime_flags)) {
c2cf52eb 8042 btrfs_info(root->fs_info, "inode %llu still on the orphan list",
c1c9ff7c 8043 btrfs_ino(inode));
8a35d95f 8044 atomic_dec(&root->orphan_inodes);
7b128766 8045 }
7b128766 8046
d397712b 8047 while (1) {
e6dcd2dc
CM
8048 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8049 if (!ordered)
8050 break;
8051 else {
c2cf52eb 8052 btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
c1c9ff7c 8053 ordered->file_offset, ordered->len);
e6dcd2dc
CM
8054 btrfs_remove_ordered_extent(inode, ordered);
8055 btrfs_put_ordered_extent(ordered);
8056 btrfs_put_ordered_extent(ordered);
8057 }
8058 }
5d4f98a2 8059 inode_tree_del(inode);
5b21f2ed 8060 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
a6dbd429 8061free:
fa0d7e3d 8062 call_rcu(&inode->i_rcu, btrfs_i_callback);
39279cc3
CM
8063}
8064
45321ac5 8065int btrfs_drop_inode(struct inode *inode)
76dda93c
YZ
8066{
8067 struct btrfs_root *root = BTRFS_I(inode)->root;
45321ac5 8068
6379ef9f
NA
8069 if (root == NULL)
8070 return 1;
8071
fa6ac876 8072 /* the snap/subvol tree is on deleting */
69e9c6c6 8073 if (btrfs_root_refs(&root->root_item) == 0)
45321ac5 8074 return 1;
76dda93c 8075 else
45321ac5 8076 return generic_drop_inode(inode);
76dda93c
YZ
8077}
8078
0ee0fda0 8079static void init_once(void *foo)
39279cc3
CM
8080{
8081 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
8082
8083 inode_init_once(&ei->vfs_inode);
8084}
8085
8086void btrfs_destroy_cachep(void)
8087{
8c0a8537
KS
8088 /*
8089 * Make sure all delayed rcu free inodes are flushed before we
8090 * destroy cache.
8091 */
8092 rcu_barrier();
39279cc3
CM
8093 if (btrfs_inode_cachep)
8094 kmem_cache_destroy(btrfs_inode_cachep);
8095 if (btrfs_trans_handle_cachep)
8096 kmem_cache_destroy(btrfs_trans_handle_cachep);
8097 if (btrfs_transaction_cachep)
8098 kmem_cache_destroy(btrfs_transaction_cachep);
39279cc3
CM
8099 if (btrfs_path_cachep)
8100 kmem_cache_destroy(btrfs_path_cachep);
dc89e982
JB
8101 if (btrfs_free_space_cachep)
8102 kmem_cache_destroy(btrfs_free_space_cachep);
8ccf6f19
MX
8103 if (btrfs_delalloc_work_cachep)
8104 kmem_cache_destroy(btrfs_delalloc_work_cachep);
39279cc3
CM
8105}
8106
8107int btrfs_init_cachep(void)
8108{
837e1972 8109 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9601e3f6
CH
8110 sizeof(struct btrfs_inode), 0,
8111 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
39279cc3
CM
8112 if (!btrfs_inode_cachep)
8113 goto fail;
9601e3f6 8114
837e1972 8115 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9601e3f6
CH
8116 sizeof(struct btrfs_trans_handle), 0,
8117 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
39279cc3
CM
8118 if (!btrfs_trans_handle_cachep)
8119 goto fail;
9601e3f6 8120
837e1972 8121 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9601e3f6
CH
8122 sizeof(struct btrfs_transaction), 0,
8123 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
39279cc3
CM
8124 if (!btrfs_transaction_cachep)
8125 goto fail;
9601e3f6 8126
837e1972 8127 btrfs_path_cachep = kmem_cache_create("btrfs_path",
9601e3f6
CH
8128 sizeof(struct btrfs_path), 0,
8129 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
39279cc3
CM
8130 if (!btrfs_path_cachep)
8131 goto fail;
9601e3f6 8132
837e1972 8133 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
dc89e982
JB
8134 sizeof(struct btrfs_free_space), 0,
8135 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8136 if (!btrfs_free_space_cachep)
8137 goto fail;
8138
8ccf6f19
MX
8139 btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
8140 sizeof(struct btrfs_delalloc_work), 0,
8141 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
8142 NULL);
8143 if (!btrfs_delalloc_work_cachep)
8144 goto fail;
8145
39279cc3
CM
8146 return 0;
8147fail:
8148 btrfs_destroy_cachep();
8149 return -ENOMEM;
8150}
8151
8152static int btrfs_getattr(struct vfsmount *mnt,
8153 struct dentry *dentry, struct kstat *stat)
8154{
df0af1a5 8155 u64 delalloc_bytes;
39279cc3 8156 struct inode *inode = dentry->d_inode;
fadc0d8b
DS
8157 u32 blocksize = inode->i_sb->s_blocksize;
8158
39279cc3 8159 generic_fillattr(inode, stat);
0ee5dc67 8160 stat->dev = BTRFS_I(inode)->root->anon_dev;
d6667462 8161 stat->blksize = PAGE_CACHE_SIZE;
df0af1a5
MX
8162
8163 spin_lock(&BTRFS_I(inode)->lock);
8164 delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
8165 spin_unlock(&BTRFS_I(inode)->lock);
fadc0d8b 8166 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
df0af1a5 8167 ALIGN(delalloc_bytes, blocksize)) >> 9;
39279cc3
CM
8168 return 0;
8169}
8170
d397712b
CM
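/*
 * Rename within a single root, or move a subvolume link between directories.
 * The whole operation happens in one transaction; for normal inodes the log
 * is pinned across the unlink/link pair so a crash leaves the inode
 * reachable under either the old or the new name, while subvolume renames
 * force a full log commit instead.
 */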
8171static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
8172 struct inode *new_dir, struct dentry *new_dentry)
39279cc3
CM
8173{
8174 struct btrfs_trans_handle *trans;
8175 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4df27c4d 8176 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
39279cc3
CM
8177 struct inode *new_inode = new_dentry->d_inode;
8178 struct inode *old_inode = old_dentry->d_inode;
8179 struct timespec ctime = CURRENT_TIME;
00e4e6b3 8180 u64 index = 0;
4df27c4d 8181 u64 root_objectid;
39279cc3 8182 int ret;
33345d01 8183 u64 old_ino = btrfs_ino(old_inode);
39279cc3 8184
33345d01 8185 if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
f679a840
YZ
8186 return -EPERM;
8187
4df27c4d 8188 /* we only allow rename subvolume link between subvolumes */
33345d01 8189 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
3394e160
CM
8190 return -EXDEV;
8191
33345d01
LZ
8192 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8193 (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
39279cc3 8194 return -ENOTEMPTY;
5f39d397 8195
4df27c4d
YZ
8196 if (S_ISDIR(old_inode->i_mode) && new_inode &&
8197 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8198 return -ENOTEMPTY;
9c52057c
CM
8199
8200
8201 /* check for collisions, even if the name isn't there */
4871c158 8202 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9c52057c
CM
8203 new_dentry->d_name.name,
8204 new_dentry->d_name.len);
8205
8206 if (ret) {
8207 if (ret == -EEXIST) {
8208			/* we shouldn't get
8209			 * -EEXIST without a new_inode */
fae7f21c 8210 if (WARN_ON(!new_inode)) {
9c52057c
CM
8211 return ret;
8212 }
8213 } else {
8214 /* maybe -EOVERFLOW */
8215 return ret;
8216 }
8217 }
8218 ret = 0;
8219
5a3f23d5
CM
8220 /*
8221 * we're using rename to replace one file with another.
8222 * and the replacement file is large. Start IO on it now so
8223 * we don't add too much work to the end of the transaction
8224 */
4baf8c92 8225 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5a3f23d5
CM
8226 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
8227 filemap_flush(old_inode->i_mapping);
8228
76dda93c 8229 /* close the racy window with snapshot create/destroy ioctl */
33345d01 8230 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
76dda93c 8231 down_read(&root->fs_info->subvol_sem);
a22285a6
YZ
8232 /*
8233 * We want to reserve the absolute worst case amount of items. So if
8234 * both inodes are subvols and we need to unlink them then that would
8235 * require 4 item modifications, but if they are both normal inodes it
8236 * would require 5 item modifications, so we'll assume their normal
8237 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
8238 * should cover the worst case number of items we'll modify.
8239 */
6e137ed3 8240 trans = btrfs_start_transaction(root, 11);
b44c59a8
JL
8241 if (IS_ERR(trans)) {
8242 ret = PTR_ERR(trans);
8243 goto out_notrans;
8244 }
76dda93c 8245
4df27c4d
YZ
8246 if (dest != root)
8247 btrfs_record_root_in_trans(trans, dest);
5f39d397 8248
a5719521
YZ
8249 ret = btrfs_set_inode_index(new_dir, &index);
8250 if (ret)
8251 goto out_fail;
5a3f23d5 8252
67de1176 8253 BTRFS_I(old_inode)->dir_index = 0ULL;
33345d01 8254 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4df27c4d
YZ
8255 /* force full log commit if subvolume involved. */
8256 root->fs_info->last_trans_log_full_commit = trans->transid;
8257 } else {
a5719521
YZ
8258 ret = btrfs_insert_inode_ref(trans, dest,
8259 new_dentry->d_name.name,
8260 new_dentry->d_name.len,
33345d01
LZ
8261 old_ino,
8262 btrfs_ino(new_dir), index);
a5719521
YZ
8263 if (ret)
8264 goto out_fail;
4df27c4d
YZ
8265 /*
8266		 * This is an ugly little race, but the rename is required
8267		 * to make sure that if we crash, the inode is either at the
8268		 * old name or the new one.  Pinning the log transaction lets
8269 * us make sure we don't allow a log commit to come in after
8270 * we unlink the name but before we add the new name back in.
8271 */
8272 btrfs_pin_log_trans(root);
8273 }
5a3f23d5
CM
8274 /*
8275 * make sure the inode gets flushed if it is replacing
8276 * something.
8277 */
33345d01 8278 if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
5a3f23d5 8279 btrfs_add_ordered_operation(trans, root, old_inode);
5a3f23d5 8280
0c4d2d95
JB
8281 inode_inc_iversion(old_dir);
8282 inode_inc_iversion(new_dir);
8283 inode_inc_iversion(old_inode);
39279cc3
CM
8284 old_dir->i_ctime = old_dir->i_mtime = ctime;
8285 new_dir->i_ctime = new_dir->i_mtime = ctime;
8286 old_inode->i_ctime = ctime;
5f39d397 8287
12fcfd22
CM
8288 if (old_dentry->d_parent != new_dentry->d_parent)
8289 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
8290
33345d01 8291 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4df27c4d
YZ
8292 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
8293 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
8294 old_dentry->d_name.name,
8295 old_dentry->d_name.len);
8296 } else {
92986796
AV
8297 ret = __btrfs_unlink_inode(trans, root, old_dir,
8298 old_dentry->d_inode,
8299 old_dentry->d_name.name,
8300 old_dentry->d_name.len);
8301 if (!ret)
8302 ret = btrfs_update_inode(trans, root, old_inode);
4df27c4d 8303 }
79787eaa
JM
8304 if (ret) {
8305 btrfs_abort_transaction(trans, root, ret);
8306 goto out_fail;
8307 }
39279cc3
CM
8308
8309 if (new_inode) {
0c4d2d95 8310 inode_inc_iversion(new_inode);
39279cc3 8311 new_inode->i_ctime = CURRENT_TIME;
33345d01 8312 if (unlikely(btrfs_ino(new_inode) ==
4df27c4d
YZ
8313 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8314 root_objectid = BTRFS_I(new_inode)->location.objectid;
8315 ret = btrfs_unlink_subvol(trans, dest, new_dir,
8316 root_objectid,
8317 new_dentry->d_name.name,
8318 new_dentry->d_name.len);
8319 BUG_ON(new_inode->i_nlink == 0);
8320 } else {
8321 ret = btrfs_unlink_inode(trans, dest, new_dir,
8322 new_dentry->d_inode,
8323 new_dentry->d_name.name,
8324 new_dentry->d_name.len);
8325 }
4ef31a45 8326 if (!ret && new_inode->i_nlink == 0)
e02119d5 8327 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
79787eaa
JM
8328 if (ret) {
8329 btrfs_abort_transaction(trans, root, ret);
8330 goto out_fail;
8331 }
39279cc3 8332 }
aec7477b 8333
4df27c4d
YZ
8334 ret = btrfs_add_link(trans, new_dir, old_inode,
8335 new_dentry->d_name.name,
a5719521 8336 new_dentry->d_name.len, 0, index);
79787eaa
JM
8337 if (ret) {
8338 btrfs_abort_transaction(trans, root, ret);
8339 goto out_fail;
8340 }
39279cc3 8341
67de1176
MX
8342 if (old_inode->i_nlink == 1)
8343 BTRFS_I(old_inode)->dir_index = index;
8344
33345d01 8345 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
10d9f309 8346 struct dentry *parent = new_dentry->d_parent;
6a912213 8347 btrfs_log_new_name(trans, old_inode, old_dir, parent);
4df27c4d
YZ
8348 btrfs_end_log_trans(root);
8349 }
39279cc3 8350out_fail:
7ad85bb7 8351 btrfs_end_transaction(trans, root);
b44c59a8 8352out_notrans:
33345d01 8353 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
76dda93c 8354 up_read(&root->fs_info->subvol_sem);
9ed74f2d 8355
39279cc3
CM
8356 return ret;
8357}
8358
8ccf6f19
MX
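/*
 * Worker body for async delalloc flushing: flush (and optionally wait on)
 * one inode's dirty pages, drop the inode reference, then signal completion
 * so btrfs_wait_and_free_delalloc_work() can reclaim the work item.
 */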
8359static void btrfs_run_delalloc_work(struct btrfs_work *work)
8360{
8361 struct btrfs_delalloc_work *delalloc_work;
9f23e289 8362 struct inode *inode;
8ccf6f19
MX
8363
8364 delalloc_work = container_of(work, struct btrfs_delalloc_work,
8365 work);
9f23e289
JB
8366 inode = delalloc_work->inode;
8367 if (delalloc_work->wait) {
8368 btrfs_wait_ordered_range(inode, 0, (u64)-1);
8369 } else {
8370 filemap_flush(inode->i_mapping);
8371 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8372 &BTRFS_I(inode)->runtime_flags))
8373 filemap_flush(inode->i_mapping);
8374 }
8ccf6f19
MX
8375
8376 if (delalloc_work->delay_iput)
9f23e289 8377 btrfs_add_delayed_iput(inode);
8ccf6f19 8378 else
9f23e289 8379 iput(inode);
8ccf6f19
MX
8380 complete(&delalloc_work->completion);
8381}
8382
8383struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
8384 int wait, int delay_iput)
8385{
8386 struct btrfs_delalloc_work *work;
8387
8388 work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
8389 if (!work)
8390 return NULL;
8391
8392 init_completion(&work->completion);
8393 INIT_LIST_HEAD(&work->list);
8394 work->inode = inode;
8395 work->wait = wait;
8396 work->delay_iput = delay_iput;
8397 work->work.func = btrfs_run_delalloc_work;
8398
8399 return work;
8400}
8401
void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
{
	wait_for_completion(&work->completion);
	kmem_cache_free(btrfs_delalloc_work_cachep, work);
}

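/*
 * A minimal usage sketch for the three helpers above (illustrative, not
 * code from this file): kick writeback for a single inode through the
 * flush workers and wait for the work item to run. It assumes the
 * caller holds an inode reference (e.g. from igrab()) which
 * btrfs_run_delalloc_work() drops via iput():
 *
 *	struct btrfs_delalloc_work *work;
 *
 *	work = btrfs_alloc_delalloc_work(inode, 0, 0);
 *	if (!work)
 *		return -ENOMEM;
 *	btrfs_queue_worker(&root->fs_info->flush_workers, &work->work);
 *	btrfs_wait_and_free_delalloc_work(work);
 */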
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	struct list_head works;
	struct list_head splice;
	int ret = 0;

	INIT_LIST_HEAD(&works);
	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
		if (unlikely(!work)) {
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	return 0;
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}

	if (!list_empty_careful(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	return ret;
}

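/*
 * Kick off delalloc writeback for every dirty inode on @root. Returns
 * -EROFS when the filesystem has been forced read-only by errors,
 * -ENOMEM when a work item could not be allocated, 0 otherwise.
 */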
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EROFS;

	ret = __start_delalloc_inodes(root, delay_iput);
	/*
	 * the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
			    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return ret;
}

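/*
 * Same as btrfs_start_delalloc_inodes(), but for every root on the
 * fs_info delalloc list; each root is pinned with btrfs_grab_fs_root()
 * while its inodes are flushed.
 */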
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput)
{
	struct btrfs_root *root;
	struct list_head splice;
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = __start_delalloc_inodes(root, delay_iput);
		btrfs_put_fs_root(root);
		if (ret)
			goto out;

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	atomic_inc(&fs_info->async_submit_draining);
	while (atomic_read(&fs_info->nr_async_submits) ||
	       atomic_read(&fs_info->async_delalloc_pages)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0 &&
			    atomic_read(&fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&fs_info->async_submit_draining);
	return 0;
out:
	if (!list_empty_careful(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	return ret;
}

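/*
 * Create a symlink. The target string is stored as an inline extent in
 * the file's first extent data item, which caps its length at
 * BTRFS_MAX_INLINE_DATA_SIZE(root).
 */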
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		drop_inode = 1;
		goto out_unlock;
	}
	key.objectid = btrfs_ino(inode);
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		btrfs_free_path(path);
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	if (!err)
		d_instantiate(dentry, inode);
	btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root);
	return err;
}

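/*
 * Insert BTRFS_FILE_EXTENT_PREALLOC extents covering
 * [start, start + num_bytes), allocating at most 256MB and at least
 * @min_size per iteration. If @trans is NULL, a transaction is started
 * and ended around each allocated extent.
 */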
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	u64 cur_bytes;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
		cur_bytes = max(cur_bytes, min_size);
		ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
					   *alloc_hint, &ins, 1);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		if (ret) {
			btrfs_free_reserved_extent(root, ins.objectid,
						   ins.offset);
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		em->generation = trans->transid;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST)
				break;
			btrfs_drop_extent_cache(inode, cur_offset,
						cur_offset + ins.offset - 1,
						0);
		}
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);

		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}

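/*
 * The two entry points below differ only in whether the caller supplies
 * its own transaction. An illustrative call (the values are made up,
 * not taken from a real caller): preallocate 1MiB at offset 0 without
 * growing i_size:
 *
 *	u64 alloc_hint = 0;
 *	int ret;
 *
 *	ret = btrfs_prealloc_file_range(inode, FALLOC_FL_KEEP_SIZE, 0,
 *					1024 * 1024, root->sectorsize,
 *					1024 * 1024, &alloc_hint);
 */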
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}

static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

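/*
 * Write access to regular files, directories and symlinks is refused on
 * read-only subvolumes and on inodes flagged BTRFS_INODE_READONLY;
 * everything else falls through to generic_permission().
 */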
static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr = btrfs_getattr,
	.lookup = btrfs_lookup,
	.create = btrfs_create,
	.unlink = btrfs_unlink,
	.link = btrfs_link,
	.mkdir = btrfs_mkdir,
	.rmdir = btrfs_rmdir,
	.rename = btrfs_rename,
	.symlink = btrfs_symlink,
	.setattr = btrfs_setattr,
	.mknod = btrfs_mknod,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.permission = btrfs_permission,
	.get_acl = btrfs_get_acl,
	.update_time = btrfs_update_time,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup = btrfs_lookup,
	.permission = btrfs_permission,
	.get_acl = btrfs_get_acl,
	.update_time = btrfs_update_time,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.iterate = btrfs_real_readdir,
	.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = btrfs_ioctl,
#endif
	.release = btrfs_release_file,
	.fsync = btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage = btrfs_readpage,
	.writepage = btrfs_writepage,
	.writepages = btrfs_writepages,
	.readpages = btrfs_readpages,
	.direct_IO = btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage = btrfs_releasepage,
	.set_page_dirty = btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage = btrfs_readpage,
	.writepage = btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage = btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.permission = btrfs_permission,
	.fiemap = btrfs_fiemap,
	.get_acl = btrfs_get_acl,
	.update_time = btrfs_update_time,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.get_acl = btrfs_get_acl,
	.update_time = btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink = generic_readlink,
	.follow_link = page_follow_link_light,
	.put_link = page_put_link,
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.get_acl = btrfs_get_acl,
	.update_time = btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete = btrfs_dentry_delete,
	.d_release = btrfs_dentry_release,
};