/*
 * Source: fs/btrfs/inode.c from linux-2.6-block.git, captured from an
 * annotated (blame) listing at commit "Btrfs: fix deadlock with freeze
 * and sync V2".  Commit-hash and author-initial annotations from that
 * listing are interleaved with the code below.
 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
8f18cf13 19#include <linux/kernel.h>
065631f6 20#include <linux/bio.h>
39279cc3 21#include <linux/buffer_head.h>
f2eb0a24 22#include <linux/file.h>
39279cc3
CM
23#include <linux/fs.h>
24#include <linux/pagemap.h>
25#include <linux/highmem.h>
26#include <linux/time.h>
27#include <linux/init.h>
28#include <linux/string.h>
39279cc3
CM
29#include <linux/backing-dev.h>
30#include <linux/mpage.h>
31#include <linux/swap.h>
32#include <linux/writeback.h>
33#include <linux/statfs.h>
34#include <linux/compat.h>
9ebefb18 35#include <linux/bit_spinlock.h>
5103e947 36#include <linux/xattr.h>
33268eaf 37#include <linux/posix_acl.h>
d899e052 38#include <linux/falloc.h>
5a0e3ad6 39#include <linux/slab.h>
7a36ddec 40#include <linux/ratelimit.h>
22c44fe6 41#include <linux/mount.h>
4b4e25f2 42#include "compat.h"
39279cc3
CM
43#include "ctree.h"
44#include "disk-io.h"
45#include "transaction.h"
46#include "btrfs_inode.h"
47#include "ioctl.h"
48#include "print-tree.h"
e6dcd2dc 49#include "ordered-data.h"
95819c05 50#include "xattr.h"
e02119d5 51#include "tree-log.h"
4a54c8c1 52#include "volumes.h"
c8b97818 53#include "compression.h"
b4ce94de 54#include "locking.h"
dc89e982 55#include "free-space-cache.h"
581bb050 56#include "inode-map.h"
39279cc3
CM
57
/*
 * arguments passed through iget5_locked() when looking up / instantiating
 * a btrfs inode: the inode number and the subvolume root it lives in.
 */
struct btrfs_iget_args {
	u64 ino;			/* btrfs inode number (objectid) */
	struct btrfs_root *root;	/* subvolume root owning the inode */
};
62
6e1d5dcc
AD
63static const struct inode_operations btrfs_dir_inode_operations;
64static const struct inode_operations btrfs_symlink_inode_operations;
65static const struct inode_operations btrfs_dir_ro_inode_operations;
66static const struct inode_operations btrfs_special_inode_operations;
67static const struct inode_operations btrfs_file_inode_operations;
7f09410b
AD
68static const struct address_space_operations btrfs_aops;
69static const struct address_space_operations btrfs_symlink_aops;
828c0950 70static const struct file_operations btrfs_dir_file_operations;
d1310b2e 71static struct extent_io_ops btrfs_extent_io_ops;
39279cc3
CM
72
73static struct kmem_cache *btrfs_inode_cachep;
74struct kmem_cache *btrfs_trans_handle_cachep;
75struct kmem_cache *btrfs_transaction_cachep;
39279cc3 76struct kmem_cache *btrfs_path_cachep;
dc89e982 77struct kmem_cache *btrfs_free_space_cachep;
39279cc3
CM
78
/*
 * map the S_IF* file-type bits of i_mode (shifted down by S_SHIFT so they
 * index a small table) to the BTRFS_FT_* type codes stored in directory
 * entries on disk.
 */
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
};
89
a41ad394
JB
90static int btrfs_setsize(struct inode *inode, loff_t newsize);
91static int btrfs_truncate(struct inode *inode);
5fd02043 92static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
771ed689
CM
93static noinline int cow_file_range(struct inode *inode,
94 struct page *locked_page,
95 u64 start, u64 end, int *page_started,
96 unsigned long *nr_written, int unlock);
2115133f
CM
97static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
98 struct btrfs_root *root, struct inode *inode);
7b128766 99
/*
 * initialize security attributes for a freshly created inode: first the
 * POSIX ACLs inherited from @dir, then the security xattrs for @qstr.
 * Returns 0 on success or the first error encountered.
 */
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int ret;

	ret = btrfs_init_acl(trans, inode, dir);
	if (ret)
		return ret;
	return btrfs_xattr_security_init(trans, inode, dir, qstr);
}
111
c8b97818
CM
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 *
 * @start:		file offset of the extent (key.offset)
 * @size:		uncompressed (ram) byte count
 * @compressed_size:	on-disk byte count when compressed, else 0
 * @compress_type:	BTRFS_COMPRESS_* code; NONE means copy straight
 *			from the page cache page covering @start
 * @compressed_pages:	pages holding the compressed data, when compressed
 *
 * Returns 0 on success or a negative errno from the item insert /
 * inode update.
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	/* on-disk item payload is the compressed size, when we have one */
	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		/* copy the compressed pages into the item, page by page */
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		/*
		 * uncompressed: pull the data from the (already locked)
		 * page cache page that covers @start
		 */
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}
215
216
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 *
 * Returns 0 when an inline extent was created, 1 when the range does
 * not qualify (caller falls back to a regular extent), or a negative
 * errno on failure (the transaction is aborted for errors other than
 * ENOSPC).
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	/* round the drop range up to a sector boundary */
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	/*
	 * only ranges that start at offset 0, end inside the first page,
	 * fit in the inline item limits and reach i_size may be inlined;
	 * anything else takes the regular extent path.
	 */
	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	if (ret)
		return ret;

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	} else if (ret == -ENOSPC) {
		/* no room to inline: tell the caller to go the normal way */
		return 1;
	}

	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}
271
771ed689
CM
/*
 * one unit of work produced by the compression phase and consumed by
 * submit_compressed_extents(): a file range plus (optionally) the pages
 * holding its compressed data.
 */
struct async_extent {
	u64 start;			/* file offset of the range */
	u64 ram_size;			/* uncompressed length */
	u64 compressed_size;		/* compressed length (0 if none) */
	struct page **pages;		/* compressed pages, NULL => raw IO */
	unsigned long nr_pages;		/* entries in @pages */
	int compress_type;		/* BTRFS_COMPRESS_* used */
	struct list_head list;		/* link on async_cow->extents */
};
281
/*
 * per-range context for the two-phase async COW/compression machinery;
 * carries the range being written plus the list of async_extents built
 * by the compression phase.
 */
struct async_cow {
	struct inode *inode;		/* inode being written */
	struct btrfs_root *root;	/* its subvolume root */
	struct page *locked_page;	/* page writepage already locked */
	u64 start;			/* first byte of the range */
	u64 end;			/* last byte of the range */
	struct list_head extents;	/* async_extent work items */
	struct btrfs_work work;		/* workqueue hook */
};
291
292static noinline int add_async_extent(struct async_cow *cow,
293 u64 start, u64 ram_size,
294 u64 compressed_size,
295 struct page **pages,
261507a0
LZ
296 unsigned long nr_pages,
297 int compress_type)
771ed689
CM
298{
299 struct async_extent *async_extent;
300
301 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
79787eaa 302 BUG_ON(!async_extent); /* -ENOMEM */
771ed689
CM
303 async_extent->start = start;
304 async_extent->ram_size = ram_size;
305 async_extent->compressed_size = compressed_size;
306 async_extent->pages = pages;
307 async_extent->nr_pages = nr_pages;
261507a0 308 async_extent->compress_type = compress_type;
771ed689
CM
309 list_add_tail(&async_extent->list, &cow->extents);
310 return 0;
311}
312
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 *
 * On success *num_added is bumped once per async_extent queued on
 * @async_cow; 0 added extents means the range was fully handled here
 * (inline extent) or nothing needed doing.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto cleanup_and_out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			/* more of the range left: loop to compress the rest */
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;

cleanup_and_out:
	/* joining the transaction failed: clean all range state and bail */
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	if (!trans || IS_ERR(trans))
		btrfs_error(root->fs_info, ret, "Failed to join transaction");
	else
		btrfs_abort_transaction(trans, root, ret);
	goto free_pages_out;
}
586
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 *
 * async_extents without pages fall back to the uncompressed
 * cow_file_range() path; the rest get a disk reservation, an extent
 * map, an ordered extent and then a compressed bio submission.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;


	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			/* reservation failed: release the compressed pages */
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1);
			/* ENOSPC: retry as uncompressed IO (pages now NULL) */
			if (ret == -ENOSPC)
				goto retry;
			goto out_free; /* JDM: Requeue? */
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		/* keep dropping the cached range until our mapping goes in */
		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret); /* -ENOMEM */

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret); /* -ENOMEM */
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free:
	kfree(async_extent);
	goto out;
}
763
4b46fce2
JB
764static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
765 u64 num_bytes)
766{
767 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
768 struct extent_map *em;
769 u64 alloc_hint = 0;
770
771 read_lock(&em_tree->lock);
772 em = search_extent_mapping(em_tree, start, num_bytes);
773 if (em) {
774 /*
775 * if block start isn't an actual block number then find the
776 * first block in this inode and use that as a hint. If that
777 * block is also bogus then just don't worry about it.
778 */
779 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
780 free_extent_map(em);
781 em = search_extent_mapping(em_tree, 0, 0);
782 if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
783 alloc_hint = em->block_start;
784 if (em)
785 free_extent_map(em);
786 } else {
787 alloc_hint = em->block_start;
788 free_extent_map(em);
789 }
790 }
791 read_unlock(&em_tree->lock);
792
793 return alloc_hint;
794}
795
771ed689
CM
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(inode));
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		/* can't join: clear all range state before reporting */
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	/* round the range up to a full block */
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			/* inline worked: all IO and unlocking is done */
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	/* allocate on-disk extents until the whole range is covered */
	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		/* keep dropping the cached range until our mapping goes in */
		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_unlock;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, root);

	return ret;
out_unlock:
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, locked_page,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}
c8b97818 976
771ed689
CM
977/*
978 * work queue call back to started compression on a file and pages
979 */
980static noinline void async_cow_start(struct btrfs_work *work)
981{
982 struct async_cow *async_cow;
983 int num_added = 0;
984 async_cow = container_of(work, struct async_cow, work);
985
986 compress_file_range(async_cow->inode, async_cow->locked_page,
987 async_cow->start, async_cow->end, async_cow,
988 &num_added);
8180ef88 989 if (num_added == 0) {
cb77fcd8 990 btrfs_add_delayed_iput(async_cow->inode);
771ed689 991 async_cow->inode = NULL;
8180ef88 992 }
771ed689
CM
993}
994
995/*
996 * work queue call back to submit previously compressed pages
997 */
998static noinline void async_cow_submit(struct btrfs_work *work)
999{
1000 struct async_cow *async_cow;
1001 struct btrfs_root *root;
1002 unsigned long nr_pages;
1003
1004 async_cow = container_of(work, struct async_cow, work);
1005
1006 root = async_cow->root;
1007 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
1008 PAGE_CACHE_SHIFT;
1009
66657b31 1010 if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
287082b0 1011 5 * 1024 * 1024 &&
771ed689
CM
1012 waitqueue_active(&root->fs_info->async_submit_wait))
1013 wake_up(&root->fs_info->async_submit_wait);
1014
d397712b 1015 if (async_cow->inode)
771ed689 1016 submit_compressed_extents(async_cow->inode, async_cow);
771ed689 1017}
c8b97818 1018
771ed689
CM
1019static noinline void async_cow_free(struct btrfs_work *work)
1020{
1021 struct async_cow *async_cow;
1022 async_cow = container_of(work, struct async_cow, work);
8180ef88 1023 if (async_cow->inode)
cb77fcd8 1024 btrfs_add_delayed_iput(async_cow->inode);
771ed689
CM
1025 kfree(async_cow);
1026}
1027
/*
 * Split [start, end] into chunks (512K unless compression is disabled for
 * the inode) and queue each chunk on the delalloc worker pool, where
 * async_cow_start/submit/free run the actual COW + compression.
 *
 * Throttles the caller once the global async_delalloc_pages count passes
 * @limit, and stalls entirely while a drain is in progress.  Always
 * reports the whole range as started (*page_started = 1) and returns 0.
 */
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;	/* throttle threshold, in pages' worth */

	/* the workers re-lock per chunk; drop the caller's range lock */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		/* igrab may return NULL on a dying inode — the worker
		 * callbacks treat a NULL inode as "nothing to do" */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		/* too much queued async delalloc: wait for the workers */
		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		/* a drain is in progress: wait until everything is flushed */
		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
1087
d397712b 1088static noinline int csum_exist_in_range(struct btrfs_root *root,
17d217fe
YZ
1089 u64 bytenr, u64 num_bytes)
1090{
1091 int ret;
1092 struct btrfs_ordered_sum *sums;
1093 LIST_HEAD(list);
1094
07d400a6 1095 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
a2de733c 1096 bytenr + num_bytes - 1, &list, 0);
17d217fe
YZ
1097 if (ret == 0 && list_empty(&list))
1098 return 0;
1099
1100 while (!list_empty(&list)) {
1101 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1102 list_del(&sums->list);
1103 kfree(sums);
1104 }
1105 return 1;
1106}
1107
d352ac68
CM
/*
 * when nowcow writeback call back. This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 *
 * Walks the file extent items covering [start, end].  Ranges backed by
 * shared, compressed, encrypted, read-only or checksummed extents are
 * accumulated into [cow_start, ...) and handed to cow_file_range();
 * everything else is written in place as a NOCOW/PREALLOC ordered extent.
 * @force selects whether regular (non-prealloc) extents may be reused.
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;		/* first offset of a pending COW run, -1 if none */
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;	/* first search may land one slot too far */
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		/* unwind the delalloc state the caller set up for us */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, locked_page,
					     EXTENT_CLEAR_UNLOCK_PAGE |
					     EXTENT_CLEAR_UNLOCK |
					     EXTENT_CLEAR_DELALLOC |
					     EXTENT_CLEAR_DIRTY |
					     EXTENT_SET_WRITEBACK |
					     EXTENT_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	/* free-space inodes must not take the normal transaction locks */
	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, locked_page,
					     EXTENT_CLEAR_UNLOCK_PAGE |
					     EXTENT_CLEAR_UNLOCK |
					     EXTENT_CLEAR_DELALLOC |
					     EXTENT_CLEAR_DIRTY |
					     EXTENT_SET_WRITEBACK |
					     EXTENT_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		/*
		 * On the first miss, the previous item may still be the
		 * extent covering cur_offset — step back one slot.
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;	/* no more items in the tree */
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* past this inode's data, or past the requested range */
		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/* hole before the next extent item: must be COWed */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			/* a hole extent: nothing to reuse */
			if (disk_bytenr == 0)
				goto out_check;
			/* transformed data can't be rewritten in place */
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			/* shared with a snapshot: must COW */
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensure that csum for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			/* extend (or open) the pending COW run */
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		/* drop the path before calling back into the allocator */
		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			/* flush the COW run that precedes this nocow extent */
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			/* pin an extent map so writeback finds the mapping */
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				/* evict the stale overlap and retry */
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	/* anything left after the last item must be COWed */
	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	if (nolock) {
		err = btrfs_end_transaction_nolock(trans, root);
	} else {
		err = btrfs_end_transaction(trans, root);
	}
	if (!ret)
		ret = err;

	/* on failure, clean up the part of the range we never reached */
	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     cur_offset, end, locked_page,
					     EXTENT_CLEAR_UNLOCK_PAGE |
					     EXTENT_CLEAR_UNLOCK |
					     EXTENT_CLEAR_DELALLOC |
					     EXTENT_CLEAR_DIRTY |
					     EXTENT_SET_WRITEBACK |
					     EXTENT_END_WRITEBACK);

	btrfs_free_path(path);
	return ret;
}
1388
d352ac68
CM
1389/*
1390 * extent_io.c call back to do delayed allocation processing
1391 */
c8b97818 1392static int run_delalloc_range(struct inode *inode, struct page *locked_page,
771ed689
CM
1393 u64 start, u64 end, int *page_started,
1394 unsigned long *nr_written)
be20aa9d 1395{
be20aa9d 1396 int ret;
7f366cfe 1397 struct btrfs_root *root = BTRFS_I(inode)->root;
a2135011 1398
7ddf5a42 1399 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
c8b97818 1400 ret = run_delalloc_nocow(inode, locked_page, start, end,
d397712b 1401 page_started, 1, nr_written);
7ddf5a42 1402 } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
d899e052 1403 ret = run_delalloc_nocow(inode, locked_page, start, end,
d397712b 1404 page_started, 0, nr_written);
7ddf5a42
JB
1405 } else if (!btrfs_test_opt(root, COMPRESS) &&
1406 !(BTRFS_I(inode)->force_compress) &&
1407 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
7f366cfe
CM
1408 ret = cow_file_range(inode, locked_page, start, end,
1409 page_started, nr_written, 1);
7ddf5a42
JB
1410 } else {
1411 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1412 &BTRFS_I(inode)->runtime_flags);
771ed689 1413 ret = cow_file_range_async(inode, locked_page, start, end,
d397712b 1414 page_started, nr_written);
7ddf5a42 1415 }
b888db2b
CM
1416 return ret;
1417}
1418
1bf85046
JM
1419static void btrfs_split_extent_hook(struct inode *inode,
1420 struct extent_state *orig, u64 split)
9ed74f2d 1421{
0ca1f7ce 1422 /* not delalloc, ignore it */
9ed74f2d 1423 if (!(orig->state & EXTENT_DELALLOC))
1bf85046 1424 return;
9ed74f2d 1425
9e0baf60
JB
1426 spin_lock(&BTRFS_I(inode)->lock);
1427 BTRFS_I(inode)->outstanding_extents++;
1428 spin_unlock(&BTRFS_I(inode)->lock);
9ed74f2d
JB
1429}
1430
1431/*
1432 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1433 * extents so we can keep track of new extents that are just merged onto old
1434 * extents, such as when we are doing sequential writes, so we can properly
1435 * account for the metadata space we'll need.
1436 */
1bf85046
JM
1437static void btrfs_merge_extent_hook(struct inode *inode,
1438 struct extent_state *new,
1439 struct extent_state *other)
9ed74f2d 1440{
9ed74f2d
JB
1441 /* not delalloc, ignore it */
1442 if (!(other->state & EXTENT_DELALLOC))
1bf85046 1443 return;
9ed74f2d 1444
9e0baf60
JB
1445 spin_lock(&BTRFS_I(inode)->lock);
1446 BTRFS_I(inode)->outstanding_extents--;
1447 spin_unlock(&BTRFS_I(inode)->lock);
9ed74f2d
JB
1448}
1449
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	/* only act on a 0 -> 1 transition of the DELALLOC bit */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		/* free-space inodes are kept off the delalloc list */
		bool do_list = !btrfs_is_free_space_inode(inode);

		/*
		 * FIRST_DELALLOC marks the extent whose reservation was
		 * already counted by the caller; consume the flag instead
		 * of double-counting.
		 */
		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/* delalloc_lock guards both byte counters and the list */
		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}
1487
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	/* only act on a 1 -> 0 transition of the DELALLOC bit */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		/* free-space inodes are kept off the delalloc list */
		bool do_list = !btrfs_is_free_space_inode(inode);

		/*
		 * FIRST_DELALLOC: this extent's count is handled by the
		 * caller; DO_ACCOUNTING: the extent itself is going away,
		 * so the outstanding count drops below.
		 */
		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		/* reloc tree keeps its data space reservation */
		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		/* delalloc_lock guards both byte counters and the list */
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}
1530
d352ac68
CM
1531/*
1532 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1533 * we don't create bios that span stripes or chunks
1534 */
239b14b3 1535int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
c8b97818
CM
1536 size_t size, struct bio *bio,
1537 unsigned long bio_flags)
239b14b3
CM
1538{
1539 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1540 struct btrfs_mapping_tree *map_tree;
a62b9401 1541 u64 logical = (u64)bio->bi_sector << 9;
239b14b3
CM
1542 u64 length = 0;
1543 u64 map_length;
239b14b3
CM
1544 int ret;
1545
771ed689
CM
1546 if (bio_flags & EXTENT_BIO_COMPRESSED)
1547 return 0;
1548
f2d8d74d 1549 length = bio->bi_size;
239b14b3
CM
1550 map_tree = &root->fs_info->mapping_tree;
1551 map_length = length;
cea9e445 1552 ret = btrfs_map_block(map_tree, READ, logical,
f188591e 1553 &map_length, NULL, 0);
3444a972
JM
1554 /* Will always return 0 or 1 with map_multi == NULL */
1555 BUG_ON(ret < 0);
d397712b 1556 if (map_length < length + size)
239b14b3 1557 return 1;
3444a972 1558 return 0;
239b14b3
CM
1559}
1560
d352ac68
CM
1561/*
1562 * in order to insert checksums into the metadata in large chunks,
1563 * we wait until bio submission time. All the pages in the bio are
1564 * checksummed and sums are attached onto the ordered extent record.
1565 *
1566 * At IO completion time the cums attached on the ordered extent record
1567 * are inserted into the btree
1568 */
d397712b
CM
1569static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1570 struct bio *bio, int mirror_num,
eaf25d93
CM
1571 unsigned long bio_flags,
1572 u64 bio_offset)
065631f6 1573{
065631f6 1574 struct btrfs_root *root = BTRFS_I(inode)->root;
065631f6 1575 int ret = 0;
e015640f 1576
d20f7043 1577 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
79787eaa 1578 BUG_ON(ret); /* -ENOMEM */
4a69a410
CM
1579 return 0;
1580}
e015640f 1581
4a69a410
CM
1582/*
1583 * in order to insert checksums into the metadata in large chunks,
1584 * we wait until bio submission time. All the pages in the bio are
1585 * checksummed and sums are attached onto the ordered extent record.
1586 *
1587 * At IO completion time the cums attached on the ordered extent record
1588 * are inserted into the btree
1589 */
b2950863 1590static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
eaf25d93
CM
1591 int mirror_num, unsigned long bio_flags,
1592 u64 bio_offset)
4a69a410
CM
1593{
1594 struct btrfs_root *root = BTRFS_I(inode)->root;
8b712842 1595 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
44b8bd7e
CM
1596}
1597
/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 *
 * Reads: route completion to the end-io workers, then either hand
 * compressed bios to the decompression path or look up the on-disk csums.
 * Writes: defer checksumming to the async workers unless csums are
 * skipped or already cloned (reloc tree).
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	/* free-space inode IO completes on a dedicated workqueue */
	if (btrfs_is_free_space_inode(inode))
		metadata = 2;

	if (!(rw & REQ_WRITE)) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
		if (ret)
			return ret;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
6885f308 1645
d352ac68
CM
1646/*
1647 * given a list of ordered sums record them in the inode. This happens
1648 * at IO completion time based on sums calculated at bio submission time.
1649 */
ba1da2f4 1650static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
e6dcd2dc
CM
1651 struct inode *inode, u64 file_offset,
1652 struct list_head *list)
1653{
e6dcd2dc
CM
1654 struct btrfs_ordered_sum *sum;
1655
c6e30871 1656 list_for_each_entry(sum, list, list) {
d20f7043
CM
1657 btrfs_csum_file_blocks(trans,
1658 BTRFS_I(inode)->root->fs_info->csum_root, sum);
e6dcd2dc
CM
1659 }
1660 return 0;
1661}
1662
2ac55d41
JB
1663int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1664 struct extent_state **cached_state)
ea8c2819 1665{
d397712b 1666 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
771ed689 1667 WARN_ON(1);
ea8c2819 1668 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
2ac55d41 1669 cached_state, GFP_NOFS);
ea8c2819
CM
1670}
1671
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;	/* pinned via page_cache_get until the worker runs */
	struct btrfs_work work;	/* queued on fs_info->fixup_workers */
};
1677
/*
 * Worker for btrfs_writepage_start_hook: redo the delalloc setup for a
 * page that was dirtied behind the filesystem's back.  Waits out any
 * ordered extent covering the page, reserves space, marks the range
 * delalloc and re-dirties the page so writeback will retry it.
 */
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	/* page was truncated, cleaned, or already fixed up: nothing to do */
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	/*
	 * An ordered extent means IO is in flight for this range; drop
	 * our locks, wait for it to finish, then start over.
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
	if (ret) {
		/* no space: fail the page like a failed writepage */
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	/* re-dirty so writeback picks the page up again */
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);	/* ref taken in btrfs_writepage_start_hook */
	kfree(fixup);
}
1738
1739/*
1740 * There are a few paths in the higher layers of the kernel that directly
1741 * set the page dirty bit without asking the filesystem if it is a
1742 * good idea. This causes problems because we want to make sure COW
1743 * properly happens and the data=ordered rules are followed.
1744 *
c8b97818 1745 * In our case any range that doesn't have the ORDERED bit set
247e743c
CM
1746 * hasn't been properly setup for IO. We kick off an async process
1747 * to fix it up. The async helper will wait for ordered extents, set
1748 * the delalloc bit and make it safe to write the page.
1749 */
b2950863 1750static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
247e743c
CM
1751{
1752 struct inode *inode = page->mapping->host;
1753 struct btrfs_writepage_fixup *fixup;
1754 struct btrfs_root *root = BTRFS_I(inode)->root;
247e743c 1755
8b62b72b
CM
1756 /* this page is properly in the ordered list */
1757 if (TestClearPagePrivate2(page))
247e743c
CM
1758 return 0;
1759
1760 if (PageChecked(page))
1761 return -EAGAIN;
1762
1763 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1764 if (!fixup)
1765 return -EAGAIN;
f421950f 1766
247e743c
CM
1767 SetPageChecked(page);
1768 page_cache_get(page);
1769 fixup->work.func = btrfs_writepage_fixup_worker;
1770 fixup->page = page;
1771 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
87826df0 1772 return -EBUSY;
247e743c
CM
1773}
1774
/*
 * Insert a file extent item for [file_pos, file_pos + num_bytes) backed
 * by an already-reserved disk extent at @disk_bytenr, dropping whatever
 * extents previously covered that file range, and record the extent
 * backref.  Returns 0 on success or a negative errno.
 */
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
				 &hint, 0);
	if (ret)
		goto out;

	/* insert the new file extent item for this range */
	ins.objectid = btrfs_ino(inode);
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	if (ret)
		goto out;
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);

	/* add the backref for the reserved extent in the extent tree */
	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					       root->root_key.objectid,
					       btrfs_ino(inode), file_pos, &ins);
out:
	btrfs_free_path(path);

	return ret;
}
1848
/*
 * as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 *
 * (An earlier comment here described a helper that pre-read csum leaves
 * before starting the transaction; that helper no longer exists.)
 */
5fd02043 1859static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
e6dcd2dc 1860{
5fd02043 1861 struct inode *inode = ordered_extent->inode;
e6dcd2dc 1862 struct btrfs_root *root = BTRFS_I(inode)->root;
0ca1f7ce 1863 struct btrfs_trans_handle *trans = NULL;
e6dcd2dc 1864 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2ac55d41 1865 struct extent_state *cached_state = NULL;
261507a0 1866 int compress_type = 0;
e6dcd2dc 1867 int ret;
82d5902d 1868 bool nolock;
e6dcd2dc 1869
83eea1f1 1870 nolock = btrfs_is_free_space_inode(inode);
0cb59c99 1871
5fd02043
JB
1872 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
1873 ret = -EIO;
1874 goto out;
1875 }
1876
c2167754 1877 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
79787eaa 1878 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
c2167754
YZ
1879 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1880 if (!ret) {
0cb59c99 1881 if (nolock)
7a7eaa40 1882 trans = btrfs_join_transaction_nolock(root);
0cb59c99 1883 else
7a7eaa40 1884 trans = btrfs_join_transaction(root);
79787eaa
JM
1885 if (IS_ERR(trans))
1886 return PTR_ERR(trans);
0ca1f7ce 1887 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2115133f 1888 ret = btrfs_update_inode_fallback(trans, root, inode);
79787eaa
JM
1889 if (ret) /* -ENOMEM or corruption */
1890 btrfs_abort_transaction(trans, root, ret);
c2167754
YZ
1891 }
1892 goto out;
1893 }
e6dcd2dc 1894
2ac55d41
JB
1895 lock_extent_bits(io_tree, ordered_extent->file_offset,
1896 ordered_extent->file_offset + ordered_extent->len - 1,
d0082371 1897 0, &cached_state);
e6dcd2dc 1898
0cb59c99 1899 if (nolock)
7a7eaa40 1900 trans = btrfs_join_transaction_nolock(root);
0cb59c99 1901 else
7a7eaa40 1902 trans = btrfs_join_transaction(root);
79787eaa
JM
1903 if (IS_ERR(trans)) {
1904 ret = PTR_ERR(trans);
1905 trans = NULL;
1906 goto out_unlock;
1907 }
0ca1f7ce 1908 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
c2167754 1909
c8b97818 1910 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
261507a0 1911 compress_type = ordered_extent->compress_type;
d899e052 1912 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
261507a0 1913 BUG_ON(compress_type);
920bbbfb 1914 ret = btrfs_mark_extent_written(trans, inode,
d899e052
YZ
1915 ordered_extent->file_offset,
1916 ordered_extent->file_offset +
1917 ordered_extent->len);
d899e052 1918 } else {
0af3d00b 1919 BUG_ON(root == root->fs_info->tree_root);
d899e052
YZ
1920 ret = insert_reserved_file_extent(trans, inode,
1921 ordered_extent->file_offset,
1922 ordered_extent->start,
1923 ordered_extent->disk_len,
1924 ordered_extent->len,
1925 ordered_extent->len,
261507a0 1926 compress_type, 0, 0,
d899e052 1927 BTRFS_FILE_EXTENT_REG);
a1ed835e
CM
1928 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1929 ordered_extent->file_offset,
1930 ordered_extent->len);
d899e052 1931 }
5fd02043 1932
79787eaa
JM
1933 if (ret < 0) {
1934 btrfs_abort_transaction(trans, root, ret);
5fd02043 1935 goto out_unlock;
79787eaa 1936 }
2ac55d41 1937
e6dcd2dc
CM
1938 add_pending_csums(trans, inode, ordered_extent->file_offset,
1939 &ordered_extent->list);
1940
1ef30be1 1941 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
a39f7521 1942 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2115133f 1943 ret = btrfs_update_inode_fallback(trans, root, inode);
79787eaa
JM
1944 if (ret) { /* -ENOMEM or corruption */
1945 btrfs_abort_transaction(trans, root, ret);
5fd02043 1946 goto out_unlock;
79787eaa 1947 }
1ef30be1
JB
1948 }
1949 ret = 0;
5fd02043
JB
1950out_unlock:
1951 unlock_extent_cached(io_tree, ordered_extent->file_offset,
1952 ordered_extent->file_offset +
1953 ordered_extent->len - 1, &cached_state, GFP_NOFS);
c2167754 1954out:
5b0e95bf 1955 if (root != root->fs_info->tree_root)
0cb59c99 1956 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
5b0e95bf
JB
1957 if (trans) {
1958 if (nolock)
0cb59c99 1959 btrfs_end_transaction_nolock(trans, root);
5b0e95bf 1960 else
0cb59c99
JB
1961 btrfs_end_transaction(trans, root);
1962 }
1963
5fd02043
JB
1964 if (ret)
1965 clear_extent_uptodate(io_tree, ordered_extent->file_offset,
1966 ordered_extent->file_offset +
1967 ordered_extent->len - 1, NULL, GFP_NOFS);
1968
1969 /*
1970 * This needs to be dont to make sure anybody waiting knows we are done
1971 * upating everything for this ordered extent.
1972 */
1973 btrfs_remove_ordered_extent(inode, ordered_extent);
1974
e6dcd2dc
CM
1975 /* once for us */
1976 btrfs_put_ordered_extent(ordered_extent);
1977 /* once for the tree */
1978 btrfs_put_ordered_extent(ordered_extent);
1979
5fd02043
JB
1980 return ret;
1981}
1982
/*
 * Work-queue entry point: recover the ordered extent that embeds this
 * btrfs_work and finish it via btrfs_finish_ordered_io().
 */
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	/* the work item lives inside the ordered extent itself */
	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}
1989
b2950863 1990static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
211f90e6
CM
1991 struct extent_state *state, int uptodate)
1992{
5fd02043
JB
1993 struct inode *inode = page->mapping->host;
1994 struct btrfs_root *root = BTRFS_I(inode)->root;
1995 struct btrfs_ordered_extent *ordered_extent = NULL;
1996 struct btrfs_workers *workers;
1997
1abe9b8a 1998 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
1999
8b62b72b 2000 ClearPagePrivate2(page);
5fd02043
JB
2001 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2002 end - start + 1, uptodate))
2003 return 0;
2004
2005 ordered_extent->work.func = finish_ordered_fn;
2006 ordered_extent->work.flags = 0;
2007
83eea1f1 2008 if (btrfs_is_free_space_inode(inode))
5fd02043
JB
2009 workers = &root->fs_info->endio_freespace_worker;
2010 else
2011 workers = &root->fs_info->endio_write_workers;
2012 btrfs_queue_worker(workers, &ordered_extent->work);
2013
2014 return 0;
211f90e6
CM
2015}
2016
/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, the code in
 * extent_io.c will try to find good copies for us.
 *
 * Returns 0 when the checksum matches (or checking is skipped), -EIO on
 * a mismatch; on mismatch the page bytes are poisoned with 0x01 so stale
 * data is never handed to userspace.
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state, int mirror)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	/* pages marked Checked were already validated (e.g. by repair) */
	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		goto good;

	/* relocation reads of nodatasum ranges carry no checksums */
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	/* fetch the expected csum, from the cached state when possible */
	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	/* map the page; both the check and the zeroit path need kaddr */
	kaddr = kmap_atomic(page);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr);
good:
	return 0;

zeroit:
	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
		       "private %llu\n",
		       (unsigned long long)btrfs_ino(page->mapping->host),
		       (unsigned long long)start, csum,
		       (unsigned long long)private);
	/* poison the range so bad data is obvious, then unmap */
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	/* a stored csum of 0 means "no checksum expected": not an error */
	if (private == 0)
		return 0;
	return -EIO;
}
b888db2b 2081
/* one deferred final iput(); queued on fs_info->delayed_iputs */
struct delayed_iput {
	struct list_head list;	/* link in fs_info->delayed_iputs */
	struct inode *inode;	/* inode whose last reference we hold */
};
2086
/* JDM: If this is fs-wide, why can't we add a pointer to
 * btrfs_inode instead and avoid the allocation? */
/*
 * Drop a reference to @inode, deferring the final iput() to
 * btrfs_run_delayed_iputs() when ours would be the last reference.
 */
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct delayed_iput *delayed;

	/* not the last reference: drop it directly and we are done */
	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	/* __GFP_NOFAIL: allocation cannot fail, so no error handling */
	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
	delayed->inode = inode;

	spin_lock(&fs_info->delayed_iput_lock);
	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
}
2104
/*
 * Process every iput deferred by btrfs_add_delayed_iput(): splice the
 * list out under the lock, then drop each inode reference and free the
 * bookkeeping entry.  Runs under cleanup_work_sem (read side).
 */
void btrfs_run_delayed_iputs(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct delayed_iput *delayed;
	int empty;

	/* cheap locked peek so the common empty case avoids the rwsem */
	spin_lock(&fs_info->delayed_iput_lock);
	empty = list_empty(&fs_info->delayed_iputs);
	spin_unlock(&fs_info->delayed_iput_lock);
	if (empty)
		return;

	down_read(&root->fs_info->cleanup_work_sem);
	spin_lock(&fs_info->delayed_iput_lock);
	list_splice_init(&fs_info->delayed_iputs, &list);
	spin_unlock(&fs_info->delayed_iput_lock);

	while (!list_empty(&list)) {
		delayed = list_entry(list.next, struct delayed_iput, list);
		list_del(&delayed->list);
		iput(delayed->inode);
		kfree(delayed);
	}
	up_read(&root->fs_info->cleanup_work_sem);
}
2131
/* progress of per-root orphan cleanup; see btrfs_orphan_cleanup() */
enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};
2136
2137/*
42b2aa86 2138 * This is called in transaction commit time. If there are no orphan
d68fc57b
YZ
2139 * files in the subvolume, it removes orphan item and frees block_rsv
2140 * structure.
2141 */
2142void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2143 struct btrfs_root *root)
2144{
90290e19 2145 struct btrfs_block_rsv *block_rsv;
d68fc57b
YZ
2146 int ret;
2147
8a35d95f 2148 if (atomic_read(&root->orphan_inodes) ||
d68fc57b
YZ
2149 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2150 return;
2151
90290e19 2152 spin_lock(&root->orphan_lock);
8a35d95f 2153 if (atomic_read(&root->orphan_inodes)) {
90290e19
JB
2154 spin_unlock(&root->orphan_lock);
2155 return;
2156 }
2157
2158 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2159 spin_unlock(&root->orphan_lock);
2160 return;
2161 }
2162
2163 block_rsv = root->orphan_block_rsv;
2164 root->orphan_block_rsv = NULL;
2165 spin_unlock(&root->orphan_lock);
2166
d68fc57b
YZ
2167 if (root->orphan_item_inserted &&
2168 btrfs_root_refs(&root->root_item) > 0) {
2169 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2170 root->root_key.objectid);
2171 BUG_ON(ret);
2172 root->orphan_item_inserted = 0;
2173 }
2174
90290e19
JB
2175 if (block_rsv) {
2176 WARN_ON(block_rsv->size > 0);
2177 btrfs_free_block_rsv(root, block_rsv);
d68fc57b
YZ
2178 }
2179}
2180
7b128766
JB
2181/*
2182 * This creates an orphan entry for the given inode in case something goes
2183 * wrong in the middle of an unlink/truncate.
d68fc57b
YZ
2184 *
2185 * NOTE: caller of this function should reserve 5 units of metadata for
2186 * this function.
7b128766
JB
2187 */
2188int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2189{
2190 struct btrfs_root *root = BTRFS_I(inode)->root;
d68fc57b
YZ
2191 struct btrfs_block_rsv *block_rsv = NULL;
2192 int reserve = 0;
2193 int insert = 0;
2194 int ret;
7b128766 2195
d68fc57b
YZ
2196 if (!root->orphan_block_rsv) {
2197 block_rsv = btrfs_alloc_block_rsv(root);
b532402e
TI
2198 if (!block_rsv)
2199 return -ENOMEM;
d68fc57b 2200 }
7b128766 2201
d68fc57b
YZ
2202 spin_lock(&root->orphan_lock);
2203 if (!root->orphan_block_rsv) {
2204 root->orphan_block_rsv = block_rsv;
2205 } else if (block_rsv) {
2206 btrfs_free_block_rsv(root, block_rsv);
2207 block_rsv = NULL;
7b128766 2208 }
7b128766 2209
8a35d95f
JB
2210 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2211 &BTRFS_I(inode)->runtime_flags)) {
d68fc57b
YZ
2212#if 0
2213 /*
2214 * For proper ENOSPC handling, we should do orphan
2215 * cleanup when mounting. But this introduces backward
2216 * compatibility issue.
2217 */
2218 if (!xchg(&root->orphan_item_inserted, 1))
2219 insert = 2;
2220 else
2221 insert = 1;
2222#endif
2223 insert = 1;
8a35d95f 2224 atomic_dec(&root->orphan_inodes);
7b128766
JB
2225 }
2226
72ac3c0d
JB
2227 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2228 &BTRFS_I(inode)->runtime_flags))
d68fc57b 2229 reserve = 1;
d68fc57b 2230 spin_unlock(&root->orphan_lock);
7b128766 2231
d68fc57b
YZ
2232 /* grab metadata reservation from transaction handle */
2233 if (reserve) {
2234 ret = btrfs_orphan_reserve_metadata(trans, inode);
79787eaa 2235 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
d68fc57b 2236 }
7b128766 2237
d68fc57b
YZ
2238 /* insert an orphan item to track this unlinked/truncated file */
2239 if (insert >= 1) {
33345d01 2240 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
79787eaa 2241 if (ret && ret != -EEXIST) {
8a35d95f
JB
2242 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2243 &BTRFS_I(inode)->runtime_flags);
79787eaa
JM
2244 btrfs_abort_transaction(trans, root, ret);
2245 return ret;
2246 }
2247 ret = 0;
d68fc57b
YZ
2248 }
2249
2250 /* insert an orphan item to track subvolume contains orphan files */
2251 if (insert >= 2) {
2252 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2253 root->root_key.objectid);
79787eaa
JM
2254 if (ret && ret != -EEXIST) {
2255 btrfs_abort_transaction(trans, root, ret);
2256 return ret;
2257 }
d68fc57b
YZ
2258 }
2259 return 0;
7b128766
JB
2260}
2261
2262/*
2263 * We have done the truncate/delete so we can go ahead and remove the orphan
2264 * item for this particular inode.
2265 */
2266int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2267{
2268 struct btrfs_root *root = BTRFS_I(inode)->root;
d68fc57b
YZ
2269 int delete_item = 0;
2270 int release_rsv = 0;
7b128766
JB
2271 int ret = 0;
2272
d68fc57b 2273 spin_lock(&root->orphan_lock);
8a35d95f
JB
2274 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2275 &BTRFS_I(inode)->runtime_flags))
d68fc57b 2276 delete_item = 1;
7b128766 2277
72ac3c0d
JB
2278 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2279 &BTRFS_I(inode)->runtime_flags))
d68fc57b 2280 release_rsv = 1;
d68fc57b 2281 spin_unlock(&root->orphan_lock);
7b128766 2282
d68fc57b 2283 if (trans && delete_item) {
33345d01 2284 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
79787eaa 2285 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
d68fc57b 2286 }
7b128766 2287
8a35d95f 2288 if (release_rsv) {
d68fc57b 2289 btrfs_orphan_release_metadata(inode);
8a35d95f
JB
2290 atomic_dec(&root->orphan_inodes);
2291 }
7b128766 2292
d68fc57b 2293 return 0;
7b128766
JB
2294}
2295
2296/*
2297 * this cleans up any orphans that may be left on the list from the last use
2298 * of this root.
2299 */
66b4ffd1 2300int btrfs_orphan_cleanup(struct btrfs_root *root)
7b128766
JB
2301{
2302 struct btrfs_path *path;
2303 struct extent_buffer *leaf;
7b128766
JB
2304 struct btrfs_key key, found_key;
2305 struct btrfs_trans_handle *trans;
2306 struct inode *inode;
8f6d7f4f 2307 u64 last_objectid = 0;
7b128766
JB
2308 int ret = 0, nr_unlink = 0, nr_truncate = 0;
2309
d68fc57b 2310 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
66b4ffd1 2311 return 0;
c71bf099
YZ
2312
2313 path = btrfs_alloc_path();
66b4ffd1
JB
2314 if (!path) {
2315 ret = -ENOMEM;
2316 goto out;
2317 }
7b128766
JB
2318 path->reada = -1;
2319
2320 key.objectid = BTRFS_ORPHAN_OBJECTID;
2321 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2322 key.offset = (u64)-1;
2323
7b128766
JB
2324 while (1) {
2325 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
66b4ffd1
JB
2326 if (ret < 0)
2327 goto out;
7b128766
JB
2328
2329 /*
2330 * if ret == 0 means we found what we were searching for, which
25985edc 2331 * is weird, but possible, so only screw with path if we didn't
7b128766
JB
2332 * find the key and see if we have stuff that matches
2333 */
2334 if (ret > 0) {
66b4ffd1 2335 ret = 0;
7b128766
JB
2336 if (path->slots[0] == 0)
2337 break;
2338 path->slots[0]--;
2339 }
2340
2341 /* pull out the item */
2342 leaf = path->nodes[0];
7b128766
JB
2343 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2344
2345 /* make sure the item matches what we want */
2346 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2347 break;
2348 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2349 break;
2350
2351 /* release the path since we're done with it */
b3b4aa74 2352 btrfs_release_path(path);
7b128766
JB
2353
2354 /*
2355 * this is where we are basically btrfs_lookup, without the
2356 * crossing root thing. we store the inode number in the
2357 * offset of the orphan item.
2358 */
8f6d7f4f
JB
2359
2360 if (found_key.offset == last_objectid) {
2361 printk(KERN_ERR "btrfs: Error removing orphan entry, "
2362 "stopping orphan cleanup\n");
2363 ret = -EINVAL;
2364 goto out;
2365 }
2366
2367 last_objectid = found_key.offset;
2368
5d4f98a2
YZ
2369 found_key.objectid = found_key.offset;
2370 found_key.type = BTRFS_INODE_ITEM_KEY;
2371 found_key.offset = 0;
73f73415 2372 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
a8c9e576
JB
2373 ret = PTR_RET(inode);
2374 if (ret && ret != -ESTALE)
66b4ffd1 2375 goto out;
7b128766 2376
f8e9e0b0
AJ
2377 if (ret == -ESTALE && root == root->fs_info->tree_root) {
2378 struct btrfs_root *dead_root;
2379 struct btrfs_fs_info *fs_info = root->fs_info;
2380 int is_dead_root = 0;
2381
2382 /*
2383 * this is an orphan in the tree root. Currently these
2384 * could come from 2 sources:
2385 * a) a snapshot deletion in progress
2386 * b) a free space cache inode
2387 * We need to distinguish those two, as the snapshot
2388 * orphan must not get deleted.
2389 * find_dead_roots already ran before us, so if this
2390 * is a snapshot deletion, we should find the root
2391 * in the dead_roots list
2392 */
2393 spin_lock(&fs_info->trans_lock);
2394 list_for_each_entry(dead_root, &fs_info->dead_roots,
2395 root_list) {
2396 if (dead_root->root_key.objectid ==
2397 found_key.objectid) {
2398 is_dead_root = 1;
2399 break;
2400 }
2401 }
2402 spin_unlock(&fs_info->trans_lock);
2403 if (is_dead_root) {
2404 /* prevent this orphan from being found again */
2405 key.offset = found_key.objectid - 1;
2406 continue;
2407 }
2408 }
7b128766 2409 /*
a8c9e576
JB
2410 * Inode is already gone but the orphan item is still there,
2411 * kill the orphan item.
7b128766 2412 */
a8c9e576
JB
2413 if (ret == -ESTALE) {
2414 trans = btrfs_start_transaction(root, 1);
66b4ffd1
JB
2415 if (IS_ERR(trans)) {
2416 ret = PTR_ERR(trans);
2417 goto out;
2418 }
8a35d95f
JB
2419 printk(KERN_ERR "auto deleting %Lu\n",
2420 found_key.objectid);
a8c9e576
JB
2421 ret = btrfs_del_orphan_item(trans, root,
2422 found_key.objectid);
79787eaa 2423 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
5b21f2ed 2424 btrfs_end_transaction(trans, root);
7b128766
JB
2425 continue;
2426 }
2427
a8c9e576
JB
2428 /*
2429 * add this inode to the orphan list so btrfs_orphan_del does
2430 * the proper thing when we hit it
2431 */
8a35d95f
JB
2432 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2433 &BTRFS_I(inode)->runtime_flags);
a8c9e576 2434
7b128766
JB
2435 /* if we have links, this was a truncate, lets do that */
2436 if (inode->i_nlink) {
a41ad394
JB
2437 if (!S_ISREG(inode->i_mode)) {
2438 WARN_ON(1);
2439 iput(inode);
2440 continue;
2441 }
7b128766 2442 nr_truncate++;
66b4ffd1 2443 ret = btrfs_truncate(inode);
7b128766
JB
2444 } else {
2445 nr_unlink++;
2446 }
2447
2448 /* this will do delete_inode and everything for us */
2449 iput(inode);
66b4ffd1
JB
2450 if (ret)
2451 goto out;
7b128766 2452 }
3254c876
MX
2453 /* release the path since we're done with it */
2454 btrfs_release_path(path);
2455
d68fc57b
YZ
2456 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2457
2458 if (root->orphan_block_rsv)
2459 btrfs_block_rsv_release(root, root->orphan_block_rsv,
2460 (u64)-1);
2461
2462 if (root->orphan_block_rsv || root->orphan_item_inserted) {
7a7eaa40 2463 trans = btrfs_join_transaction(root);
66b4ffd1
JB
2464 if (!IS_ERR(trans))
2465 btrfs_end_transaction(trans, root);
d68fc57b 2466 }
7b128766
JB
2467
2468 if (nr_unlink)
2469 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2470 if (nr_truncate)
2471 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
66b4ffd1
JB
2472
2473out:
2474 if (ret)
2475 printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
2476 btrfs_free_path(path);
2477 return ret;
7b128766
JB
2478}
2479
46a53cca
CM
2480/*
2481 * very simple check to peek ahead in the leaf looking for xattrs. If we
2482 * don't find any xattrs, we know there can't be any acls.
2483 *
2484 * slot is the slot the inode is in, objectid is the objectid of the inode
2485 */
2486static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2487 int slot, u64 objectid)
2488{
2489 u32 nritems = btrfs_header_nritems(leaf);
2490 struct btrfs_key found_key;
2491 int scanned = 0;
2492
2493 slot++;
2494 while (slot < nritems) {
2495 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2496
2497 /* we found a different objectid, there must not be acls */
2498 if (found_key.objectid != objectid)
2499 return 0;
2500
2501 /* we found an xattr, assume we've got an acl */
2502 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2503 return 1;
2504
2505 /*
2506 * we found a key greater than an xattr key, there can't
2507 * be any acls later on
2508 */
2509 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2510 return 0;
2511
2512 slot++;
2513 scanned++;
2514
2515 /*
2516 * it goes inode, inode backrefs, xattrs, extents,
2517 * so if there are a ton of hard links to an inode there can
2518 * be a lot of backrefs. Don't waste time searching too hard,
2519 * this is just an optimization
2520 */
2521 if (scanned >= 8)
2522 break;
2523 }
2524 /* we hit the end of the leaf before we found an xattr or
2525 * something larger than an xattr. We have to assume the inode
2526 * has acls
2527 */
2528 return 1;
2529}
2530
/*
 * read an inode from the btree into the in-memory inode
 *
 * On any failure the inode is marked bad via make_bad_inode().
 */
static void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;

	/* the delayed-inode code may already hold the item in memory */
	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	path = btrfs_alloc_path();
	if (!path)
		goto make_bad;

	path->leave_spinning = 1;
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];

	/* item already copied from the delayed node; skip to acl caching */
	if (filled)
		goto cache_acl;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(inode));
	if (!maybe_acls)
		cache_no_acl(inode);

	btrfs_free_path(path);

	/* wire up the per-type operations */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		/* tree root directories are read-only */
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		/* device nodes, fifos, sockets */
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_update_iflags(inode);
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}
2641
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	/* disk_i_size, not i_size: only fully-written bytes are recorded */
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, inode->i_version);
	/* stamp the transaction that last touched this inode */
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	/* block group hint is no longer used; always written as 0 */
	btrfs_set_inode_block_group(leaf, item, 0);
}
2679
/*
 * copy everything in the in-memory inode into the btree.
 *
 * Direct (non-delayed) update: look up the inode item with write access
 * and rewrite it in place.  Returns 0 or a negative errno; a vanished
 * item is reported as -ENOENT.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
				 1);
	if (ret) {
		/* ret > 0 means "not found"; callers expect -ENOENT */
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
2717
/*
 * copy everything in the in-memory inode into the btree.
 *
 * Prefers the delayed-inode path; falls through to the direct item
 * update for inodes that must not be delayed.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}
2745
2746static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2747 struct btrfs_root *root, struct inode *inode)
2748{
2749 int ret;
2750
2751 ret = btrfs_update_inode(trans, root, inode);
2752 if (ret == -ENOSPC)
2753 return btrfs_update_inode_item(trans, root, inode);
2754 return ret;
2755}
2756
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code. It remove a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *dir, struct inode *inode,
				const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	/* Find and remove the dir item (name -> inode) in the directory. */
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/* Drop the back reference from the inode to the directory. */
	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
		       "inode %llu parent %llu\n", name_len, name,
		       (unsigned long long)ino, (unsigned long long)dir_ino);
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	/* Remove the dir index item (possibly still in the delayed tree). */
	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	/* Clean up the matching entries in the tree log; -ENOENT is fine. */
	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir_ino);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_abort_transaction(trans, root, ret);
		goto err;
	}

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	if (ret == -ENOENT)
		ret = 0;
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	/*
	 * Directory size accounts each name twice (dir item + dir index),
	 * hence the "name_len * 2" adjustment.
	 */
	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode_inc_iversion(inode);
	inode_inc_iversion(dir);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, dir);
out:
	return ret;
}
2840
/*
 * Public unlink helper: remove the directory entry and back refs via
 * __btrfs_unlink_inode(), then drop one link count on the inode and
 * persist the new link count.
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	int ret;
	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (!ret) {
		btrfs_drop_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	}
	return ret;
}
2854
2855
/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
			     struct btrfs_path *path)
{
	struct extent_buffer *eb;
	int level;
	u64 refs = 1;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		int ret;

		if (!path->nodes[level])
			break;
		eb = path->nodes[level];
		/* Blocks that can never be shared need no ref lookup. */
		if (!btrfs_block_can_be_shared(root, eb))
			continue;
		/*
		 * NOTE(review): the return value of
		 * btrfs_lookup_extent_info() is ignored here; on failure
		 * 'refs' keeps its previous value — presumably acceptable
		 * for this heuristic, but worth confirming.
		 */
		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
					       &refs, NULL);
		if (refs > 1)
			return 1;
	}
	return 0;
}
2879
/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space.
 * so in enospc case, we should make sure they will free space before
 * allowing them to use the global metadata reservation.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
						       struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_dir_item *di;
	struct inode *inode = dentry->d_inode;
	u64 index;
	int check_link = 1;
	int err = -ENOSPC;
	int ret;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	/*
	 * 1 for the possible orphan item
	 * 1 for the dir item
	 * 1 for the dir index
	 * 1 for the inode ref
	 * 1 for the inode ref in the tree log
	 * 2 for the dir entries in the log
	 * 1 for the inode
	 */
	trans = btrfs_start_transaction(root, 8);
	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
		return trans;

	/* Only -ENOSPC reaches this point; try the slow, checked path. */
	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return ERR_PTR(-ENOSPC);

	/* check if there is someone else holds reference */
	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
		return ERR_PTR(-ENOSPC);

	if (atomic_read(&inode->i_count) > 2)
		return ERR_PTR(-ENOSPC);

	/* Serialize: only one enospc unlink may run at a time. */
	if (xchg(&root->fs_info->enospc_unlink, 1))
		return ERR_PTR(-ENOSPC);

	path = btrfs_alloc_path();
	if (!path) {
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(-ENOMEM);
	}

	/* 1 for the orphan item */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		root->fs_info->enospc_unlink = 0;
		return trans;
	}

	/* All lookups below are read-only scans of the commit root. */
	path->skip_locking = 1;
	path->search_commit_root = 1;

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(dir)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(path);

	ret = btrfs_lookup_inode(trans, root, path,
				&BTRFS_I(inode)->location, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret == 0) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		check_link = 0;
	}
	btrfs_release_path(path);

	if (ret == 0 && S_ISREG(inode->i_mode)) {
		ret = btrfs_lookup_file_extent(trans, root, path,
					       ino, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		BUG_ON(ret == 0); /* Corruption */
		if (check_path_shared(root, path))
			goto out;
		btrfs_release_path(path);
	}

	if (!check_link) {
		err = 0;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	if (di) {
		if (check_path_shared(root, path))
			goto out;
	} else {
		err = 0;
		goto out;
	}
	btrfs_release_path(path);

	ref = btrfs_lookup_inode_ref(trans, root, path,
				dentry->d_name.name, dentry->d_name.len,
				ino, dir_ino, 0);
	if (IS_ERR(ref)) {
		err = PTR_ERR(ref);
		goto out;
	}
	BUG_ON(!ref); /* Logic error */
	if (check_path_shared(root, path))
		goto out;
	index = btrfs_inode_ref_index(path->nodes[0], ref);
	btrfs_release_path(path);

	/*
	 * This is a commit root search, if we can lookup inode item and other
	 * relative items in the commit root, it means the transaction of
	 * dir/file creation has been committed, and the dir index item that we
	 * delay to insert has also been inserted into the commit root. So
	 * we needn't worry about the delayed insertion of the dir index item
	 * here.
	 */
	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
				dentry->d_name.name, dentry->d_name.len, 0);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto out;
	}
	/*
	 * NOTE(review): 'ret' here is stale (last set by
	 * btrfs_lookup_file_extent or btrfs_lookup_inode above), so this
	 * BUG_ON does not actually test the dir-index lookup just made —
	 * a check on 'di' may have been intended. Confirm before changing.
	 */
	BUG_ON(ret == -ENOENT);
	if (check_path_shared(root, path))
		goto out;

	err = 0;
out:
	btrfs_free_path(path);
	/* Migrate the orphan reservation over */
	if (!err)
		err = btrfs_block_rsv_migrate(trans->block_rsv,
				&root->fs_info->global_block_rsv,
				trans->bytes_reserved);

	if (err) {
		btrfs_end_transaction(trans, root);
		root->fs_info->enospc_unlink = 0;
		return ERR_PTR(err);
	}

	trans->block_rsv = &root->fs_info->global_block_rsv;
	return trans;
}
3056
/*
 * Counterpart of __unlink_start_trans(): if the enospc path was taken
 * (block_rsv points at the global reserve), release the borrowed
 * reservation and clear the enospc_unlink flag, then end the transaction.
 */
static void __unlink_end_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
		btrfs_block_rsv_release(root, trans->block_rsv,
					trans->bytes_reserved);
		trans->block_rsv = &root->fs_info->trans_block_rsv;
		BUG_ON(!root->fs_info->enospc_unlink);
		root->fs_info->enospc_unlink = 0;
	}
	btrfs_end_transaction(trans, root);
}
3069
/*
 * VFS ->unlink: remove the directory entry for @dentry and, if this was
 * the last link, add the inode to the orphan list so its space is
 * reclaimed even across a crash.
 */
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* Tell the tree log about the unlink before changing anything. */
	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);

	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret)
		goto out;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret)
			goto out;
	}

out:
	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}
3101
/*
 * Remove a subvolume entry (BTRFS_ROOT_ITEM_KEY dir item) named
 * @name from directory @dir, including its root ref and dir index,
 * and update the directory's size and timestamps.
 */
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		if (!di)
			ret = -ENOENT;
		else
			ret = PTR_ERR(di);
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}
	btrfs_release_path(path);

	/* Drop the root ref in the tree root; it also yields the index. */
	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 objectid, root->root_key.objectid,
				 dir_ino, &index, name, name_len);
	if (ret < 0) {
		if (ret != -ENOENT) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
		/*
		 * No root ref (e.g. snapshot of a snapshot): find the dir
		 * index item directly to learn the index to delete.
		 */
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
		index = key.offset;
	}
	btrfs_release_path(path);

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	/* Each name is counted twice (dir item + dir index) in i_size. */
	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode_inc_iversion(dir);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, root, dir);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}
3181
/*
 * VFS ->rmdir: remove an empty directory. Empty subvolume placeholder
 * directories are routed to btrfs_unlink_subvol(); regular directories
 * are orphan-listed and unlinked, then their size is reset to 0.
 */
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	/* Reject non-empty dirs and attempts to rmdir a subvolume root. */
	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
	    btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
		return -ENOTEMPTY;

	trans = __unlink_start_trans(dir, dentry);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto out;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);
out:
	nr = trans->blocks_used;
	__unlink_end_trans(trans, root);
	btrfs_btree_balance_dirty(root, nr);

	return err;
}
3222
/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to. If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode,
			       u64 new_size, u32 min_type)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u64 mask = root->sectorsize - 1;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;	/* count of contiguous slots queued for del */
	int pending_del_slot = 0;	/* lowest slot of the pending run */
	int extent_type = -1;
	int ret;
	int err = 0;
	u64 ino = btrfs_ino(inode);

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = -1;

	if (root->ref_cows || root == root->fs_info->tree_root)
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);

	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the loged items. So we shouldn't kill the delayed
	 * items.
	 */
	if (min_type == 0 && root == BTRFS_I(inode)->root)
		btrfs_kill_delayed_inode_items(inode);

	/* Search from the highest possible key for this inode downward. */
	key.objectid = ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

search_again:
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret > 0) {
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);

		if (found_key.objectid != ino)
			break;

		if (found_type < min_type)
			break;

		/* Compute the last byte covered by this item (inclusive). */
		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			/*
			 * NOTE(review): the "else if" condition below is
			 * redundant — it is the negation of the "if" —
			 * a plain "else" would be equivalent.
			 */
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				/* Partially truncate: shrink this extent. */
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = new_size -
					found_key.offset + root->sectorsize - 1;
				extent_num_bytes = extent_num_bytes &
					~((u64)root->sectorsize - 1);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (root->ref_cows && extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				/* extent_start == 0 means a hole: no extent
				 * to free. */
				if (extent_start != 0) {
					found_extent = 1;
					if (root->ref_cows)
						inode_sub_bytes(inode, num_dec);
				}
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
				btrfs_truncate_item(trans, root, path,
						    size, 1);
			} else if (root->ref_cows) {
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
			}
		}
delete:
		if (del_item) {
			/*
			 * Batch deletions of adjacent slots so we call
			 * btrfs_del_items() once per contiguous run.
			 */
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				BUG();
			}
		} else {
			break;
		}
		if (found_extent && (root->ref_cows ||
				     root == root->fs_info->tree_root)) {
			btrfs_set_path_blocking(path);
			ret = btrfs_free_extent(trans, root, extent_start,
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						ino, extent_offset, 0);
			BUG_ON(ret);
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot) {
			/*
			 * COW roots must bail out periodically (-EAGAIN)
			 * so the caller can end/restart the transaction.
			 */
			if (root->ref_cows &&
			    BTRFS_I(inode)->location.objectid !=
						BTRFS_FREE_INO_OBJECTID) {
				err = -EAGAIN;
				goto out;
			}
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				if (ret) {
					btrfs_abort_transaction(trans,
								root, ret);
					goto error;
				}
				pending_del_nr = 0;
			}
			btrfs_release_path(path);
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}
error:
	btrfs_free_path(path);
	return err;
}
3462
3463/*
3464 * taken from block_truncate_page, but does cow as it zeros out
3465 * any bytes left in the last page in the file.
3466 */
3467static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3468{
3469 struct inode *inode = mapping->host;
db94535d 3470 struct btrfs_root *root = BTRFS_I(inode)->root;
e6dcd2dc
CM
3471 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3472 struct btrfs_ordered_extent *ordered;
2ac55d41 3473 struct extent_state *cached_state = NULL;
e6dcd2dc 3474 char *kaddr;
db94535d 3475 u32 blocksize = root->sectorsize;
39279cc3
CM
3476 pgoff_t index = from >> PAGE_CACHE_SHIFT;
3477 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3478 struct page *page;
3b16a4e3 3479 gfp_t mask = btrfs_alloc_write_mask(mapping);
39279cc3 3480 int ret = 0;
a52d9a80 3481 u64 page_start;
e6dcd2dc 3482 u64 page_end;
39279cc3
CM
3483
3484 if ((offset & (blocksize - 1)) == 0)
3485 goto out;
0ca1f7ce 3486 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
5d5e103a
JB
3487 if (ret)
3488 goto out;
39279cc3
CM
3489
3490 ret = -ENOMEM;
211c17f5 3491again:
3b16a4e3 3492 page = find_or_create_page(mapping, index, mask);
5d5e103a 3493 if (!page) {
0ca1f7ce 3494 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
39279cc3 3495 goto out;
5d5e103a 3496 }
e6dcd2dc
CM
3497
3498 page_start = page_offset(page);
3499 page_end = page_start + PAGE_CACHE_SIZE - 1;
3500
39279cc3 3501 if (!PageUptodate(page)) {
9ebefb18 3502 ret = btrfs_readpage(NULL, page);
39279cc3 3503 lock_page(page);
211c17f5
CM
3504 if (page->mapping != mapping) {
3505 unlock_page(page);
3506 page_cache_release(page);
3507 goto again;
3508 }
39279cc3
CM
3509 if (!PageUptodate(page)) {
3510 ret = -EIO;
89642229 3511 goto out_unlock;
39279cc3
CM
3512 }
3513 }
211c17f5 3514 wait_on_page_writeback(page);
e6dcd2dc 3515
d0082371 3516 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
e6dcd2dc
CM
3517 set_page_extent_mapped(page);
3518
3519 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3520 if (ordered) {
2ac55d41
JB
3521 unlock_extent_cached(io_tree, page_start, page_end,
3522 &cached_state, GFP_NOFS);
e6dcd2dc
CM
3523 unlock_page(page);
3524 page_cache_release(page);
eb84ae03 3525 btrfs_start_ordered_extent(inode, ordered, 1);
e6dcd2dc
CM
3526 btrfs_put_ordered_extent(ordered);
3527 goto again;
3528 }
3529
2ac55d41 3530 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
5d5e103a 3531 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
2ac55d41 3532 0, 0, &cached_state, GFP_NOFS);
5d5e103a 3533
2ac55d41
JB
3534 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3535 &cached_state);
9ed74f2d 3536 if (ret) {
2ac55d41
JB
3537 unlock_extent_cached(io_tree, page_start, page_end,
3538 &cached_state, GFP_NOFS);
9ed74f2d
JB
3539 goto out_unlock;
3540 }
3541
e6dcd2dc
CM
3542 ret = 0;
3543 if (offset != PAGE_CACHE_SIZE) {
3544 kaddr = kmap(page);
3545 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3546 flush_dcache_page(page);
3547 kunmap(page);
3548 }
247e743c 3549 ClearPageChecked(page);
e6dcd2dc 3550 set_page_dirty(page);
2ac55d41
JB
3551 unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3552 GFP_NOFS);
39279cc3 3553
89642229 3554out_unlock:
5d5e103a 3555 if (ret)
0ca1f7ce 3556 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
39279cc3
CM
3557 unlock_page(page);
3558 page_cache_release(page);
3559out:
3560 return ret;
3561}
3562
/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for. So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 mask = root->sectorsize - 1;
	u64 hole_start = (oldsize + mask) & ~mask;	/* round up */
	u64 block_end = (size + mask) & ~mask;	/* round up */
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	if (size <= hole_start)
		return 0;

	/* Lock the hole range, waiting out any overlapping ordered I/O. */
	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
				 &cached_state);
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}

	/* Walk the range, inserting a hole extent wherever one is missing. */
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset, 0);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = (last_byte + mask) & ~mask;
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			u64 hint_byte = 0;
			hole_size = last_byte - cur_offset;

			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				break;
			}

			err = btrfs_drop_extents(trans, inode, cur_offset,
						 cur_offset + hole_size,
						 &hint_byte, 1);
			if (err) {
				btrfs_abort_transaction(trans, root, err);
				btrfs_end_transaction(trans, root);
				break;
			}

			/* Insert an explicit zero-length-disk hole extent. */
			err = btrfs_insert_file_extent(trans, root,
					btrfs_ino(inode), cur_offset, 0,
					0, hole_size, 0, hole_size,
					0, 0, 0);
			if (err) {
				btrfs_abort_transaction(trans, root, err);
				btrfs_end_transaction(trans, root);
				break;
			}

			btrfs_drop_extent_cache(inode, hole_start,
					last_byte - 1, 0);

			btrfs_update_inode(trans, root, inode);
			btrfs_end_transaction(trans, root);
		}
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}

	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}
39279cc3 3658
a41ad394 3659static int btrfs_setsize(struct inode *inode, loff_t newsize)
8082510e 3660{
f4a2f4c5
MX
3661 struct btrfs_root *root = BTRFS_I(inode)->root;
3662 struct btrfs_trans_handle *trans;
a41ad394 3663 loff_t oldsize = i_size_read(inode);
8082510e
YZ
3664 int ret;
3665
a41ad394 3666 if (newsize == oldsize)
8082510e
YZ
3667 return 0;
3668
a41ad394 3669 if (newsize > oldsize) {
a41ad394
JB
3670 truncate_pagecache(inode, oldsize, newsize);
3671 ret = btrfs_cont_expand(inode, oldsize, newsize);
f4a2f4c5 3672 if (ret)
8082510e 3673 return ret;
8082510e 3674
f4a2f4c5
MX
3675 trans = btrfs_start_transaction(root, 1);
3676 if (IS_ERR(trans))
3677 return PTR_ERR(trans);
3678
3679 i_size_write(inode, newsize);
3680 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3681 ret = btrfs_update_inode(trans, root, inode);
7ad85bb7 3682 btrfs_end_transaction(trans, root);
a41ad394 3683 } else {
8082510e 3684
a41ad394
JB
3685 /*
3686 * We're truncating a file that used to have good data down to
3687 * zero. Make sure it gets into the ordered flush list so that
3688 * any new writes get down to disk quickly.
3689 */
3690 if (newsize == 0)
72ac3c0d
JB
3691 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
3692 &BTRFS_I(inode)->runtime_flags);
8082510e 3693
a41ad394
JB
3694 /* we don't support swapfiles, so vmtruncate shouldn't fail */
3695 truncate_setsize(inode, newsize);
3696 ret = btrfs_truncate(inode);
8082510e
YZ
3697 }
3698
a41ad394 3699 return ret;
8082510e
YZ
3700}
3701
/*
 * VFS ->setattr: apply attribute changes from @attr to the inode.
 * Size changes are handled by btrfs_setsize(); everything else is
 * copied in and the inode is marked dirty (plus ACL rebuild on a
 * mode change).
 */
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	/* Read-only subvolumes reject all attribute changes. */
	if (btrfs_root_readonly(root))
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr->ia_size);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(inode);

		if (!err && attr->ia_valid & ATTR_MODE)
			err = btrfs_acl_chmod(inode);
	}

	return err;
}
61295eb8 3732
bd555975 3733void btrfs_evict_inode(struct inode *inode)
39279cc3
CM
3734{
3735 struct btrfs_trans_handle *trans;
3736 struct btrfs_root *root = BTRFS_I(inode)->root;
726c35fa 3737 struct btrfs_block_rsv *rsv, *global_rsv;
07127184 3738 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
d3c2fdcf 3739 unsigned long nr;
39279cc3
CM
3740 int ret;
3741
1abe9b8a 3742 trace_btrfs_inode_evict(inode);
3743
39279cc3 3744 truncate_inode_pages(&inode->i_data, 0);
0af3d00b 3745 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
83eea1f1 3746 btrfs_is_free_space_inode(inode)))
bd555975
AV
3747 goto no_delete;
3748
39279cc3 3749 if (is_bad_inode(inode)) {
7b128766 3750 btrfs_orphan_del(NULL, inode);
39279cc3
CM
3751 goto no_delete;
3752 }
bd555975 3753 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
4a096752 3754 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5f39d397 3755
c71bf099 3756 if (root->fs_info->log_root_recovering) {
6bf02314 3757 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
8a35d95f 3758 &BTRFS_I(inode)->runtime_flags));
c71bf099
YZ
3759 goto no_delete;
3760 }
3761
76dda93c
YZ
3762 if (inode->i_nlink > 0) {
3763 BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3764 goto no_delete;
3765 }
3766
4289a667
JB
3767 rsv = btrfs_alloc_block_rsv(root);
3768 if (!rsv) {
3769 btrfs_orphan_del(NULL, inode);
3770 goto no_delete;
3771 }
4a338542 3772 rsv->size = min_size;
726c35fa 3773 global_rsv = &root->fs_info->global_block_rsv;
4289a667 3774
dbe674a9 3775 btrfs_i_size_write(inode, 0);
5f39d397 3776
4289a667
JB
3777 /*
3778 * This is a bit simpler than btrfs_truncate since
3779 *
3780 * 1) We've already reserved our space for our orphan item in the
3781 * unlink.
3782 * 2) We're going to delete the inode item, so we don't need to update
3783 * it at all.
3784 *
3785 * So we just need to reserve some slack space in case we add bytes when
3786 * doing the truncate.
3787 */
8082510e 3788 while (1) {
aa38a711 3789 ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
726c35fa
JB
3790
3791 /*
3792 * Try and steal from the global reserve since we will
3793 * likely not use this space anyway, we want to try as
3794 * hard as possible to get this to work.
3795 */
3796 if (ret)
3797 ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
d68fc57b 3798
d68fc57b 3799 if (ret) {
4289a667 3800 printk(KERN_WARNING "Could not get space for a "
482e6dc5 3801 "delete, will truncate on mount %d\n", ret);
4289a667
JB
3802 btrfs_orphan_del(NULL, inode);
3803 btrfs_free_block_rsv(root, rsv);
3804 goto no_delete;
d68fc57b 3805 }
7b128766 3806
4289a667
JB
3807 trans = btrfs_start_transaction(root, 0);
3808 if (IS_ERR(trans)) {
3809 btrfs_orphan_del(NULL, inode);
3810 btrfs_free_block_rsv(root, rsv);
3811 goto no_delete;
d68fc57b 3812 }
7b128766 3813
4289a667
JB
3814 trans->block_rsv = rsv;
3815
d68fc57b 3816 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
8082510e
YZ
3817 if (ret != -EAGAIN)
3818 break;
85e21bac 3819
8082510e
YZ
3820 nr = trans->blocks_used;
3821 btrfs_end_transaction(trans, root);
3822 trans = NULL;
3823 btrfs_btree_balance_dirty(root, nr);
3824 }
5f39d397 3825
4289a667
JB
3826 btrfs_free_block_rsv(root, rsv);
3827
8082510e 3828 if (ret == 0) {
4289a667 3829 trans->block_rsv = root->orphan_block_rsv;
8082510e
YZ
3830 ret = btrfs_orphan_del(trans, inode);
3831 BUG_ON(ret);
3832 }
54aa1f4d 3833
4289a667 3834 trans->block_rsv = &root->fs_info->trans_block_rsv;
581bb050
LZ
3835 if (!(root == root->fs_info->tree_root ||
3836 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
33345d01 3837 btrfs_return_ino(root, btrfs_ino(inode));
581bb050 3838
d3c2fdcf 3839 nr = trans->blocks_used;
54aa1f4d 3840 btrfs_end_transaction(trans, root);
d3c2fdcf 3841 btrfs_btree_balance_dirty(root, nr);
39279cc3 3842no_delete:
dbd5768f 3843 clear_inode(inode);
8082510e 3844 return;
39279cc3
CM
3845}
3846
/*
 * This returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* NULL trans: read-only lookup of the dir item */
	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
				    namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);

	/* error or plain "not found" (NULL) both report objectid == 0 */
	if (IS_ERR_OR_NULL(di))
		goto out_err;

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
out_err:
	location->objectid = 0;
	goto out;
}
3881
/*
 * When we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.
 * This is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_root *root,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	/* default error if the root ref doesn't match this dentry */
	err = -ENOENT;
	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
				  BTRFS_I(dir)->root->root_key.objectid,
				  location->objectid);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	/* verify that the root ref really points back at this dir entry */
	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	/* the stored name must match the dentry name too */
	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	/* dead root (snapshot being deleted): treat as not found */
	if (btrfs_root_refs(&new_root->root_item) == 0) {
		err = -ENOENT;
		goto out;
	}

	/* rewrite @location to the root dir of the subvolume */
	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}
3950
5d4f98a2
YZ
/*
 * Insert the inode into its root's red-black tree of in-core inodes
 * (keyed by inode number).  If a stale entry with the same ino is still
 * in the tree (an inode on its way to being freed), evict it and retry.
 */
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	u64 ino = btrfs_ino(inode);
again:
	p = &root->inode_tree.rb_node;
	parent = NULL;

	/* an unhashed inode is being torn down; don't track it */
	if (inode_unhashed(inode))
		return;

	spin_lock(&root->inode_lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(&entry->vfs_inode))
			p = &parent->rb_right;
		else {
			/* same ino: the old inode must be dying; drop it
			 * from the tree and restart the descent */
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_erase(parent, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			goto again;
		}
	}
	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}
3987
/*
 * Remove the inode from its root's inode rbtree.  If this empties the
 * tree of a root that has no remaining references, schedule the root
 * itself for deletion.
 */
static void inode_tree_del(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	/*
	 * Free space cache has inodes in the tree root, but the tree root has a
	 * root_refs of 0, so this could end up dropping the tree root as a
	 * snapshot, so we need the extra !root->fs_info->tree_root check to
	 * make sure we don't drop it.
	 */
	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
	    root != root->fs_info->tree_root) {
		/* wait for in-flight subvol lookups, then re-check emptiness */
		synchronize_srcu(&root->fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}
4017
143bede5 4018void btrfs_invalidate_inodes(struct btrfs_root *root)
76dda93c
YZ
4019{
4020 struct rb_node *node;
4021 struct rb_node *prev;
4022 struct btrfs_inode *entry;
4023 struct inode *inode;
4024 u64 objectid = 0;
4025
4026 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4027
4028 spin_lock(&root->inode_lock);
4029again:
4030 node = root->inode_tree.rb_node;
4031 prev = NULL;
4032 while (node) {
4033 prev = node;
4034 entry = rb_entry(node, struct btrfs_inode, rb_node);
4035
33345d01 4036 if (objectid < btrfs_ino(&entry->vfs_inode))
76dda93c 4037 node = node->rb_left;
33345d01 4038 else if (objectid > btrfs_ino(&entry->vfs_inode))
76dda93c
YZ
4039 node = node->rb_right;
4040 else
4041 break;
4042 }
4043 if (!node) {
4044 while (prev) {
4045 entry = rb_entry(prev, struct btrfs_inode, rb_node);
33345d01 4046 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
76dda93c
YZ
4047 node = prev;
4048 break;
4049 }
4050 prev = rb_next(prev);
4051 }
4052 }
4053 while (node) {
4054 entry = rb_entry(node, struct btrfs_inode, rb_node);
33345d01 4055 objectid = btrfs_ino(&entry->vfs_inode) + 1;
76dda93c
YZ
4056 inode = igrab(&entry->vfs_inode);
4057 if (inode) {
4058 spin_unlock(&root->inode_lock);
4059 if (atomic_read(&inode->i_count) > 1)
4060 d_prune_aliases(inode);
4061 /*
45321ac5 4062 * btrfs_drop_inode will have it removed from
76dda93c
YZ
4063 * the inode cache when its usage count
4064 * hits zero.
4065 */
4066 iput(inode);
4067 cond_resched();
4068 spin_lock(&root->inode_lock);
4069 goto again;
4070 }
4071
4072 if (cond_resched_lock(&root->inode_lock))
4073 goto again;
4074
4075 node = rb_next(node);
4076 }
4077 spin_unlock(&root->inode_lock);
5d4f98a2
YZ
4078}
4079
e02119d5
CM
/*
 * iget5_locked() init callback: stamp a freshly allocated inode with the
 * ino and root from the lookup args.  Always succeeds.
 */
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->ino;
	BTRFS_I(inode)->root = args->root;
	return 0;
}
4087
4088static int btrfs_find_actor(struct inode *inode, void *opaque)
4089{
4090 struct btrfs_iget_args *args = opaque;
33345d01 4091 return args->ino == btrfs_ino(inode) &&
d397712b 4092 args->root == BTRFS_I(inode)->root;
39279cc3
CM
4093}
4094
5d4f98a2
YZ
/*
 * Look up (or allocate, locked and I_NEW) the in-core inode for
 * (objectid, root) via the inode hash.  Matching must compare the root
 * too, since different subvolumes reuse the same object ids.
 */
static struct inode *btrfs_iget_locked(struct super_block *s,
				       u64 objectid,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	args.ino = objectid;
	args.root = root;

	inode = iget5_locked(s, objectid, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}
4109
/* Get an inode object given its location and corresponding root.
 * Sets *new to 1 if the inode had to be read from disk.
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* I_NEW: we allocated it; fill it in from the btree */
	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
		if (!is_bad_inode(inode)) {
			inode_tree_add(inode);
			unlock_new_inode(inode);
			if (new)
				*new = 1;
		} else {
			/* read failed: unhash and drop the half-built inode */
			unlock_new_inode(inode);
			iput(inode);
			inode = ERR_PTR(-ESTALE);
		}
	}

	return inode;
}
4140
4df27c4d
YZ
/*
 * Build an in-memory-only placeholder directory inode for a subvolume
 * whose root could not be read (e.g. a deleted snapshot still referenced
 * by a dir entry).  Marked BTRFS_INODE_DUMMY so it is never written back.
 */
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	/* read-only dir ops; there is nothing real behind this inode */
	inode->i_op = &btrfs_dir_ro_inode_operations;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	return inode;
}
4162
3de4586c 4163struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
39279cc3 4164{
d397712b 4165 struct inode *inode;
4df27c4d 4166 struct btrfs_root *root = BTRFS_I(dir)->root;
39279cc3
CM
4167 struct btrfs_root *sub_root = root;
4168 struct btrfs_key location;
76dda93c 4169 int index;
b4aff1f8 4170 int ret = 0;
39279cc3
CM
4171
4172 if (dentry->d_name.len > BTRFS_NAME_LEN)
4173 return ERR_PTR(-ENAMETOOLONG);
5f39d397 4174
b4aff1f8
JB
4175 if (unlikely(d_need_lookup(dentry))) {
4176 memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
4177 kfree(dentry->d_fsdata);
4178 dentry->d_fsdata = NULL;
a66e7cc6
JB
4179 /* This thing is hashed, drop it for now */
4180 d_drop(dentry);
b4aff1f8
JB
4181 } else {
4182 ret = btrfs_inode_by_name(dir, dentry, &location);
4183 }
5f39d397 4184
39279cc3
CM
4185 if (ret < 0)
4186 return ERR_PTR(ret);
5f39d397 4187
4df27c4d
YZ
4188 if (location.objectid == 0)
4189 return NULL;
4190
4191 if (location.type == BTRFS_INODE_ITEM_KEY) {
73f73415 4192 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4df27c4d
YZ
4193 return inode;
4194 }
4195
4196 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
4197
76dda93c 4198 index = srcu_read_lock(&root->fs_info->subvol_srcu);
4df27c4d
YZ
4199 ret = fixup_tree_root_location(root, dir, dentry,
4200 &location, &sub_root);
4201 if (ret < 0) {
4202 if (ret != -ENOENT)
4203 inode = ERR_PTR(ret);
4204 else
4205 inode = new_simple_dir(dir->i_sb, &location, sub_root);
4206 } else {
73f73415 4207 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
39279cc3 4208 }
76dda93c
YZ
4209 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
4210
34d19bad 4211 if (!IS_ERR(inode) && root != sub_root) {
c71bf099
YZ
4212 down_read(&root->fs_info->cleanup_work_sem);
4213 if (!(inode->i_sb->s_flags & MS_RDONLY))
66b4ffd1 4214 ret = btrfs_orphan_cleanup(sub_root);
c71bf099 4215 up_read(&root->fs_info->cleanup_work_sem);
66b4ffd1
JB
4216 if (ret)
4217 inode = ERR_PTR(ret);
c71bf099
YZ
4218 }
4219
3de4586c
CM
4220 return inode;
4221}
4222
fe15ce44 4223static int btrfs_dentry_delete(const struct dentry *dentry)
76dda93c
YZ
4224{
4225 struct btrfs_root *root;
848cce0d 4226 struct inode *inode = dentry->d_inode;
76dda93c 4227
848cce0d
LZ
4228 if (!inode && !IS_ROOT(dentry))
4229 inode = dentry->d_parent->d_inode;
76dda93c 4230
848cce0d
LZ
4231 if (inode) {
4232 root = BTRFS_I(inode)->root;
efefb143
YZ
4233 if (btrfs_root_refs(&root->root_item) == 0)
4234 return 1;
848cce0d
LZ
4235
4236 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
4237 return 1;
efefb143 4238 }
76dda93c
YZ
4239 return 0;
4240}
4241
b4aff1f8
JB
4242static void btrfs_dentry_release(struct dentry *dentry)
4243{
4244 if (dentry->d_fsdata)
4245 kfree(dentry->d_fsdata);
4246}
4247
3de4586c
CM
/*
 * ->lookup handler: resolve the name and splice the result into the
 * dcache.  Clears DCACHE_NEED_LOOKUP afterwards, since the deferred
 * lookup (key stashed by readdir) has now been consumed.
 */
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct dentry *ret;

	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
	if (unlikely(d_need_lookup(dentry))) {
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
		spin_unlock(&dentry->d_lock);
	}
	return ret;
}
4261
16cdcec7 4262unsigned char btrfs_filetype_table[] = {
39279cc3
CM
4263 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4264};
4265
cbdf5a24
DW
/*
 * ->readdir handler: emit ".", "..", then walk the directory's
 * BTRFS_DIR_INDEX_KEY (or DIR_ITEM for the tree root) items in key
 * order, merging in not-yet-committed entries from the delayed-inode
 * lists.  f_pos tracks the dir index offset of the next entry.
 */
static int btrfs_real_readdir(struct file *filp, void *dirent,
			      filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
	char tmp_name[32];	/* stack buffer for short names */
	char *name_ptr;
	int name_len;
	int is_curr = 0;	/* filp->f_pos points to the current index? */

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;

	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1,
			       filp->f_pos, btrfs_ino(inode), DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}
	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
		u64 pino = parent_ino(filp->f_path.dentry);
		over = filldir(dirent, "..", 2,
			       filp->f_pos, pino, DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 2;
	}
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;

	/* collect delayed (uncommitted) insertions/deletions to merge in */
	if (key_type == BTRFS_DIR_INDEX_KEY) {
		INIT_LIST_HEAD(&ins_list);
		INIT_LIST_HEAD(&del_list);
		btrfs_get_delayed_items(inode, &ins_list, &del_list);
	}

	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;
	key.objectid = btrfs_ino(inode);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* stop once we leave this directory's key range */
		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != key_type)
			break;
		if (found_key.offset < filp->f_pos)
			goto next;
		/* skip entries deleted in the delayed (uncommitted) list */
		if (key_type == BTRFS_DIR_INDEX_KEY &&
		    btrfs_should_delete_dir_index(&del_list,
						  found_key.offset))
			goto next;

		filp->f_pos = found_key.offset;
		is_curr = 1;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
		di_total = btrfs_item_size(leaf, item);

		/* a DIR_ITEM may hold several packed entries; walk them all */
		while (di_cur < di_total) {
			struct btrfs_key location;

			if (verify_dir_item(root, leaf, di))
				break;

			name_len = btrfs_dir_name_len(leaf, di);
			if (name_len <= sizeof(tmp_name)) {
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
				if (!name_ptr) {
					ret = -ENOMEM;
					goto err;
				}
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);


			/* is this a reference to our own snapshot? If so
			 * skip it.
			 *
			 * In contrast to old kernels, we insert the snapshot's
			 * dir item and dir index after it has been created, so
			 * we won't find a reference to our own snapshot. We
			 * still keep the following code for backward
			 * compatibility.
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
			over = filldir(dirent, name_ptr, name_len,
				       found_key.offset, location.objectid,
				       d_type);

skip:
			if (name_ptr != tmp_name)
				kfree(name_ptr);

			/* user buffer full: stop without advancing f_pos */
			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
next:
		path->slots[0]++;
	}

	/* now emit the delayed (not yet committed) insertions */
	if (key_type == BTRFS_DIR_INDEX_KEY) {
		if (is_curr)
			filp->f_pos++;
		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
						      &ins_list);
		if (ret)
			goto nopos;
	}

	/* Reached end of directory/root. Bump pos past the last item. */
	if (key_type == BTRFS_DIR_INDEX_KEY)
		/*
		 * 32-bit glibc will use getdents64, but then strtol -
		 * so the last number we can serve is this.
		 */
		filp->f_pos = 0x7fffffff;
	else
		filp->f_pos++;
nopos:
	ret = 0;
err:
	if (key_type == BTRFS_DIR_INDEX_KEY)
		btrfs_put_delayed_items(&ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}
4448
a9185b41 4449int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
39279cc3
CM
4450{
4451 struct btrfs_root *root = BTRFS_I(inode)->root;
4452 struct btrfs_trans_handle *trans;
4453 int ret = 0;
0af3d00b 4454 bool nolock = false;
39279cc3 4455
72ac3c0d 4456 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4ca8b41e
CM
4457 return 0;
4458
83eea1f1 4459 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
82d5902d 4460 nolock = true;
0af3d00b 4461
a9185b41 4462 if (wbc->sync_mode == WB_SYNC_ALL) {
0af3d00b 4463 if (nolock)
7a7eaa40 4464 trans = btrfs_join_transaction_nolock(root);
0af3d00b 4465 else
7a7eaa40 4466 trans = btrfs_join_transaction(root);
3612b495
TI
4467 if (IS_ERR(trans))
4468 return PTR_ERR(trans);
0af3d00b
JB
4469 if (nolock)
4470 ret = btrfs_end_transaction_nolock(trans, root);
4471 else
4472 ret = btrfs_commit_transaction(trans, root);
39279cc3
CM
4473 }
4474 return ret;
4475}
4476
/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking... there are no reasons other than
 * performance to keep or drop this code.
 */
22c44fe6 4483int btrfs_dirty_inode(struct inode *inode)
39279cc3
CM
4484{
4485 struct btrfs_root *root = BTRFS_I(inode)->root;
4486 struct btrfs_trans_handle *trans;
8929ecfa
YZ
4487 int ret;
4488
72ac3c0d 4489 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
22c44fe6 4490 return 0;
39279cc3 4491
7a7eaa40 4492 trans = btrfs_join_transaction(root);
22c44fe6
JB
4493 if (IS_ERR(trans))
4494 return PTR_ERR(trans);
8929ecfa
YZ
4495
4496 ret = btrfs_update_inode(trans, root, inode);
94b60442
CM
4497 if (ret && ret == -ENOSPC) {
4498 /* whoops, lets try again with the full transaction */
4499 btrfs_end_transaction(trans, root);
4500 trans = btrfs_start_transaction(root, 1);
22c44fe6
JB
4501 if (IS_ERR(trans))
4502 return PTR_ERR(trans);
8929ecfa 4503
94b60442 4504 ret = btrfs_update_inode(trans, root, inode);
94b60442 4505 }
39279cc3 4506 btrfs_end_transaction(trans, root);
16cdcec7
MX
4507 if (BTRFS_I(inode)->delayed_node)
4508 btrfs_balance_delayed_items(root);
22c44fe6
JB
4509
4510 return ret;
4511}
4512
/*
 * This is a copy of file_update_time.  We need it so we can return an
 * error on ENOSPC when updating the inode for file writes and mmap writes.
 */
e41f941a
JB
/*
 * ->update_time handler: apply the timestamp/version updates selected by
 * @flags, then persist the inode.  Unlike the generic helper this can
 * return -ENOSPC (from btrfs_dirty_inode) or -EROFS.
 */
static int btrfs_update_time(struct inode *inode, struct timespec *now,
			     int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;
	return btrfs_dirty_inode(inode);
}
4535
/*
 * Find the highest existing sequence number in a directory and then set
 * the in-memory index_cnt variable to the first free sequence number.
 */
aec7477b
JB
static int btrfs_set_inode_index_count(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	/* search for the very last DIR_INDEX item of this directory */
	key.objectid = btrfs_ino(inode);
	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	/* the search landed one past the last item; step back to it */
	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	/* no DIR_INDEX items at all for this dir: start from 2 */
	if (found_key.objectid != btrfs_ino(inode) ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	/* next free index is one past the highest existing one */
	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}
4592
/*
 * Helper to hand out the next free dir index (sequence number) in a given
 * directory.  The current code is very simple; later versions may do
 * smarter things in the btree.
 */
3de4586c 4597int btrfs_set_inode_index(struct inode *dir, u64 *index)
aec7477b
JB
4598{
4599 int ret = 0;
4600
4601 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
16cdcec7
MX
4602 ret = btrfs_inode_delayed_dir_index_count(dir);
4603 if (ret) {
4604 ret = btrfs_set_inode_index_count(dir);
4605 if (ret)
4606 return ret;
4607 }
aec7477b
JB
4608 }
4609
00e4e6b3 4610 *index = BTRFS_I(dir)->index_cnt;
aec7477b
JB
4611 BTRFS_I(dir)->index_cnt++;
4612
4613 return ret;
4614}
4615
39279cc3
CM
/*
 * Allocate a new in-core inode and insert its INODE_ITEM and INODE_REF
 * (back reference to @ref_objectid with @name) into @root in a single
 * two-item insert.  @dir may be NULL (e.g. subvolume root creation);
 * when set, a fresh dir index is taken from it and returned via *index.
 *
 * Returns the new inode or an ERR_PTR; on failure the reserved dir
 * index is given back.
 */
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid,
				     umode_t mode, u64 *index)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	unsigned long ptr;
	int ret;
	int owner;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	inode = new_inode(root->fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir) {
		trace_btrfs_inode_request(dir);

		/* reserve a dir index for the entry that will point here */
		ret = btrfs_set_inode_index(dir, index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_get_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/* NOTE(review): 'owner' is computed but not used below — looks
	 * vestigial; confirm before removing */
	if (S_ISDIR(mode))
		owner = 0;
	else
		owner = 1;

	/* two adjacent items: the inode item and its back reference */
	key[0].objectid = objectid;
	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
	key[0].offset = 0;

	key[1].objectid = objectid;
	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
	key[1].offset = ref_objectid;

	sizes[0] = sizeof(struct btrfs_inode_item);
	sizes[1] = name_len + sizeof(*ref);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
	if (ret != 0)
		goto fail;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	/* fill the back ref in the slot right after the inode item */
	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_inode_ref);
	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(path->nodes[0], name, ptr, name_len);

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	/* inherit inode flags (compress, nodatacow, ...) from the parent */
	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(root, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(root, NODATACOW) ||
		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
	}

	insert_inode_hash(inode);
	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	btrfs_update_root_times(trans, root);

	return inode;
fail:
	/* give the reserved dir index back before bailing out */
	if (dir)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}
4741
4742static inline u8 btrfs_inode_type(struct inode *inode)
4743{
4744 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4745}
4746
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 *
 * Returns 0 on success or a negative error.  A failure after the
 * backref/root-ref has been inserted rolls that ref back; failures
 * beyond the dir-item insertion abort the transaction.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	/*
	 * Linking a subvolume root: the dir item must point at the
	 * subvolume's root key, not an inode item in this tree.
	 */
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;
	}

	/* first insert the back reference (root ref for subvolumes) */
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
					 key.objectid, root->root_key.objectid,
					 parent_ino, index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, root, name, name_len,
				    parent_inode, &key,
				    btrfs_inode_type(inode), index);
	if (ret == -EEXIST)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	/*
	 * dir sizes count each name twice (dir item + dir index item),
	 * hence name_len * 2.
	 */
	btrfs_i_size_write(parent_inode, parent_inode->i_size +
			   name_len * 2);
	inode_inc_iversion(parent_inode);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode(trans, root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	return ret;

fail_dir_item:
	/*
	 * The dir item already existed: undo the ref we inserted above
	 * and report -EEXIST (still in 'ret') to the caller.  The
	 * rollback's own status is intentionally discarded.
	 */
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;
		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
				 key.objectid, root->root_key.objectid,
				 parent_ino, &local_index, name, name_len);

	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, parent_ino, &local_index);
	}
	return ret;
}
4820
4821static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
a1b075d2
JB
4822 struct inode *dir, struct dentry *dentry,
4823 struct inode *inode, int backref, u64 index)
39279cc3 4824{
a1b075d2
JB
4825 int err = btrfs_add_link(trans, dir, inode,
4826 dentry->d_name.name, dentry->d_name.len,
4827 backref, index);
39279cc3
CM
4828 if (err > 0)
4829 err = -EEXIST;
4830 return err;
4831}
4832
/*
 * mknod: create a special inode (device node, fifo, socket) in 'dir'
 * and attach it to 'dentry'.  On any failure after the inode exists,
 * drop_inode unwinds the link count and our reference.
 */
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
			umode_t mode, dev_t rdev)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	unsigned long nr = 0;
	u64 index = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */

	inode->i_op = &btrfs_special_inode_operations;
	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		init_special_inode(inode, inode->i_mode, rdev);
		/* NOTE(review): btrfs_update_inode() return value is
		 * ignored here — confirm this is intentional. */
		btrfs_update_inode(trans, root, inode);
		d_instantiate(dentry, inode);
	}
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);
	if (drop_inode) {
		/* undo the nlink and reference held by the new inode */
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;
}
4901
/*
 * create: make a regular file in 'dir' and attach it to 'dentry'.
 * Mirrors btrfs_mknod(): reserve transaction space, allocate the
 * inode, apply security xattrs, then link it into the directory.
 */
static int btrfs_create(struct inode *dir, struct dentry *dentry,
			umode_t mode, struct nameidata *nd)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int drop_inode = 0;
	int err;
	unsigned long nr = 0;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;		/* unwind the new inode on exit */
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		d_instantiate(dentry, inode);
	}
out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_inode) {
		/* undo the nlink and reference held by the new inode */
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
4969
4970static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4971 struct dentry *dentry)
4972{
4973 struct btrfs_trans_handle *trans;
4974 struct btrfs_root *root = BTRFS_I(dir)->root;
4975 struct inode *inode = old_dentry->d_inode;
00e4e6b3 4976 u64 index;
1832a6d5 4977 unsigned long nr = 0;
39279cc3
CM
4978 int err;
4979 int drop_inode = 0;
4980
4a8be425
TH
4981 /* do not allow sys_link's with other subvols of the same device */
4982 if (root->objectid != BTRFS_I(inode)->root->objectid)
3ab3564f 4983 return -EXDEV;
4a8be425 4984
c055e99e
AV
4985 if (inode->i_nlink == ~0U)
4986 return -EMLINK;
4a8be425 4987
3de4586c 4988 err = btrfs_set_inode_index(dir, &index);
aec7477b
JB
4989 if (err)
4990 goto fail;
4991
a22285a6 4992 /*
7e6b6465 4993 * 2 items for inode and inode ref
a22285a6 4994 * 2 items for dir items
7e6b6465 4995 * 1 item for parent inode
a22285a6 4996 */
7e6b6465 4997 trans = btrfs_start_transaction(root, 5);
a22285a6
YZ
4998 if (IS_ERR(trans)) {
4999 err = PTR_ERR(trans);
5000 goto fail;
5001 }
5f39d397 5002
3153495d 5003 btrfs_inc_nlink(inode);
0c4d2d95 5004 inode_inc_iversion(inode);
3153495d 5005 inode->i_ctime = CURRENT_TIME;
7de9c6ee 5006 ihold(inode);
aec7477b 5007
a1b075d2 5008 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5f39d397 5009
a5719521 5010 if (err) {
54aa1f4d 5011 drop_inode = 1;
a5719521 5012 } else {
10d9f309 5013 struct dentry *parent = dentry->d_parent;
a5719521 5014 err = btrfs_update_inode(trans, root, inode);
79787eaa
JM
5015 if (err)
5016 goto fail;
08c422c2 5017 d_instantiate(dentry, inode);
6a912213 5018 btrfs_log_new_name(trans, inode, NULL, parent);
a5719521 5019 }
39279cc3 5020
d3c2fdcf 5021 nr = trans->blocks_used;
7ad85bb7 5022 btrfs_end_transaction(trans, root);
1832a6d5 5023fail:
39279cc3
CM
5024 if (drop_inode) {
5025 inode_dec_link_count(inode);
5026 iput(inode);
5027 }
d3c2fdcf 5028 btrfs_btree_balance_dirty(root, nr);
39279cc3
CM
5029 return err;
5030}
5031
/*
 * mkdir: create a directory inode in 'dir' and link it under
 * 'dentry'.  drop_on_err releases the new inode if anything fails
 * between its creation and d_instantiate().
 */
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
	u64 objectid = 0;
	u64 index = 0;
	unsigned long nr = 1;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFDIR | mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}

	drop_on_err = 1;	/* inode exists; release it on failure */

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_fail;

	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	/* new directories start out empty */
	btrfs_i_size_write(inode, 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail;

	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
			     dentry->d_name.len, 0, index);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	drop_on_err = 0;	/* the dentry now owns our reference */

out_fail:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_on_err)
		iput(inode);
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
5094
d352ac68
CM
5095/* helper for btfs_get_extent. Given an existing extent in the tree,
5096 * and an extent that you want to insert, deal with overlap and insert
5097 * the new extent into the tree.
5098 */
3b951516
CM
5099static int merge_extent_mapping(struct extent_map_tree *em_tree,
5100 struct extent_map *existing,
e6dcd2dc
CM
5101 struct extent_map *em,
5102 u64 map_start, u64 map_len)
3b951516
CM
5103{
5104 u64 start_diff;
3b951516 5105
e6dcd2dc
CM
5106 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5107 start_diff = map_start - em->start;
5108 em->start = map_start;
5109 em->len = map_len;
c8b97818
CM
5110 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5111 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
e6dcd2dc 5112 em->block_start += start_diff;
c8b97818
CM
5113 em->block_len -= start_diff;
5114 }
e6dcd2dc 5115 return add_extent_mapping(em_tree, em);
3b951516
CM
5116}
5117
/*
 * Decompress an inline file extent into 'page'.  The compressed
 * bytes are copied out of the leaf into a temporary buffer first,
 * since btrfs_decompress() needs a flat input.
 *
 * NOTE(review): a btrfs_decompress() failure is masked — the
 * remainder of the page is zero-filled and 0 is returned.  Confirm
 * this "best effort" behavior is intended; only the kmalloc failure
 * propagates (-ENOMEM).
 */
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct inode *inode, struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	/* inline extents always start at offset 0 within the page */
	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(leaf, path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	/* never decompress past one page worth of data */
	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);
	if (ret) {
		/* decompression fell short: zero the rest of the range */
		char *kaddr = kmap_atomic(page);
		unsigned long copy_size = min_t(u64,
				PAGE_CACHE_SIZE - pg_offset,
				max_size - extent_offset);
		memset(kaddr + pg_offset, 0, copy_size);
		kunmap_atomic(kaddr);
	}
	kfree(tmp);
	return 0;
}
5157
/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 *
 * Returns the extent_map covering 'start' (holding a reference the
 * caller must drop) or an ERR_PTR; it never returns NULL.
 */
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 len,
				    int create)
{
	int ret;
	int err = 0;
	u64 bytenr;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	u32 found_type;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_trans_handle *trans = NULL;
	int compress_type;

again:
	/* fast path: is the range already cached in the em tree? */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = root->fs_info->fs_devices->latest_bdev;
	read_unlock(&em_tree->lock);

	if (em) {
		/*
		 * A cached em is only usable if it actually covers
		 * 'start'; inline extents must be re-read when the
		 * caller wants the data copied into 'page'.
		 */
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * Chances are we'll be called again, so go ahead and do
		 * readahead
		 */
		path->reada = 1;
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		/* exact key not found; back up to the previous item */
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	compress_type = btrfs_file_extent_compression(leaf, item);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, item);
		/* inline extents are rounded up to a full sector */
		extent_end = (extent_start + size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	}

	if (start >= extent_end) {
		/*
		 * The item we found ends before 'start'; peek at the
		 * next item to size the hole between them.
		 */
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		em->start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
				 btrfs_file_extent_offset(leaf, item);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0) {
			/* disk_bytenr of 0 marks a hole */
			em->block_start = EXTENT_MAP_HOLE;
			goto insert;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
								   item);
		} else {
			bytenr += btrfs_file_extent_offset(leaf, item);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		em->block_start = EXTENT_MAP_INLINE;
		if (!page || create) {
			/* no page to fill (or writing): just map it */
			em->start = extent_start;
			em->len = extent_end - extent_start;
			goto out;
		}

		size = btrfs_file_extent_inline_len(leaf, item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
				size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = (copy_size + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		em->orig_start = EXTENT_MAP_INLINE;
		if (compress_type) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			/* read path: copy (or decompress) into the page */
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, inode, page,
							pg_offset,
							extent_offset, item);
				BUG_ON(ret); /* -ENOMEM */
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_CACHE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			/* write-back of an inline extent; believed
			 * unreachable here, hence the BUG() */
			BUG();
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;

				btrfs_release_path(path);
				trans = btrfs_join_transaction(root);

				if (IS_ERR(trans))
					return ERR_CAST(trans);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	} else {
		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
		WARN_ON(1);
	}
not_found:
	/* nothing on disk for this range: report a hole */
	em->start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			/* found one, but it doesn't cover 'start' */
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			/* the conflict overlaps our em but not 'start':
			 * clip our em around it and retry the insert */
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			/* someone beat us to it; use their em */
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:

	trace_btrfs_get_extent(root, em);

	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	BUG_ON(!em); /* Error is always set */
	return em;
}
5465
/*
 * fiemap-flavored btrfs_get_extent(): when the on-disk lookup
 * reports a hole, also check the io_tree for delalloc bytes hiding
 * behind it and synthesize an EXTENT_MAP_DELALLOC em for them, so
 * fiemap can report not-yet-written data.
 */
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to a hole, there might
		 * actually be delalloc bytes behind it
		 */
		if (em->block_start != EXTENT_MAP_HOLE)
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, let's look for delalloc */
	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start,range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
		} else {
			/* delalloc region leads: report it directly */
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}
5587
/*
 * Allocate a fresh data extent for a direct-IO write over [start,
 * start+len), reusing the caller's hole em when it matches the range
 * exactly.  Returns the (pinned) em for the new allocation or an
 * ERR_PTR.  Ownership of the incoming 'em' transfers to this
 * function.
 *
 * NOTE(review): when the caller's exact-match hole em is kept
 * (insert == false) and btrfs_reserve_extent() fails, 'em' is
 * overwritten with ERR_PTR without a free_extent_map() — looks like
 * a leaked reference; confirm.
 */
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  struct extent_map *em,
						  u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;
	bool insert = false;

	/*
	 * Ok if the extent map we looked up is a hole and is for the exact
	 * range we want, there is no reason to allocate a new one, however if
	 * it is not right then we need to free this one and drop the cache for
	 * our range.
	 */
	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
	    em->len != len) {
		free_extent_map(em);
		em = NULL;
		insert = true;
		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	/* small writes near the front of the file: flag for autodefrag */
	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
		btrfs_add_inode_defrag(trans, inode);

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
				   alloc_hint, &ins, 1);
	if (ret) {
		em = ERR_PTR(ret);
		goto out;
	}

	if (!em) {
		em = alloc_extent_map();
		if (!em) {
			em = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	em->start = start;
	em->orig_start = em->start;
	em->len = ins.offset;

	em->block_start = ins.objectid;
	em->block_len = ins.offset;
	em->bdev = root->fs_info->fs_devices->latest_bdev;

	/*
	 * We need to do this because if we're using the original em we searched
	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
	 */
	em->flags = 0;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* insert, dropping any cached em that raced in under us */
	while (insert) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST)
			break;
		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
	}

	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
					   ins.offset, ins.offset, 0);
	if (ret) {
		/* give the reserved space back on failure */
		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
		em = ERR_PTR(ret);
	}
out:
	btrfs_end_transaction(trans, root);
	return em;
}
5673
/*
 * Decide whether a direct-IO write over [offset, offset+len) may go
 * straight to the existing extent (nocow).
 *
 * returns 1 when the nocow is safe, 0 when the block must be cow'd,
 * and < 0 on error.
 */
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}
	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	backref_offset = btrfs_file_extent_offset(leaf, fi);

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end < offset + len) {
		/* extent doesn't include our full range, must cow */
		goto out;
	}

	/* extent lives in a read-only block group, must cow */
	if (btrfs_extent_readonly(root, disk_bytenr))
		goto out;

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */
	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
				  key.offset - backref_offset, disk_bytenr))
		goto out;

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	num_bytes = min(offset + len, extent_end) - offset;
	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
5773
eb838e73
JB
/*
 * NOTE(review): lock the extent range [lockstart, lockend] for direct I/O,
 * retrying until the range is free of ordered extents and (for writes) of
 * uptodate buffered pages.  Returns 0 with the extent lock held and
 * *cached_state populated, or a negative errno (caller falls back to
 * buffered I/O on failure).
 */
5774static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
5775 struct extent_state **cached_state, int writing)
5776{
5777 struct btrfs_ordered_extent *ordered;
5778 int ret = 0;
5779
5780 while (1) {
5781 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5782 0, cached_state);
5783 /*
5784 * We're concerned with the entire range that we're going to be
5785 * doing DIO to, so we need to make sure theres no ordered
5786 * extents in this range.
5787 */
5788 ordered = btrfs_lookup_ordered_range(inode, lockstart,
5789 lockend - lockstart + 1);
5790
5791 /*
5792 * We need to make sure there are no buffered pages in this
5793 * range either, we could have raced between the invalidate in
5794 * generic_file_direct_write and locking the extent. The
5795 * invalidate needs to happen so that reads after a write do not
5796 * get stale data.
5797 */
5798 if (!ordered && (!writing ||
5799 !test_range_bit(&BTRFS_I(inode)->io_tree,
5800 lockstart, lockend, EXTENT_UPTODATE, 0,
5801 *cached_state)))
5802 break;
5803
/* drop the lock before waiting so ordered-extent completion can proceed */
5804 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5805 cached_state, GFP_NOFS);
5806
5807 if (ordered) {
5808 btrfs_start_ordered_extent(inode, ordered, 1);
5809 btrfs_put_ordered_extent(ordered);
5810 } else {
5811 /* Screw you mmap */
5812 ret = filemap_write_and_wait_range(inode->i_mapping,
5813 lockstart,
5814 lockend);
5815 if (ret)
5816 break;
5817
5818 /*
5819 * If we found a page that couldn't be invalidated just
5820 * fall back to buffered.
5821 */
5822 ret = invalidate_inode_pages2_range(inode->i_mapping,
5823 lockstart >> PAGE_CACHE_SHIFT,
5824 lockend >> PAGE_CACHE_SHIFT);
5825 if (ret)
5826 break;
5827 }
5828
5829 cond_resched();
5830 }
5831
5832 return ret;
5833}
5834
4b46fce2
JB
/*
 * NOTE(review): get_block callback handed to __blockdev_direct_IO().  Maps
 * @iblock of @inode to an on-disk extent in @bh_result.  For writes it
 * reserves delalloc space, locks the extent range (falling back to buffered
 * with -ENOTBLK if the pagecache can't be invalidated), then either reuses
 * an existing extent (NODATACOW / PREALLOC, verified via can_nocow_odirect)
 * or allocates a fresh extent.  Inline/compressed extents always bounce back
 * to the buffered path.
 */
5835static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5836 struct buffer_head *bh_result, int create)
5837{
5838 struct extent_map *em;
5839 struct btrfs_root *root = BTRFS_I(inode)->root;
eb838e73 5840 struct extent_state *cached_state = NULL;
4b46fce2 5841 u64 start = iblock << inode->i_blkbits;
eb838e73 5842 u64 lockstart, lockend;
4b46fce2 5843 u64 len = bh_result->b_size;
46bfbb5c 5844 struct btrfs_trans_handle *trans;
eb838e73
JB
5845 int unlock_bits = EXTENT_LOCKED;
5846 int ret;
5847
eb838e73
JB
5848 if (create) {
5849 ret = btrfs_delalloc_reserve_space(inode, len);
5850 if (ret)
5851 return ret;
5852 unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
c329861d
JB
5853 } else {
/* reads are clamped to one sector at a time */
5854 len = min_t(u64, len, root->sectorsize);
eb838e73
JB
5855 }
5856
c329861d
JB
5857 lockstart = start;
5858 lockend = start + len - 1;
5859
eb838e73
JB
5860 /*
5861 * If this errors out it's because we couldn't invalidate pagecache for
5862 * this range and we need to fallback to buffered.
5863 */
5864 if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
5865 return -ENOTBLK;
5866
5867 if (create) {
5868 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
5869 lockend, EXTENT_DELALLOC, NULL,
5870 &cached_state, GFP_NOFS);
5871 if (ret)
5872 goto unlock_err;
5873 }
4b46fce2
JB
5874
5875 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
eb838e73
JB
5876 if (IS_ERR(em)) {
5877 ret = PTR_ERR(em);
5878 goto unlock_err;
5879 }
4b46fce2
JB
5880
5881 /*
5882 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
5883 * io. INLINE is special, and we could probably kludge it in here, but
5884 * it's still buffered so for safety lets just fall back to the generic
5885 * buffered path.
5886 *
5887 * For COMPRESSED we _have_ to read the entire extent in so we can
5888 * decompress it, so there will be buffering required no matter what we
5889 * do, so go ahead and fallback to buffered.
5890 *
5891 * We return -ENOTBLK because thats what makes DIO go ahead and go back
5892 * to buffered IO. Don't blame me, this is the price we pay for using
5893 * the generic code.
5894 */
5895 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5896 em->block_start == EXTENT_MAP_INLINE) {
5897 free_extent_map(em);
eb838e73
JB
5898 ret = -ENOTBLK;
5899 goto unlock_err;
4b46fce2
JB
5900 }
5901
5902 /* Just a good old fashioned hole, return */
5903 if (!create && (em->block_start == EXTENT_MAP_HOLE ||
5904 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5905 free_extent_map(em);
eb838e73
JB
5906 ret = 0;
5907 goto unlock_err;
4b46fce2
JB
5908 }
5909
5910 /*
5911 * We don't allocate a new extent in the following cases
5912 *
5913 * 1) The inode is marked as NODATACOW. In this case we'll just use the
5914 * existing extent.
5915 * 2) The extent is marked as PREALLOC. We're good to go here and can
5916 * just use the extent.
5917 *
5918 */
46bfbb5c 5919 if (!create) {
eb838e73
JB
/* read path: nothing left to unlock at the front, only trim the tail */
5920 len = min(len, em->len - (start - em->start));
5921 lockstart = start + len;
5922 goto unlock;
46bfbb5c 5923 }
4b46fce2
JB
5924
5925 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
5926 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
5927 em->block_start != EXTENT_MAP_HOLE)) {
4b46fce2
JB
5928 int type;
5929 int ret;
46bfbb5c 5930 u64 block_start;
4b46fce2
JB
5931
5932 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5933 type = BTRFS_ORDERED_PREALLOC;
5934 else
5935 type = BTRFS_ORDERED_NOCOW;
46bfbb5c 5936 len = min(len, em->len - (start - em->start));
4b46fce2 5937 block_start = em->block_start + (start - em->start);
46bfbb5c
CM
5938
5939 /*
5940 * we're not going to log anything, but we do need
5941 * to make sure the current transaction stays open
5942 * while we look for nocow cross refs
5943 */
7a7eaa40 5944 trans = btrfs_join_transaction(root);
3612b495 5945 if (IS_ERR(trans))
46bfbb5c
CM
5946 goto must_cow;
5947
5948 if (can_nocow_odirect(trans, inode, start, len) == 1) {
5949 ret = btrfs_add_ordered_extent_dio(inode, start,
5950 block_start, len, len, type);
5951 btrfs_end_transaction(trans, root);
5952 if (ret) {
5953 free_extent_map(em);
eb838e73 5954 goto unlock_err;
46bfbb5c
CM
5955 }
5956 goto unlock;
4b46fce2 5957 }
46bfbb5c 5958 btrfs_end_transaction(trans, root);
4b46fce2 5959 }
46bfbb5c
CM
5960must_cow:
5961 /*
5962 * this will cow the extent, reset the len in case we changed
5963 * it above
5964 */
5965 len = bh_result->b_size;
16d299ac 5966 em = btrfs_new_extent_direct(inode, em, start, len);
eb838e73
JB
5967 if (IS_ERR(em)) {
5968 ret = PTR_ERR(em);
5969 goto unlock_err;
5970 }
46bfbb5c
CM
5971 len = min(len, em->len - (start - em->start));
5972unlock:
4b46fce2
JB
5973 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
5974 inode->i_blkbits;
46bfbb5c 5975 bh_result->b_size = len;
4b46fce2
JB
5976 bh_result->b_bdev = em->bdev;
5977 set_buffer_mapped(bh_result);
c3473e83
JB
5978 if (create) {
5979 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5980 set_buffer_new(bh_result);
5981
5982 /*
5983 * Need to update the i_size under the extent lock so buffered
5984 * readers will get the updated i_size when we unlock.
5985 */
5986 if (start + len > i_size_read(inode))
5987 i_size_write(inode, start + len);
5988 }
4b46fce2 5989
eb838e73
JB
5990 /*
5991 * In the case of write we need to clear and unlock the entire range,
5992 * in the case of read we need to unlock only the end area that we
5993 * aren't using if there is any left over space.
5994 */
5995 if (lockstart < lockend)
5996 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5997 unlock_bits, 1, 0, &cached_state, GFP_NOFS)
5998 else
5999 free_extent_state(cached_state);
6000
4b46fce2
JB
6001 free_extent_map(em);
6002
6003 return 0;
eb838e73
JB
6004
6005unlock_err:
6006 if (create)
6007 unlock_bits |= EXTENT_DO_ACCOUNTING;
6008
6009 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6010 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6011 return ret;
4b46fce2
JB
6012}
6013
/*
 * NOTE(review): per-request bookkeeping for one direct I/O, shared by every
 * bio the request is split into.  Freed by the endio handler once
 * pending_bios drops to zero and the original bio is completed.
 */
6014struct btrfs_dio_private {
6015 struct inode *inode;
6016 u64 logical_offset;
6017 u64 disk_bytenr;
6018 u64 bytes;
/* the submitter's bi_private, restored before calling dio_end_io() */
4b46fce2 6019 void *private;
e65e1535
MX
6020
6021 /* number of bios pending for this dio */
6022 atomic_t pending_bios;
6023
6024 /* IO errors */
6025 int errors;
6026
6027 struct bio *orig_bio;
4b46fce2
JB
6028};
6029
/*
 * NOTE(review): bio completion for direct reads.  Verifies the stored csum
 * for every bvec (unless the inode is NODATASUM), unlocks the extent range
 * that the submit path locked, then hands completion back to the generic
 * DIO code via dio_end_io().  A csum mismatch logs the offset and fails the
 * bio with -EIO.
 */
6030static void btrfs_endio_direct_read(struct bio *bio, int err)
6031{
e65e1535 6032 struct btrfs_dio_private *dip = bio->bi_private;
4b46fce2
JB
6033 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
6034 struct bio_vec *bvec = bio->bi_io_vec;
4b46fce2
JB
6035 struct inode *inode = dip->inode;
6036 struct btrfs_root *root = BTRFS_I(inode)->root;
6037 u64 start;
4b46fce2
JB
6038
6039 start = dip->logical_offset;
6040 do {
6041 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6042 struct page *page = bvec->bv_page;
6043 char *kaddr;
6044 u32 csum = ~(u32)0;
c329861d 6045 u64 private = ~(u32)0;
4b46fce2
JB
6046 unsigned long flags;
6047
c329861d
JB
/* expected csum was stashed in the io_tree by the submit path */
6048 if (get_state_private(&BTRFS_I(inode)->io_tree,
6049 start, &private))
6050 goto failed;
4b46fce2 6051 local_irq_save(flags);
7ac687d9 6052 kaddr = kmap_atomic(page);
4b46fce2
JB
6053 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
6054 csum, bvec->bv_len);
6055 btrfs_csum_final(csum, (char *)&csum);
7ac687d9 6056 kunmap_atomic(kaddr);
4b46fce2
JB
6057 local_irq_restore(flags);
6058
6059 flush_dcache_page(bvec->bv_page);
c329861d
JB
6060 if (csum != private) {
6061failed:
33345d01 6062 printk(KERN_ERR "btrfs csum failed ino %llu off"
4b46fce2 6063 " %llu csum %u private %u\n",
33345d01
LZ
6064 (unsigned long long)btrfs_ino(inode),
6065 (unsigned long long)start,
c329861d 6066 csum, (unsigned)private);
4b46fce2
JB
6067 err = -EIO;
6068 }
6069 }
6070
6071 start += bvec->bv_len;
4b46fce2
JB
6072 bvec++;
6073 } while (bvec <= bvec_end);
6074
6075 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
d0082371 6076 dip->logical_offset + dip->bytes - 1);
4b46fce2
JB
6077 bio->bi_private = dip->private;
6078
4b46fce2 6079 kfree(dip);
c0da7aa1
JB
6080
6081 /* If we had a csum failure make sure to clear the uptodate flag */
6082 if (err)
6083 clear_bit(BIO_UPTODATE, &bio->bi_flags);
4b46fce2
JB
6084 dio_end_io(bio, err);
6085}
6086
/*
 * NOTE(review): bio completion for direct writes.  Walks every ordered
 * extent covered by this dio (a single bio may span several) and, when the
 * last pending chunk of an ordered extent completes, queues its finish work
 * on the endio_write_workers workqueue.  Finally restores bi_private and
 * completes the bio through dio_end_io().
 */
6087static void btrfs_endio_direct_write(struct bio *bio, int err)
6088{
6089 struct btrfs_dio_private *dip = bio->bi_private;
6090 struct inode *inode = dip->inode;
6091 struct btrfs_root *root = BTRFS_I(inode)->root;
4b46fce2 6092 struct btrfs_ordered_extent *ordered = NULL;
163cf09c
CM
6093 u64 ordered_offset = dip->logical_offset;
6094 u64 ordered_bytes = dip->bytes;
4b46fce2
JB
6095 int ret;
6096
6097 if (err)
6098 goto out_done;
163cf09c
CM
6099again:
6100 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
6101 &ordered_offset,
5fd02043 6102 ordered_bytes, !err);
4b46fce2 6103 if (!ret)
163cf09c 6104 goto out_test;
4b46fce2 6105
5fd02043
JB
6106 ordered->work.func = finish_ordered_fn;
6107 ordered->work.flags = 0;
6108 btrfs_queue_worker(&root->fs_info->endio_write_workers,
6109 &ordered->work);
163cf09c
CM
6110out_test:
6111 /*
6112 * our bio might span multiple ordered extents. If we haven't
6113 * completed the accounting for the whole dio, go back and try again
6114 */
6115 if (ordered_offset < dip->logical_offset + dip->bytes) {
6116 ordered_bytes = dip->logical_offset + dip->bytes -
6117 ordered_offset;
5fd02043 6118 ordered = NULL;
163cf09c
CM
6119 goto again;
6120 }
4b46fce2
JB
6121out_done:
6122 bio->bi_private = dip->private;
6123
4b46fce2 6124 kfree(dip);
c0da7aa1
JB
6125
6126 /* If we had an error make sure to clear the uptodate flag */
6127 if (err)
6128 clear_bit(BIO_UPTODATE, &bio->bi_flags);
4b46fce2
JB
6129 dio_end_io(bio, err);
6130}
6131
eaf25d93
CM
/*
 * NOTE(review): async-submit "start" hook for DIO writes — computes csums
 * for the bio before it is mapped to the device.  ENOMEM here is treated as
 * fatal (BUG_ON), matching the existing error convention of this path.
 */
6132static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
6133 struct bio *bio, int mirror_num,
6134 unsigned long bio_flags, u64 offset)
6135{
6136 int ret;
6137 struct btrfs_root *root = BTRFS_I(inode)->root;
6138 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
79787eaa 6139 BUG_ON(ret); /* -ENOMEM */
eaf25d93
CM
6140 return 0;
6141}
6142
e65e1535
MX
/*
 * NOTE(review): per-split-bio completion.  Records any error in the shared
 * dip, and when the last outstanding bio finishes, ends the original bio
 * (with error if any split failed).  The memory barrier guarantees
 * dip->errors is visible before the pending count can reach zero.
 */
6143static void btrfs_end_dio_bio(struct bio *bio, int err)
6144{
6145 struct btrfs_dio_private *dip = bio->bi_private;
6146
6147 if (err) {
33345d01 6148 printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
3dd1462e 6149 "sector %#Lx len %u err no %d\n",
33345d01 6150 (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
3dd1462e 6151 (unsigned long long)bio->bi_sector, bio->bi_size, err);
e65e1535
MX
6152 dip->errors = 1;
6153
6154 /*
6155 * before atomic variable goto zero, we must make sure
6156 * dip->errors is perceived to be set.
6157 */
6158 smp_mb__before_atomic_dec();
6159 }
6160
6161 /* if there are more bios still pending for this dio, just exit */
6162 if (!atomic_dec_and_test(&dip->pending_bios))
6163 goto out;
6164
6165 if (dip->errors)
6166 bio_io_error(dip->orig_bio);
6167 else {
6168 set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
6169 bio_endio(dip->orig_bio, 0);
6170 }
6171out:
6172 bio_put(bio);
6173}
6174
/*
 * NOTE(review): allocate a bio for DIO starting at @first_sector, sized to
 * the maximum vec count the device allows.
 */
6175static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6176 u64 first_sector, gfp_t gfp_flags)
6177{
6178 int nr_vecs = bio_get_nr_vecs(bdev);
6179 return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
6180}
6181
/*
 * NOTE(review): submit one (possibly split) DIO bio.  Reads get an endio
 * workqueue hook; unless @skip_sum, writes are csummed (asynchronously when
 * @async_submit, inline otherwise) and reads have their expected csums
 * looked up.  Finally the bio is mapped to the device.  The bio_get/bio_put
 * pair keeps the bio alive across submission.
 */
6182static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6183 int rw, u64 file_offset, int skip_sum,
c329861d 6184 int async_submit)
e65e1535
MX
6185{
6186 int write = rw & REQ_WRITE;
6187 struct btrfs_root *root = BTRFS_I(inode)->root;
6188 int ret;
6189
6190 bio_get(bio);
5fd02043
JB
6191
6192 if (!write) {
6193 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
6194 if (ret)
6195 goto err;
6196 }
e65e1535 6197
1ae39938
JB
6198 if (skip_sum)
6199 goto map;
6200
6201 if (write && async_submit) {
e65e1535
MX
6202 ret = btrfs_wq_submit_bio(root->fs_info,
6203 inode, rw, bio, 0, 0,
6204 file_offset,
6205 __btrfs_submit_bio_start_direct_io,
6206 __btrfs_submit_bio_done);
/* async path submits the bio itself; jump straight to cleanup */
6207 goto err;
1ae39938
JB
6208 } else if (write) {
6209 /*
6210 * If we aren't doing async submit, calculate the csum of the
6211 * bio now.
6212 */
6213 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
6214 if (ret)
6215 goto err;
c2db1073 6216 } else if (!skip_sum) {
c329861d 6217 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
c2db1073
TI
6218 if (ret)
6219 goto err;
6220 }
e65e1535 6221
1ae39938
JB
6222map:
6223 ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
e65e1535
MX
6224err:
6225 bio_put(bio);
6226 return ret;
6227}
6228
/*
 * NOTE(review): split the original DIO bio along device stripe boundaries
 * (as reported by btrfs_map_block) and submit each piece through
 * __btrfs_submit_dio_bio().  If the whole bio fits in one mapping it is
 * submitted as-is.  pending_bios is bumped before each submit so the endio
 * handler cannot free the dip while more splits are being set up; on error
 * the handler is left to complete the original bio.
 */
6229static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6230 int skip_sum)
6231{
6232 struct inode *inode = dip->inode;
6233 struct btrfs_root *root = BTRFS_I(inode)->root;
6234 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6235 struct bio *bio;
6236 struct bio *orig_bio = dip->orig_bio;
6237 struct bio_vec *bvec = orig_bio->bi_io_vec;
6238 u64 start_sector = orig_bio->bi_sector;
6239 u64 file_offset = dip->logical_offset;
6240 u64 submit_len = 0;
6241 u64 map_length;
6242 int nr_pages = 0;
e65e1535 6243 int ret = 0;
1ae39938 6244 int async_submit = 0;
e65e1535 6245
e65e1535
MX
6246 map_length = orig_bio->bi_size;
6247 ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6248 &map_length, NULL, 0);
6249 if (ret) {
64728bbb 6250 bio_put(orig_bio);
e65e1535
MX
6251 return -EIO;
6252 }
6253
02f57c7a
JB
6254 if (map_length >= orig_bio->bi_size) {
6255 bio = orig_bio;
6256 goto submit;
6257 }
6258
1ae39938 6259 async_submit = 1;
02f57c7a
JB
6260 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
6261 if (!bio)
6262 return -ENOMEM;
6263 bio->bi_private = dip;
6264 bio->bi_end_io = btrfs_end_dio_bio;
6265 atomic_inc(&dip->pending_bios);
6266
e65e1535
MX
6267 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
6268 if (unlikely(map_length < submit_len + bvec->bv_len ||
6269 bio_add_page(bio, bvec->bv_page, bvec->bv_len,
6270 bvec->bv_offset) < bvec->bv_len)) {
6271 /*
6272 * inc the count before we submit the bio so
6273 * we know the end IO handler won't happen before
6274 * we inc the count. Otherwise, the dip might get freed
6275 * before we're done setting it up
6276 */
6277 atomic_inc(&dip->pending_bios);
6278 ret = __btrfs_submit_dio_bio(bio, inode, rw,
6279 file_offset, skip_sum,
c329861d 6280 async_submit);
e65e1535
MX
6281 if (ret) {
6282 bio_put(bio);
6283 atomic_dec(&dip->pending_bios);
6284 goto out_err;
6285 }
6286
e65e1535
MX
6287 start_sector += submit_len >> 9;
6288 file_offset += submit_len;
6289
6290 submit_len = 0;
6291 nr_pages = 0;
6292
/* start a fresh bio for the next stripe and re-query its mapping */
6293 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
6294 start_sector, GFP_NOFS);
6295 if (!bio)
6296 goto out_err;
6297 bio->bi_private = dip;
6298 bio->bi_end_io = btrfs_end_dio_bio;
6299
6300 map_length = orig_bio->bi_size;
6301 ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6302 &map_length, NULL, 0);
6303 if (ret) {
6304 bio_put(bio);
6305 goto out_err;
6306 }
6307 } else {
6308 submit_len += bvec->bv_len;
6309 nr_pages ++;
6310 bvec++;
6311 }
6312 }
6313
02f57c7a 6314submit:
e65e1535 6315 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
c329861d 6316 async_submit);
e65e1535
MX
6317 if (!ret)
6318 return 0;
6319
6320 bio_put(bio);
6321out_err:
6322 dip->errors = 1;
6323 /*
6324 * before atomic variable goto zero, we must
6325 * make sure dip->errors is perceived to be set.
6326 */
6327 smp_mb__before_atomic_dec();
6328 if (atomic_dec_and_test(&dip->pending_bios))
6329 bio_io_error(dip->orig_bio);
6330
6331 /* bio_end_io() will handle error, so we needn't return it */
6332 return 0;
6333}
6334
4b46fce2
JB
/*
 * NOTE(review): entry point handed to __blockdev_direct_IO().  Allocates the
 * shared btrfs_dio_private, sums up the bio's length, installs the
 * read/write endio handler, and hands off to btrfs_submit_direct_hook().
 * On failure for writes, the already-created ordered extent is looked up
 * and torn down (the double put drops both the lookup ref and the base ref).
 */
6335static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6336 loff_t file_offset)
6337{
6338 struct btrfs_root *root = BTRFS_I(inode)->root;
6339 struct btrfs_dio_private *dip;
6340 struct bio_vec *bvec = bio->bi_io_vec;
4b46fce2 6341 int skip_sum;
7b6d91da 6342 int write = rw & REQ_WRITE;
4b46fce2
JB
6343 int ret = 0;
6344
6345 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
6346
6347 dip = kmalloc(sizeof(*dip), GFP_NOFS);
6348 if (!dip) {
6349 ret = -ENOMEM;
6350 goto free_ordered;
6351 }
4b46fce2
JB
6352
6353 dip->private = bio->bi_private;
6354 dip->inode = inode;
6355 dip->logical_offset = file_offset;
6356
4b46fce2
JB
6357 dip->bytes = 0;
6358 do {
6359 dip->bytes += bvec->bv_len;
6360 bvec++;
6361 } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
6362
46bfbb5c 6363 dip->disk_bytenr = (u64)bio->bi_sector << 9;
4b46fce2 6364 bio->bi_private = dip;
e65e1535
MX
6365 dip->errors = 0;
6366 dip->orig_bio = bio;
6367 atomic_set(&dip->pending_bios, 0);
4b46fce2
JB
6368
6369 if (write)
6370 bio->bi_end_io = btrfs_endio_direct_write;
6371 else
6372 bio->bi_end_io = btrfs_endio_direct_read;
6373
e65e1535
MX
6374 ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
6375 if (!ret)
eaf25d93 6376 return;
4b46fce2
JB
6377free_ordered:
6378 /*
6379 * If this is a write, we need to clean up the reserved space and kill
6380 * the ordered extent.
6381 */
6382 if (write) {
6383 struct btrfs_ordered_extent *ordered;
955256f2 6384 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
4b46fce2
JB
6385 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
6386 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
6387 btrfs_free_reserved_extent(root, ordered->start,
6388 ordered->disk_len);
6389 btrfs_put_ordered_extent(ordered);
6390 btrfs_put_ordered_extent(ordered);
6391 }
6392 bio_endio(bio, ret);
6393}
6394
5a5f79b5
CM
/*
 * NOTE(review): validate a direct I/O request.  Returns 0 when the request
 * is acceptable, -EINVAL (via retval) when the file offset or any iovec
 * base/length is not sector-aligned, or — for reads only — when two iovecs
 * share the same base address (which would confuse csum verification).
 * A non-zero return makes btrfs_direct_IO fall back to buffered I/O.
 */
6395static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
6396 const struct iovec *iov, loff_t offset,
6397 unsigned long nr_segs)
6398{
6399 int seg;
a1b75f7d 6400 int i;
5a5f79b5
CM
6401 size_t size;
6402 unsigned long addr;
6403 unsigned blocksize_mask = root->sectorsize - 1;
6404 ssize_t retval = -EINVAL;
6405 loff_t end = offset;
6406
6407 if (offset & blocksize_mask)
6408 goto out;
6409
6410 /* Check the memory alignment. Blocks cannot straddle pages */
6411 for (seg = 0; seg < nr_segs; seg++) {
6412 addr = (unsigned long)iov[seg].iov_base;
6413 size = iov[seg].iov_len;
6414 end += size;
a1b75f7d 6415 if ((addr & blocksize_mask) || (size & blocksize_mask))
5a5f79b5 6416 goto out;
a1b75f7d
JB
6417
6418 /* If this is a write we don't need to check anymore */
6419 if (rw & WRITE)
6420 continue;
6421
6422 /*
6423 * Check to make sure we don't have duplicate iov_base's in this
6424 * iovec, if so return EINVAL, otherwise we'll get csum errors
6425 * when reading back.
6426 */
6427 for (i = seg + 1; i < nr_segs; i++) {
6428 if (iov[seg].iov_base == iov[i].iov_base)
6429 goto out;
6430 }
5a5f79b5
CM
6431 }
6432 retval = 0;
6433out:
6434 return retval;
6435}
eb838e73 6436
16432985
CM
/*
 * NOTE(review): ->direct_IO address_space op.  Rejects misaligned requests
 * (returning 0 so the VFS falls back to buffered I/O) and otherwise drives
 * the generic blockdev DIO machinery with btrfs's get_block and submit
 * callbacks.
 */
6437static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6438 const struct iovec *iov, loff_t offset,
6439 unsigned long nr_segs)
6440{
4b46fce2
JB
6441 struct file *file = iocb->ki_filp;
6442 struct inode *inode = file->f_mapping->host;
4b46fce2 6443
5a5f79b5 6444 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
eb838e73 6445 offset, nr_segs))
5a5f79b5 6446 return 0;
4845e44f 6447
eb838e73 6448 return __blockdev_direct_IO(rw, iocb, inode,
5a5f79b5
CM
6449 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6450 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6451 btrfs_submit_direct, 0);
16432985
CM
6452}
6453
1506fcc8
YS
/* NOTE(review): ->fiemap inode op — delegates to the generic extent fiemap
 * walker with btrfs's fiemap-specific extent lookup. */
6454static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6455 __u64 start, __u64 len)
6456{
ec29ed5b 6457 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
1506fcc8
YS
6458}
6459
/* NOTE(review): ->readpage — thin wrapper around the extent_io read path. */
a52d9a80 6460int btrfs_readpage(struct file *file, struct page *page)
9ebefb18 6461{
d1310b2e
CM
6462 struct extent_io_tree *tree;
6463 tree = &BTRFS_I(page->mapping->host)->io_tree;
8ddc7d9c 6464 return extent_read_full_page(tree, page, btrfs_get_extent, 0);
9ebefb18 6465}
1832a6d5 6466
/*
 * NOTE(review): ->writepage.  Refuses to write under memory-allocation
 * recursion (PF_MEMALLOC) by redirtying the page — writeback here could
 * deadlock on allocations — otherwise hands off to the extent_io writer.
 */
a52d9a80 6467static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
39279cc3 6468{
d1310b2e 6469 struct extent_io_tree *tree;
b888db2b
CM
6470
6471
6472 if (current->flags & PF_MEMALLOC) {
6473 redirty_page_for_writepage(wbc, page);
6474 unlock_page(page);
6475 return 0;
6476 }
d1310b2e 6477 tree = &BTRFS_I(page->mapping->host)->io_tree;
a52d9a80 6478 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9ebefb18
CM
6479}
6480
f421950f
CM
/* NOTE(review): ->writepages — delegates batched writeback to extent_io. */
6481int btrfs_writepages(struct address_space *mapping,
6482 struct writeback_control *wbc)
b293f02e 6483{
d1310b2e 6484 struct extent_io_tree *tree;
771ed689 6485
d1310b2e 6486 tree = &BTRFS_I(mapping->host)->io_tree;
b293f02e
CM
6487 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
6488}
6489
3ab2fb5a
CM
/* NOTE(review): ->readpages — delegates readahead batches to extent_io. */
6490static int
6491btrfs_readpages(struct file *file, struct address_space *mapping,
6492 struct list_head *pages, unsigned nr_pages)
6493{
d1310b2e
CM
6494 struct extent_io_tree *tree;
6495 tree = &BTRFS_I(mapping->host)->io_tree;
3ab2fb5a
CM
6496 return extent_readpages(tree, mapping, pages, nr_pages,
6497 btrfs_get_extent);
6498}
/*
 * NOTE(review): common releasepage worker.  Asks extent_io whether the
 * page's extent-mapping state can be dropped; if so (ret == 1), strips the
 * page's private state and drops the corresponding page reference.
 */
e6dcd2dc 6499static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
9ebefb18 6500{
d1310b2e
CM
6501 struct extent_io_tree *tree;
6502 struct extent_map_tree *map;
a52d9a80 6503 int ret;
8c2383c3 6504
d1310b2e
CM
6505 tree = &BTRFS_I(page->mapping->host)->io_tree;
6506 map = &BTRFS_I(page->mapping->host)->extent_tree;
70dec807 6507 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
a52d9a80
CM
6508 if (ret == 1) {
6509 ClearPagePrivate(page);
6510 set_page_private(page, 0);
6511 page_cache_release(page);
39279cc3 6512 }
a52d9a80 6513 return ret;
39279cc3
CM
6514}
6515
e6dcd2dc
CM
/*
 * NOTE(review): ->releasepage — never release pages that are still dirty or
 * under writeback; otherwise defer to __btrfs_releasepage with the gfp mask
 * restricted to NOFS context.
 */
6516static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6517{
98509cfc
CM
6518 if (PageWriteback(page) || PageDirty(page))
6519 return 0;
b335b003 6520 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
e6dcd2dc
CM
6521}
6522
/*
 * NOTE(review): ->invalidatepage.  Waits out writeback, and since I/O on
 * this page will never start, settles any ordered-extent accounting for the
 * page's range under the extent lock before clearing the range's state bits
 * and releasing the page's private data.  A partial invalidate (offset != 0)
 * only attempts a releasepage.
 */
a52d9a80 6523static void btrfs_invalidatepage(struct page *page, unsigned long offset)
39279cc3 6524{
5fd02043 6525 struct inode *inode = page->mapping->host;
d1310b2e 6526 struct extent_io_tree *tree;
e6dcd2dc 6527 struct btrfs_ordered_extent *ordered;
2ac55d41 6528 struct extent_state *cached_state = NULL;
e6dcd2dc
CM
6529 u64 page_start = page_offset(page);
6530 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
39279cc3 6531
8b62b72b
CM
6532 /*
6533 * we have the page locked, so new writeback can't start,
6534 * and the dirty bit won't be cleared while we are here.
6535 *
6536 * Wait for IO on this page so that we can safely clear
6537 * the PagePrivate2 bit and do ordered accounting
6538 */
e6dcd2dc 6539 wait_on_page_writeback(page);
8b62b72b 6540
5fd02043 6541 tree = &BTRFS_I(inode)->io_tree;
e6dcd2dc
CM
6542 if (offset) {
6543 btrfs_releasepage(page, GFP_NOFS);
6544 return;
6545 }
d0082371 6546 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
5fd02043 6547 ordered = btrfs_lookup_ordered_extent(inode,
e6dcd2dc
CM
6548 page_offset(page));
6549 if (ordered) {
eb84ae03
CM
6550 /*
6551 * IO on this page will never be started, so we need
6552 * to account for any ordered extents now
6553 */
e6dcd2dc
CM
6554 clear_extent_bit(tree, page_start, page_end,
6555 EXTENT_DIRTY | EXTENT_DELALLOC |
32c00aff 6556 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
2ac55d41 6557 &cached_state, GFP_NOFS);
8b62b72b
CM
6558 /*
6559 * whoever cleared the private bit is responsible
6560 * for the finish_ordered_io
6561 */
5fd02043
JB
6562 if (TestClearPagePrivate2(page) &&
6563 btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
6564 PAGE_CACHE_SIZE, 1)) {
6565 btrfs_finish_ordered_io(ordered);
8b62b72b 6566 }
e6dcd2dc 6567 btrfs_put_ordered_extent(ordered);
2ac55d41 6568 cached_state = NULL;
/* re-lock: the ordered-extent handling above dropped the range lock */
d0082371 6569 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
e6dcd2dc
CM
6570 }
6571 clear_extent_bit(tree, page_start, page_end,
32c00aff 6572 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2ac55d41 6573 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
e6dcd2dc
CM
6574 __btrfs_releasepage(page, GFP_NOFS);
6575
4a096752 6576 ClearPageChecked(page);
9ad6b7bc 6577 if (PagePrivate(page)) {
9ad6b7bc
CM
6578 ClearPagePrivate(page);
6579 set_page_private(page, 0);
6580 page_cache_release(page);
6581 }
39279cc3
CM
6582}
6583
9ebefb18
CM
6584/*
6585 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
6586 * called from a page fault handler when a page is first dirtied. Hence we must
6587 * be careful to check for EOF conditions here. We set the page up correctly
6588 * for a written page which means we get ENOSPC checking when writing into
6589 * holes and correct delalloc and unwritten extent mapping on filesystems that
6590 * support these features.
6591 *
6592 * We are not allowed to take the i_mutex here so we have to play games to
6593 * protect against truncate races as the page could now be beyond EOF. Because
6594 * vmtruncate() writes the inode size before removing pages, once we have the
6595 * page lock we can determine safely if the page is beyond EOF. If it is not
6596 * beyond EOF, then the page is guaranteed safe against truncation until we
6597 * unlock the page.
6598 */
c2ec175c 6599int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
9ebefb18 6600{
c2ec175c 6601 struct page *page = vmf->page;
6da6abae 6602 struct inode *inode = fdentry(vma->vm_file)->d_inode;
1832a6d5 6603 struct btrfs_root *root = BTRFS_I(inode)->root;
e6dcd2dc
CM
6604 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6605 struct btrfs_ordered_extent *ordered;
2ac55d41 6606 struct extent_state *cached_state = NULL;
e6dcd2dc
CM
6607 char *kaddr;
6608 unsigned long zero_start;
9ebefb18 6609 loff_t size;
1832a6d5 6610 int ret;
9998eb70 6611 int reserved = 0;
a52d9a80 6612 u64 page_start;
e6dcd2dc 6613 u64 page_end;
9ebefb18 6614
/* reserve delalloc space for the page before taking any locks */
0ca1f7ce 6615 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
9998eb70 6616 if (!ret) {
e41f941a 6617 ret = file_update_time(vma->vm_file);
9998eb70
CM
6618 reserved = 1;
6619 }
56a76f82
NP
6620 if (ret) {
6621 if (ret == -ENOMEM)
6622 ret = VM_FAULT_OOM;
6623 else /* -ENOSPC, -EIO, etc */
6624 ret = VM_FAULT_SIGBUS;
9998eb70
CM
6625 if (reserved)
6626 goto out;
6627 goto out_noreserve;
56a76f82 6628 }
1832a6d5 6629
56a76f82 6630 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
e6dcd2dc 6631again:
9ebefb18 6632 lock_page(page);
9ebefb18 6633 size = i_size_read(inode);
e6dcd2dc
CM
6634 page_start = page_offset(page);
6635 page_end = page_start + PAGE_CACHE_SIZE - 1;
a52d9a80 6636
9ebefb18 6637 if ((page->mapping != inode->i_mapping) ||
e6dcd2dc 6638 (page_start >= size)) {
9ebefb18
CM
6639 /* page got truncated out from underneath us */
6640 goto out_unlock;
6641 }
e6dcd2dc
CM
6642 wait_on_page_writeback(page);
6643
d0082371 6644 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
e6dcd2dc
CM
6645 set_page_extent_mapped(page);
6646
eb84ae03
CM
6647 /*
6648 * we can't set the delalloc bits if there are pending ordered
6649 * extents. Drop our locks and wait for them to finish
6650 */
e6dcd2dc
CM
6651 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6652 if (ordered) {
2ac55d41
JB
6653 unlock_extent_cached(io_tree, page_start, page_end,
6654 &cached_state, GFP_NOFS);
e6dcd2dc 6655 unlock_page(page);
eb84ae03 6656 btrfs_start_ordered_extent(inode, ordered, 1);
e6dcd2dc
CM
6657 btrfs_put_ordered_extent(ordered);
6658 goto again;
6659 }
6660
fbf19087
JB
6661 /*
6662 * XXX - page_mkwrite gets called every time the page is dirtied, even
6663 * if it was already dirty, so for space accounting reasons we need to
6664 * clear any delalloc bits for the range we are fixing to save. There
6665 * is probably a better way to do this, but for now keep consistent with
6666 * prepare_pages in the normal write path.
6667 */
2ac55d41 6668 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
32c00aff 6669 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
2ac55d41 6670 0, 0, &cached_state, GFP_NOFS);
fbf19087 6671
2ac55d41
JB
6672 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
6673 &cached_state);
9ed74f2d 6674 if (ret) {
2ac55d41
JB
6675 unlock_extent_cached(io_tree, page_start, page_end,
6676 &cached_state, GFP_NOFS);
9ed74f2d
JB
6677 ret = VM_FAULT_SIGBUS;
6678 goto out_unlock;
6679 }
e6dcd2dc 6680 ret = 0;
9ebefb18
CM
6681
6682 /* page is wholly or partially inside EOF */
a52d9a80 6683 if (page_start + PAGE_CACHE_SIZE > size)
e6dcd2dc 6684 zero_start = size & ~PAGE_CACHE_MASK;
9ebefb18 6685 else
e6dcd2dc 6686 zero_start = PAGE_CACHE_SIZE;
9ebefb18 6687
/* zero the tail beyond EOF so stale data never becomes visible */
e6dcd2dc
CM
6688 if (zero_start != PAGE_CACHE_SIZE) {
6689 kaddr = kmap(page);
6690 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
6691 flush_dcache_page(page);
6692 kunmap(page);
6693 }
247e743c 6694 ClearPageChecked(page);
e6dcd2dc 6695 set_page_dirty(page);
50a9b214 6696 SetPageUptodate(page);
5a3f23d5 6697
257c62e1
CM
6698 BTRFS_I(inode)->last_trans = root->fs_info->generation;
6699 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
6700
2ac55d41 6701 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
9ebefb18
CM
6702
6703out_unlock:
50a9b214
CM
6704 if (!ret)
6705 return VM_FAULT_LOCKED;
9ebefb18 6706 unlock_page(page);
1832a6d5 6707out:
ec39e180 6708 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
9998eb70 6709out_noreserve:
9ebefb18
CM
6710 return ret;
6711}
6712
a41ad394 6713static int btrfs_truncate(struct inode *inode)
39279cc3
CM
6714{
6715 struct btrfs_root *root = BTRFS_I(inode)->root;
fcb80c2a 6716 struct btrfs_block_rsv *rsv;
39279cc3 6717 int ret;
3893e33b 6718 int err = 0;
39279cc3 6719 struct btrfs_trans_handle *trans;
d3c2fdcf 6720 unsigned long nr;
dbe674a9 6721 u64 mask = root->sectorsize - 1;
07127184 6722 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
39279cc3 6723
5d5e103a
JB
6724 ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
6725 if (ret)
a41ad394 6726 return ret;
8082510e 6727
4a096752 6728 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
8082510e 6729 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
39279cc3 6730
fcb80c2a
JB
6731 /*
6732 * Yes ladies and gentelment, this is indeed ugly. The fact is we have
6733 * 3 things going on here
6734 *
6735 * 1) We need to reserve space for our orphan item and the space to
6736 * delete our orphan item. Lord knows we don't want to have a dangling
6737 * orphan item because we didn't reserve space to remove it.
6738 *
6739 * 2) We need to reserve space to update our inode.
6740 *
6741 * 3) We need to have something to cache all the space that is going to
6742 * be free'd up by the truncate operation, but also have some slack
6743 * space reserved in case it uses space during the truncate (thank you
6744 * very much snapshotting).
6745 *
6746 * And we need these to all be seperate. The fact is we can use alot of
6747 * space doing the truncate, and we have no earthly idea how much space
6748 * we will use, so we need the truncate reservation to be seperate so it
6749 * doesn't end up using space reserved for updating the inode or
6750 * removing the orphan item. We also need to be able to stop the
6751 * transaction and start a new one, which means we need to be able to
6752 * update the inode several times, and we have no idea of knowing how
6753 * many times that will be, so we can't just reserve 1 item for the
6754 * entirety of the opration, so that has to be done seperately as well.
6755 * Then there is the orphan item, which does indeed need to be held on
6756 * to for the whole operation, and we need nobody to touch this reserved
6757 * space except the orphan code.
6758 *
6759 * So that leaves us with
6760 *
6761 * 1) root->orphan_block_rsv - for the orphan deletion.
6762 * 2) rsv - for the truncate reservation, which we will steal from the
6763 * transaction reservation.
6764 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
6765 * updating the inode.
6766 */
6767 rsv = btrfs_alloc_block_rsv(root);
6768 if (!rsv)
6769 return -ENOMEM;
4a338542 6770 rsv->size = min_size;
f0cd846e 6771
907cbceb 6772 /*
07127184 6773 * 1 for the truncate slack space
907cbceb
JB
6774 * 1 for the orphan item we're going to add
6775 * 1 for the orphan item deletion
6776 * 1 for updating the inode.
6777 */
fcb80c2a
JB
6778 trans = btrfs_start_transaction(root, 4);
6779 if (IS_ERR(trans)) {
6780 err = PTR_ERR(trans);
6781 goto out;
6782 }
f0cd846e 6783
907cbceb
JB
6784 /* Migrate the slack space for the truncate to our reserve */
6785 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
6786 min_size);
fcb80c2a 6787 BUG_ON(ret);
f0cd846e
JB
6788
6789 ret = btrfs_orphan_add(trans, inode);
6790 if (ret) {
6791 btrfs_end_transaction(trans, root);
fcb80c2a 6792 goto out;
f0cd846e
JB
6793 }
6794
5a3f23d5
CM
6795 /*
6796 * setattr is responsible for setting the ordered_data_close flag,
6797 * but that is only tested during the last file release. That
6798 * could happen well after the next commit, leaving a great big
6799 * window where new writes may get lost if someone chooses to write
6800 * to this file after truncating to zero
6801 *
6802 * The inode doesn't have any dirty data here, and so if we commit
6803 * this is a noop. If someone immediately starts writing to the inode
6804 * it is very likely we'll catch some of their writes in this
6805 * transaction, and the commit will find this file on the ordered
6806 * data list with good things to send down.
6807 *
6808 * This is a best effort solution, there is still a window where
6809 * using truncate to replace the contents of the file will
6810 * end up with a zero length file after a crash.
6811 */
72ac3c0d
JB
6812 if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
6813 &BTRFS_I(inode)->runtime_flags))
5a3f23d5
CM
6814 btrfs_add_ordered_operation(trans, root, inode);
6815
8082510e 6816 while (1) {
36ba022a 6817 ret = btrfs_block_rsv_refill(root, rsv, min_size);
907cbceb
JB
6818 if (ret) {
6819 /*
6820 * This can only happen with the original transaction we
6821 * started above, every other time we shouldn't have a
6822 * transaction started yet.
6823 */
6824 if (ret == -EAGAIN)
6825 goto end_trans;
6826 err = ret;
6827 break;
6828 }
6829
d68fc57b 6830 if (!trans) {
907cbceb
JB
6831 /* Just need the 1 for updating the inode */
6832 trans = btrfs_start_transaction(root, 1);
fcb80c2a 6833 if (IS_ERR(trans)) {
7041ee97
JB
6834 ret = err = PTR_ERR(trans);
6835 trans = NULL;
6836 break;
fcb80c2a 6837 }
d68fc57b
YZ
6838 }
6839
907cbceb
JB
6840 trans->block_rsv = rsv;
6841
8082510e
YZ
6842 ret = btrfs_truncate_inode_items(trans, root, inode,
6843 inode->i_size,
6844 BTRFS_EXTENT_DATA_KEY);
3893e33b
JB
6845 if (ret != -EAGAIN) {
6846 err = ret;
8082510e 6847 break;
3893e33b 6848 }
39279cc3 6849
fcb80c2a 6850 trans->block_rsv = &root->fs_info->trans_block_rsv;
8082510e 6851 ret = btrfs_update_inode(trans, root, inode);
3893e33b
JB
6852 if (ret) {
6853 err = ret;
6854 break;
6855 }
907cbceb 6856end_trans:
8082510e
YZ
6857 nr = trans->blocks_used;
6858 btrfs_end_transaction(trans, root);
d68fc57b 6859 trans = NULL;
8082510e 6860 btrfs_btree_balance_dirty(root, nr);
8082510e
YZ
6861 }
6862
6863 if (ret == 0 && inode->i_nlink > 0) {
fcb80c2a 6864 trans->block_rsv = root->orphan_block_rsv;
8082510e 6865 ret = btrfs_orphan_del(trans, inode);
3893e33b
JB
6866 if (ret)
6867 err = ret;
ded5db9d
JB
6868 } else if (ret && inode->i_nlink > 0) {
6869 /*
6870 * Failed to do the truncate, remove us from the in memory
6871 * orphan list.
6872 */
6873 ret = btrfs_orphan_del(NULL, inode);
8082510e
YZ
6874 }
6875
917c16b2
CM
6876 if (trans) {
6877 trans->block_rsv = &root->fs_info->trans_block_rsv;
6878 ret = btrfs_update_inode(trans, root, inode);
6879 if (ret && !err)
6880 err = ret;
7b128766 6881
917c16b2 6882 nr = trans->blocks_used;
7ad85bb7 6883 ret = btrfs_end_transaction(trans, root);
917c16b2
CM
6884 btrfs_btree_balance_dirty(root, nr);
6885 }
fcb80c2a
JB
6886
6887out:
6888 btrfs_free_block_rsv(root, rsv);
6889
3893e33b
JB
6890 if (ret && !err)
6891 err = ret;
a41ad394 6892
3893e33b 6893 return err;
39279cc3
CM
6894}
6895
/*
 * create a new subvolume directory/inode (helper for the ioctl).
 *
 * Creates the root directory inode of a freshly made subvolume inside
 * @new_root, owned by the caller-supplied @trans.  The ".." name is used
 * because a subvolume root has no real parent entry of its own.
 *
 * Returns 0 on success or a negative errno (PTR_ERR from inode creation
 * or the btrfs_update_inode() result).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root, u64 new_dirid)
{
	struct inode *inode;
	int err;
	u64 index = 0;

	/* the new dir is its own parent: objectid == parent == new_dirid */
	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
				new_dirid, new_dirid,
				S_IFDIR | (~current_umask() & S_IRWXUGO),
				&index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	/* a subvolume root dir has exactly one link and starts empty */
	set_nlink(inode, 1);
	btrfs_i_size_write(inode, 0);

	err = btrfs_update_inode(trans, new_root, inode);

	/* drop our reference; the ioctl path re-looks the inode up */
	iput(inode);
	return err;
}
6923
/*
 * super_operations->alloc_inode: allocate a btrfs_inode from its slab and
 * initialize every per-inode field that the constructor (init_once) does
 * not cover, then hand the embedded VFS inode back to the VFS.
 *
 * Returns the embedded struct inode, or NULL on allocation failure.
 */
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	/* btrfs bookkeeping: transaction/log generations and byte counters */
	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;	/* dir index not yet loaded */
	ei->last_unlink_trans = 0;

	/* extent accounting protected by ei->lock */
	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	ei->reserved_extents = 0;

	ei->runtime_flags = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	/* per-inode extent trees hang off the VFS inode's address space */
	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(&ei->io_tree, &inode->i_data);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
	ei->io_tree.track_uptodate = 1;
	ei->io_failure_tree.track_uptodate = 1;
	mutex_init(&ei->log_mutex);
	mutex_init(&ei->delalloc_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->ordered_operations);
	RB_CLEAR_NODE(&ei->rb_node);

	return inode;
}
6969
/*
 * RCU callback that actually frees the btrfs_inode slab object once a
 * grace period has elapsed after btrfs_destroy_inode() queued it.
 */
static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
6975
/*
 * super_operations->destroy_inode: tear down btrfs-specific inode state.
 *
 * Warns on any accounting that should already be zero, unhooks the inode
 * from the ordered-operation and orphan bookkeeping, drains leftover
 * ordered extents, and finally frees the slab object via RCU.
 */
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* all of these must be fully accounted for before destruction */
	WARN_ON(!list_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(BTRFS_I(inode)->outstanding_extents);
	WARN_ON(BTRFS_I(inode)->reserved_extents);
	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
	WARN_ON(BTRFS_I(inode)->csum_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	/*
	 * Make sure we're properly removed from the ordered operation
	 * lists.
	 */
	smp_mb();
	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
		spin_lock(&root->fs_info->ordered_extent_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_extent_lock);
	}

	/* shouldn't normally happen: inode dies while still an orphan */
	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
		     &BTRFS_I(inode)->runtime_flags)) {
		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
		       (unsigned long long)btrfs_ino(inode));
		atomic_dec(&root->orphan_inodes);
	}

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			printk(KERN_ERR "btrfs found ordered "
			       "extent %llu %llu on inode cleanup\n",
			       (unsigned long long)ordered->file_offset,
			       (unsigned long long)ordered->len);
			btrfs_remove_ordered_extent(inode, ordered);
			/*
			 * two puts: one for the lookup reference above and
			 * one for the reference the tree was holding.
			 */
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	inode_tree_del(inode);
	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
free:
	btrfs_remove_delayed_node(inode);
	/* defer the slab free past an RCU grace period (RCU path walk) */
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}
7034
45321ac5 7035int btrfs_drop_inode(struct inode *inode)
76dda93c
YZ
7036{
7037 struct btrfs_root *root = BTRFS_I(inode)->root;
45321ac5 7038
0af3d00b 7039 if (btrfs_root_refs(&root->root_item) == 0 &&
83eea1f1 7040 !btrfs_is_free_space_inode(inode))
45321ac5 7041 return 1;
76dda93c 7042 else
45321ac5 7043 return generic_drop_inode(inode);
76dda93c
YZ
7044}
7045
0ee0fda0 7046static void init_once(void *foo)
39279cc3
CM
7047{
7048 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
7049
7050 inode_init_once(&ei->vfs_inode);
7051}
7052
/*
 * Destroy every slab cache created by btrfs_init_cachep().  Also used as
 * the failure path of btrfs_init_cachep(), so each pointer is checked:
 * on this kernel version kmem_cache_destroy() does not accept NULL.
 */
void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
	if (btrfs_free_space_cachep)
		kmem_cache_destroy(btrfs_free_space_cachep);
}
7066
/*
 * Create the slab caches btrfs uses for its core objects.  Any failure
 * tears down whatever was already created (btrfs_destroy_cachep copes
 * with partially initialized state) and returns -ENOMEM.
 *
 * Only the inode cache gets a constructor (init_once); the others are
 * fully initialized at allocation time by their users.
 */
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
			sizeof(struct btrfs_transaction), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
			sizeof(struct btrfs_path), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
			sizeof(struct btrfs_free_space), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
7104
7105static int btrfs_getattr(struct vfsmount *mnt,
7106 struct dentry *dentry, struct kstat *stat)
7107{
7108 struct inode *inode = dentry->d_inode;
fadc0d8b
DS
7109 u32 blocksize = inode->i_sb->s_blocksize;
7110
39279cc3 7111 generic_fillattr(inode, stat);
0ee5dc67 7112 stat->dev = BTRFS_I(inode)->root->anon_dev;
d6667462 7113 stat->blksize = PAGE_CACHE_SIZE;
fadc0d8b
DS
7114 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
7115 ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
39279cc3
CM
7116 return 0;
7117}
7118
75e7cb7f
LB
7119/*
7120 * If a file is moved, it will inherit the cow and compression flags of the new
7121 * directory.
7122 */
7123static void fixup_inode_flags(struct inode *dir, struct inode *inode)
7124{
7125 struct btrfs_inode *b_dir = BTRFS_I(dir);
7126 struct btrfs_inode *b_inode = BTRFS_I(inode);
7127
7128 if (b_dir->flags & BTRFS_INODE_NODATACOW)
7129 b_inode->flags |= BTRFS_INODE_NODATACOW;
7130 else
7131 b_inode->flags &= ~BTRFS_INODE_NODATACOW;
7132
bc178237 7133 if (b_dir->flags & BTRFS_INODE_COMPRESS) {
75e7cb7f 7134 b_inode->flags |= BTRFS_INODE_COMPRESS;
bc178237
LB
7135 b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
7136 } else {
7137 b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
7138 BTRFS_INODE_NOCOMPRESS);
7139 }
75e7cb7f
LB
7140}
7141
/*
 * inode_operations->rename for btrfs.
 *
 * Handles plain renames within a root as well as moving subvolume links
 * (which may cross roots).  The whole operation runs in one transaction;
 * the tree log is pinned around the unlink/add-link pair so a log commit
 * cannot observe the name missing from both locations.
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = CURRENT_TIME;
	u64 index = 0;
	u64 root_objectid;
	int ret;
	u64 old_ino = btrfs_ino(old_inode);

	/* can't rename into the placeholder dir of a deleted subvolume */
	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	/*
	 * we're using rename to replace one file with another.
	 * and the replacement file is large. Start IO on it now so
	 * we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(old_inode->i_mapping);

	/* close the racy window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&root->fs_info->subvol_sem);
	/*
	 * We want to reserve the absolute worst case amount of items. So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume their normal
	 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 20);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root)
		btrfs_record_root_in_trans(trans, dest);

	/* reserve the dir index the new name will occupy */
	ret = btrfs_set_inode_index(new_dir, &index);
	if (ret)
		goto out_fail;

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		root->fs_info->last_trans_log_full_commit = trans->transid;
	} else {
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(new_dir), index);
		if (ret)
			goto out_fail;
		/*
		 * this is an ugly little race, but the rename is required
		 * to make sure that if we crash, the inode is either at the
		 * old name or the new one. pinning the log transaction lets
		 * us make sure we don't allow a log commit to come in after
		 * we unlink the name but before we add the new name back in.
		 */
		btrfs_pin_log_trans(root);
	}
	/*
	 * make sure the inode gets flushed if it is replacing
	 * something.
	 */
	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
		btrfs_add_ordered_operation(trans, root, old_inode);

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

	/* remove the old name; subvolume links need special handling */
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
					  old_dentry->d_name.name,
					  old_dentry->d_name.len);
	} else {
		ret = __btrfs_unlink_inode(trans, root, old_dir,
					   old_dentry->d_inode,
					   old_dentry->d_name.name,
					   old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
	}
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_fail;
	}

	/* unlink the target name if the rename replaces an existing entry */
	if (new_inode) {
		inode_inc_iversion(new_inode);
		new_inode->i_ctime = CURRENT_TIME;
		if (unlikely(btrfs_ino(new_inode) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			root_objectid = BTRFS_I(new_inode)->location.objectid;
			ret = btrfs_unlink_subvol(trans, dest, new_dir,
						  root_objectid,
						  new_dentry->d_name.name,
						  new_dentry->d_name.len);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, dest, new_dir,
						 new_dentry->d_inode,
						 new_dentry->d_name.name,
						 new_dentry->d_name.len);
		}
		if (!ret && new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
			BUG_ON(ret);
		}
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_fail;
		}
	}

	fixup_inode_flags(new_dir, old_inode);

	ret = btrfs_add_link(trans, new_dir, old_inode,
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_fail;
	}

	/* log the new name and release the log pin taken above */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct dentry *parent = new_dentry->d_parent;
		btrfs_log_new_name(trans, old_inode, old_dir, parent);
		btrfs_end_log_trans(root);
	}
out_fail:
	btrfs_end_transaction(trans, root);
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&root->fs_info->subvol_sem);

	return ret;
}
7308
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 *
 * @delay_iput: when nonzero, final inode references are dropped via the
 * delayed-iput machinery instead of iput() directly (needed when the
 * caller cannot tolerate eviction work in this context).
 *
 * After kicking off writeback it waits for the async submit workers to
 * drain so that ordered extents exist for everything before returning.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		/* igrab fails when the inode is being freed; just unlist it */
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		/* drop the lock: filemap_flush and iput can sleep */
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}
7356
/*
 * inode_operations->symlink: create a symlink whose target string is
 * stored as an inline file extent (including the trailing NUL), so the
 * target must fit in one leaf's inline data area.
 *
 * Returns 0 on success or a negative errno.
 */
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	/* name_len counts the trailing NUL, which is stored inline too */
	name_len = strlen(symname) + 1;
	/*
	 * NOTE(review): with the NUL included, a target of exactly
	 * BTRFS_MAX_INLINE_DATA_SIZE bytes is accepted here — confirm the
	 * inline item really has room for that boundary case.
	 */
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	if (drop_inode)
		goto out_unlock;

	/* insert the inline extent item holding the target string */
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		drop_inode = 1;
		goto out_unlock;
	}
	key.objectid = btrfs_ino(inode);
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		btrfs_free_path(path);
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/* now flip the inode over to symlink semantics */
	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	/* i_size excludes the NUL, matching what readlink reports */
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	if (!err)
		d_instantiate(dentry, inode);
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
16432985 7481
/*
 * Preallocate @num_bytes of space for @inode starting at file offset
 * @start, as PREALLOC file extents (shared worker for fallocate and the
 * free-space-cache code).
 *
 * When @trans is NULL each allocated chunk runs in its own transaction
 * (own_trans); otherwise the caller's transaction is used throughout.
 * Unless FALLOC_FL_KEEP_SIZE is set, i_size is pushed forward as chunks
 * land, capped at @actual_len.  *@alloc_hint is advanced so successive
 * chunks are placed near each other.
 *
 * Returns 0 on success or a negative errno; a partially completed
 * preallocation leaves the already-inserted extents in place.
 */
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		/* may return less than num_bytes, but at least min_size */
		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
					   0, *alloc_hint, &ins, 1);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}
		/* invalidate cached mappings over the range we just wrote */
		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		/* grow i_size up to actual_len unless KEEP_SIZE was asked */
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);

		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}
7559
/*
 * Preallocate a file range, letting the worker start and end its own
 * transactions (trans == NULL).  See __btrfs_prealloc_file_range().
 */
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}
7568
/*
 * Preallocate a file range inside the caller's already-running
 * transaction @trans.  See __btrfs_prealloc_file_range().
 */
int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}
7577
/*
 * address_space_operations->set_page_dirty: btrfs pages carry no
 * buffer heads, so the nobuffers variant is all that's needed.
 */
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
7582
10556cb2 7583static int btrfs_permission(struct inode *inode, int mask)
fdebe2bd 7584{
b83cc969 7585 struct btrfs_root *root = BTRFS_I(inode)->root;
cb6db4e5 7586 umode_t mode = inode->i_mode;
b83cc969 7587
cb6db4e5
JM
7588 if (mask & MAY_WRITE &&
7589 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
7590 if (btrfs_root_readonly(root))
7591 return -EROFS;
7592 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
7593 return -EACCES;
7594 }
2830ba7f 7595 return generic_permission(inode, mask);
fdebe2bd 7596}
39279cc3 7597
6e1d5dcc 7598static const struct inode_operations btrfs_dir_inode_operations = {
3394e160 7599 .getattr = btrfs_getattr,
39279cc3
CM
7600 .lookup = btrfs_lookup,
7601 .create = btrfs_create,
7602 .unlink = btrfs_unlink,
7603 .link = btrfs_link,
7604 .mkdir = btrfs_mkdir,
7605 .rmdir = btrfs_rmdir,
7606 .rename = btrfs_rename,
7607 .symlink = btrfs_symlink,
7608 .setattr = btrfs_setattr,
618e21d5 7609 .mknod = btrfs_mknod,
95819c05
CH
7610 .setxattr = btrfs_setxattr,
7611 .getxattr = btrfs_getxattr,
5103e947 7612 .listxattr = btrfs_listxattr,
95819c05 7613 .removexattr = btrfs_removexattr,
fdebe2bd 7614 .permission = btrfs_permission,
4e34e719 7615 .get_acl = btrfs_get_acl,
39279cc3 7616};
/*
 * inode operations for read-only directories (e.g. the subvolume tree
 * root placeholder): lookup and permission/ACL checks only, no ops that
 * could modify the directory.
 */
static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
};
76dda93c 7622
/* file operations for open directory handles. */
static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,	/* reads on a dir fd return -EISDIR */
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	/* same handler for 32-bit callers on a 64-bit kernel */
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release        = btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
7634
/*
 * Hooks the generic extent_io code calls back into btrfs while handling
 * page/bio state for data inodes (delalloc fill, checksumming at the
 * bio level, ordered-extent completion, and extent-state accounting).
 */
static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};
7647
/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};
7671
/*
 * address_space operations for symlink inodes: read/write of the single
 * target page only; no writepages/readahead/direct-IO paths needed.
 */
static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};
7678
/* inode operations for regular files. */
static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr      = btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
	/* timestamp updates must run inside a btrfs transaction */
	.update_time	= btrfs_update_time,
};
/* inode operations for special files (device nodes, FIFOs, sockets). */
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.get_acl	= btrfs_get_acl,
	/* timestamp updates must run inside a btrfs transaction */
	.update_time	= btrfs_update_time,
};
/*
 * inode operations for symlinks; target resolution uses the generic
 * page-cache helpers since the link body lives in the file data.
 */
static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.get_acl	= btrfs_get_acl,
	/* timestamp updates must run inside a btrfs transaction */
	.update_time	= btrfs_update_time,
};
76dda93c 7716
/*
 * dentry operations; non-static because it is referenced from other
 * btrfs files when setting up superblock/dentry state.
 */
const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
	.d_release	= btrfs_dentry_release,
};