 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "compression.h"
#include "free-space-cache.h"
#include "inode-map.h"
struct btrfs_iget_args {
	struct btrfs_key *location;
	struct btrfs_root *root;
};
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
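
/*
 * Illustrative sketch, not part of the original file: the table above is
 * meant to be indexed by shifting the S_IFMT format bits of an inode's mode
 * down by S_SHIFT.  The helper name below is hypothetical:
 */
static inline u8 btrfs_ftype_for_mode_sketch(umode_t mode)
{
	return btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}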
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
					   u64 len, u64 orig_start,
					   u64 block_start, u64 block_len,
					   u64 orig_block_len, u64 ram_bytes,
					   int type);

static int btrfs_dirty_inode(struct inode *inode);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}
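
/*
 * Usage note, illustrative only and not from the original file: this helper
 * is expected to run during inode creation, after the inode item exists but
 * before the new inode is exposed, roughly as in
 *
 *	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
 *
 * so the ACL inherited from @dir and the security xattr land in the same
 * transaction as the inode itself.
 */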
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, int extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	inode_add_bytes(inode, size);

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = start;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	return err;
}
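
/*
 * Illustrative note, not part of the original file: the item size reserved
 * for an inline extent is the fixed file-extent header up to the embedded
 * data plus the (possibly compressed) payload, conceptually
 *
 *	datasize = BTRFS_FILE_EXTENT_INLINE_DATA_START + cur_size;
 *
 * which is what btrfs_file_extent_calc_inline_size() is assumed to expand
 * to in this era of the code.
 */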
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
					  struct inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, root->sectorsize);
	u64 data_len = inline_len;
	int ret;
	struct btrfs_path *path;
	int extent_inserted = 0;
	u32 extent_item_size;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end > PAGE_CACHE_SIZE ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	if (compressed_size && compressed_pages)
		extent_item_size = btrfs_file_extent_calc_inline_size(
		   compressed_size);
	else
		extent_item_size = btrfs_file_extent_calc_inline_size(
		    inline_len);

	ret = __btrfs_drop_extents(trans, root, inode, path,
				   start, aligned_end, NULL,
				   1, 1, extent_item_size, &extent_inserted);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, path, extent_inserted,
				   root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
	/*
	 * Don't forget to free the reserved space; an inlined extent
	 * doesn't count as a data extent, so free it directly here.
	 * At reserve time the space is always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans, root);
	return ret;
}
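
/*
 * A minimal sketch, not in the original file, of the visible parts of the
 * eligibility test above rewritten as a hypothetical predicate (names mirror
 * the locals in cow_file_range_inline()): data must start at file offset 0,
 * end within the first page, fit both the per-leaf inline limit and the
 * max_inline mount option, and a non-compressed extent must not end exactly
 * on a sector boundary.
 */
static inline bool can_inline_sketch(struct btrfs_root *root, u64 start,
				     u64 actual_end, u64 data_len,
				     size_t compressed_size)
{
	if (start > 0 || actual_end > PAGE_CACHE_SIZE)
		return false;
	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    data_len > root->fs_info->max_inline)
		return false;
	if (!compressed_size && (actual_end & (root->sectorsize - 1)) == 0)
		return false;
	return true;
}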
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
static inline int inode_need_compress(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* force compress */
	if (btrfs_test_opt(root, FORCE_COMPRESS))
		return 1;
	/* bad compression ratios */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(root, COMPRESS) ||
	    BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
	    BTRFS_I(inode)->force_compress)
		return 1;
	return 0;
}
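
/*
 * Illustrative summary, not in the original file: the precedence above is
 * mount-wide force-compress first, then the per-inode NOCOMPRESS flag set
 * after bad ratios, then any opt-in source.  As a boolean sketch:
 *
 *	need = FORCE_COMPRESS ||
 *	       (!NOCOMPRESS &&
 *	        (COMPRESS_OPT || INODE_COMPRESS || force_compress));
 */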
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
					 struct page *locked_page,
					 u64 start, u64 end,
					 struct async_cow *async_cow,
					 int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;
	int redirty = 0;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 */
		extent_range_clear_dirty_for_io(inode, start, end);
		redirty = 1;
		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* let's try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(root, inode, start, end,
						    0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(root, inode, start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DEFRAG;
			unsigned long page_error_op;

			clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode, start, end, NULL,
						     clear_flags, PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, so round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things.
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  Redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		if (redirty)
			extent_range_redirty_for_io(inode, start, end);
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

	return;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);
}
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		page_cache_release(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
					       struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		ret = btrfs_reserve_extent(root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1, 1);
		if (ret) {
			free_async_extent_pages(async_extent);

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);

				/*
				 * we need to redirty the pages if we decide to
				 * fall back to uncompressed IO, otherwise we
				 * will not submit these pages down to lower
				 * layers.
				 */
				extent_range_redirty_for_io(inode,
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1);

				goto retry;
			}
			goto out_free;
		}
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_free_reserve;
		}
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = async_extent->ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		if (ret)
			goto out_free_reserve;

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret) {
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
			goto out_free_reserve;
		}

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		if (ret) {
			struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
			struct page *p = async_extent->pages[0];
			const u64 start = async_extent->start;
			const u64 end = start + async_extent->ram_size - 1;

			p->mapping = inode->i_mapping;
			tree->ops->writepage_end_io_hook(p, start, end,
							 NULL, 0);
			p->mapping = NULL;
			extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
						     PAGE_END_WRITEBACK |
						     PAGE_SET_ERROR);
			free_async_extent_pages(async_extent);
		}
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	return;
out_free_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
				     PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	goto again;
}
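
/*
 * Worked illustration, not from the original file: suppose a 128K logical
 * range compressed down to a single 16K on-disk allocation.  The extent_map
 * built above would then carry
 *
 *	em->start     = async_extent->start;  (logical file offset)
 *	em->len       = 128 * 1024;           (uncompressed length)
 *	em->block_len = 16 * 1024;            (ins.offset, compressed bytes)
 *	em->ram_bytes = 128 * 1024;
 *
 * so readers know both where the compressed bytes live on disk and how much
 * data they decompress into.
 */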
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(root, inode, start, end, 0, 0,
					    NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode, start, end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;

		em = alloc_extent_map();
		if (!em) {
			ret = -ENOMEM;
			goto out_reserve;
		}
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;
		em->mod_start = em->start;
		em->mod_len = em->len;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ram_size;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		em->generation = -1;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}
		if (ret)
			goto out_reserve;

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret)
			goto out_drop_extent_cache;

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret)
				goto out_drop_extent_cache;
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * set up for writepage
		 */
		op = unlock ? PAGE_UNLOCK : 0;
		op |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start,
					     start + ram_size - 1, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
				     EXTENT_DELALLOC | EXTENT_DEFRAG,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	goto out;
}
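
/*
 * Worked example, illustrative only: with a 4K blocksize, a delalloc range
 * of start = 0, end = 10239 gives
 *
 *	num_bytes = ALIGN(10240, 4096) = 12288
 *
 * so the loop above carves on-disk extents until the whole rounded-up range
 * is covered, bumping alloc_hint after each allocation to keep successive
 * extents close together on disk.
 */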
/*
 * work queue callback to start compression on a file and its pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;

	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}
/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	/*
	 * atomic_sub_return implies a barrier for waitqueue_active
	 */
	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;

	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
		    !btrfs_test_opt(root, FORCE_COMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		btrfs_init_work(&async_cow->work,
				btrfs_delalloc_helper,
				async_cow_start, async_cow_submit,
				async_cow_free);

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_work(root->fs_info->delalloc_workers,
				 &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
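
/*
 * Worked example, illustrative only: a 1.2 MiB compressible range is split
 * by the loop above into 512 KiB work items:
 *
 *	[0, 512K-1], [512K, 1M-1], [1M, 1.2M-1]
 *
 * Each item is queued separately so compression can spread across CPUs,
 * while the ordered work queue preserves the on-disk submission order.
 */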
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}
/*
 * when nocow writeback is requested, the callbacks end up here.  This checks
 * for snapshots or COW copies of the extents that exist in the file, and
 * COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode, start, end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino)
			break;
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * if there are pending snapshots for this root,
			 * we fall back to the common COW path.
			 */
			if (!nolock) {
				err = btrfs_start_write_no_snapshoting(root);
				if (!err)
					goto out_check;
			}
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf,
						     path->slots[0], fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			if (!nolock && nocow)
				btrfs_end_write_no_snapshoting(root);
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page,
					     cow_start, found_key.offset - 1,
					     page_started, nr_written, 1);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = found_key.offset - extent_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->orig_block_len = disk_num_bytes;
			em->ram_bytes = ram_bytes;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			em->mod_start = em->start;
			em->mod_len = em->len;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			set_bit(EXTENT_FLAG_FILLING, &em->flags);
			em->generation = -1;
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em, 1);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, cur_offset,
					     cur_offset + num_bytes - 1,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC, PAGE_UNLOCK |
					     PAGE_SET_PRIVATE2);
		if (!nolock && nocow)
			btrfs_end_write_no_snapshoting(root);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret)
			goto error;
	}

error:
	err = btrfs_end_transaction(trans, root);
	if (!ret)
		ret = err;

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}
static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
{

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
		return 0;

	/*
	 * @defrag_bytes is a hint value, no spinlock held here;
	 * if it is not zero, it means the file is being defragged.
	 * Force COW if the given extent needs to be defragged.
	 */
	if (BTRFS_I(inode)->defrag_bytes &&
	    test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			   EXTENT_DEFRAG, 0, NULL))
		return 1;

	return 0;
}
/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	int force_cow = need_force_cow(inode, start, end);

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!inode_need_compress(inode)) {
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}
static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > BTRFS_MAX_EXTENT_SIZE) {
		u64 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_extent_hook, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
					BTRFS_MAX_EXTENT_SIZE);
		new_size = split - orig->start;
		num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
					 BTRFS_MAX_EXTENT_SIZE);
		if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
			      BTRFS_MAX_EXTENT_SIZE) >= num_extents)
			return;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}
/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	u64 new_size, old_size;
	u64 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents--;
		spin_unlock(&BTRFS_I(inode)->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent.  If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop.  Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return.  But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
				BTRFS_MAX_EXTENT_SIZE);
	old_size = new->end - new->start + 1;
	num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
				 BTRFS_MAX_EXTENT_SIZE);

	if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
		      BTRFS_MAX_EXTENT_SIZE) >= num_extents)
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}
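
/*
 * Worked arithmetic, illustrative only, assuming BTRFS_MAX_EXTENT_SIZE is
 * 128M: merging two (128M + 4K) ranges gives
 *
 *	per side:  ceil((128M + 4K) / 128M) = 2 extents each, 4 total
 *	merged:    ceil((256M + 8K) / 128M) = 3 extents
 *
 * 3 < 4, so one outstanding extent is dropped above; for [4K][128M] both
 * sides sum to 2 and the merged range also needs 2, so nothing is dropped.
 */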
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct inode *inode)
{
	spin_lock(&root->delalloc_lock);
	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
			      &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			&BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&root->fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &root->fs_info->delalloc_roots);
			spin_unlock(&root->fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct inode *inode)
{
	spin_lock(&root->delalloc_lock);
	if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			spin_lock(&root->fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&root->fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, unsigned *bits)
{

	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/* For sanity tests */
		if (btrfs_test_is_dummy_root(root))
			return;

		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (*bits & EXTENT_DEFRAG)
			BTRFS_I(inode)->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state,
				 unsigned *bits)
{
	u64 len = state->end + 1 - state->start;
	u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
				    BTRFS_MAX_EXTENT_SIZE);

	spin_lock(&BTRFS_I(inode)->lock);
	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
		BTRFS_I(inode)->defrag_bytes -= len;
	spin_unlock(&BTRFS_I(inode)->lock);

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents -= num_extents;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (*bits & EXTENT_DO_ACCOUNTING &&
		    root != root->fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len);

		/* For sanity tests. */
		if (btrfs_test_is_dummy_root(root))
			return;

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list && !(state->state & EXTENT_NORESERVE))
			btrfs_free_reserved_data_space_noquota(inode,
					state->start, len);

		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
				     root->fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes -= len;
		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			     &BTRFS_I(inode)->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}
/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_iter.bi_size;
	map_length = length;
	ret = btrfs_map_block(root->fs_info, rw, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}
/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
	int ret = 0;
	int skip_sum;
	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(inode))
		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;

	if (!(rw & REQ_WRITE)) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
		if (ret)
			goto out;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			ret = btrfs_submit_compressed_read(inode, bio,
							   mirror_num,
							   bio_flags);
			goto out;
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				goto out;
		}
		goto mapit;
	} else if (async && !skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					  inode, rw, bio, mirror_num,
					  bio_flags, bio_offset,
					  __btrfs_submit_bio_start,
					  __btrfs_submit_bio_done);
		goto out;
	} else if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
		if (ret)
			goto out;
	}

mapit:
	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);

out:
	if (ret < 0) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}
/*
 * given a list of ordered sums, record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = 1;
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
		trans->adding_csums = 0;
	}
	return 0;
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state)
{
	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state, GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, page_start,
					   PAGE_CACHE_SIZE);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
	kfree(fixup);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly set up for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	btrfs_init_work(&fixup->work, btrfs_fixup_helper,
			btrfs_writepage_fixup_worker, NULL, NULL);
	fixup->page = page;
	btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
	return -EBUSY;
}
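
/*
 * Flow note, illustrative only: returning -EBUSY above tells the writepage
 * path to back off this page for now.  The queued worker re-takes the page
 * lock, waits out any ordered extent, re-reserves delalloc space and marks
 * the page dirty again, after which a later writeback pass sends it down
 * through the normal COW path.
 */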
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	int extent_inserted = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * The caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
				   file_pos + num_bytes, NULL, 0,
				   1, sizeof(*fi), &extent_inserted);
	if (ret)
		goto out;

	if (!extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root,
					root->root_key.objectid,
					btrfs_ino(inode), file_pos,
					&ins);
	/*
	 * Release the reserved range from the inode dirty range map, as it is
	 * already moved into the delayed_ref_head
	 */
	btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
out:
	btrfs_free_path(path);

	return ret;
}
/* snapshot-aware defrag */
struct sa_defrag_extent_backref {
	struct rb_node node;
	struct old_sa_defrag_extent *old;
	u64 root_id;
	u64 inum;
	u64 file_pos;
	u64 extent_offset;
	u64 num_bytes;
	u64 generation;
};

struct old_sa_defrag_extent {
	struct list_head list;
	struct new_sa_defrag_extent *new;

	u64 extent_offset;
	u64 bytenr;
	u64 offset;
	u64 len;
	int count;
};

struct new_sa_defrag_extent {
	struct rb_root root;
	struct list_head head;
	struct btrfs_path *path;
	struct inode *inode;
	u64 file_pos;
	u64 len;
	u64 bytenr;
	u64 disk_len;
	u8 compress_type;
};
static int backref_comp(struct sa_defrag_extent_backref *b1,
			struct sa_defrag_extent_backref *b2)
{
	if (b1->root_id < b2->root_id)
		return -1;
	else if (b1->root_id > b2->root_id)
		return 1;

	if (b1->inum < b2->inum)
		return -1;
	else if (b1->inum > b2->inum)
		return 1;

	if (b1->file_pos < b2->file_pos)
		return -1;
	else if (b1->file_pos > b2->file_pos)
		return 1;

	/*
	 * [------------------------------] ===> (a range of space)
	 *     |<--->|   |<---->| =============> (fs/file tree A)
	 * |<---------------------------->| ===> (fs/file tree B)
	 *
	 * A range of space can refer to two file extents in one tree while
	 * referring to only one file extent in another tree.
	 *
	 * So we may process a disk offset more than once (two extents in A)
	 * and land on the same extent (one extent in B), then insert two
	 * identical backrefs (both referring to the extent in B).
	 */
	return 0;
}
2217 static void backref_insert(struct rb_root *root,
2218 struct sa_defrag_extent_backref *backref)
2220 struct rb_node **p = &root->rb_node;
2221 struct rb_node *parent = NULL;
2222 struct sa_defrag_extent_backref *entry;
2227 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2229 ret = backref_comp(backref, entry);
2233 p = &(*p)->rb_right;
2236 rb_link_node(&backref->node, parent, p);
2237 rb_insert_color(&backref->node, root);
2241 * Note the backref might have changed, and in this case we just return 0.
2242 */
2243 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2244 void *ctx)
2246 struct btrfs_file_extent_item *extent;
2247 struct btrfs_fs_info *fs_info;
2248 struct old_sa_defrag_extent *old = ctx;
2249 struct new_sa_defrag_extent *new = old->new;
2250 struct btrfs_path *path = new->path;
2251 struct btrfs_key key;
2252 struct btrfs_root *root;
2253 struct sa_defrag_extent_backref *backref;
2254 struct extent_buffer *leaf;
2255 struct inode *inode = new->inode;
2261 if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2262 inum == btrfs_ino(inode))
2265 key.objectid = root_id;
2266 key.type = BTRFS_ROOT_ITEM_KEY;
2267 key.offset = (u64)-1;
2269 fs_info = BTRFS_I(inode)->root->fs_info;
2270 root = btrfs_read_fs_root_no_name(fs_info, &key);
2272 if (PTR_ERR(root) == -ENOENT)
2275 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2276 inum, offset, root_id);
2277 return PTR_ERR(root);
2280 key.objectid = inum;
2281 key.type = BTRFS_EXTENT_DATA_KEY;
2282 if (offset > (u64)-1 << 32)
2283 key.offset = 0;
2284 else
2285 key.offset = offset;
2287 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2288 if (WARN_ON(ret < 0))
2295 leaf = path->nodes[0];
2296 slot = path->slots[0];
2298 if (slot >= btrfs_header_nritems(leaf)) {
2299 ret = btrfs_next_leaf(root, path);
2302 } else if (ret > 0) {
2311 btrfs_item_key_to_cpu(leaf, &key, slot);
2313 if (key.objectid > inum)
2316 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2319 extent = btrfs_item_ptr(leaf, slot,
2320 struct btrfs_file_extent_item);
2322 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2326 * 'offset' refers to the exact key.offset,
2327 * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2328 * (key.offset - extent_offset).
2330 if (key.offset != offset)
2333 extent_offset = btrfs_file_extent_offset(leaf, extent);
2334 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2336 if (extent_offset >= old->extent_offset + old->offset +
2337 old->len || extent_offset + num_bytes <=
2338 old->extent_offset + old->offset)
2343 backref = kmalloc(sizeof(*backref), GFP_NOFS);
2344 if (!backref) {
2345 ret = -ENOMEM;
2346 goto out;
2347 }
2349 backref->root_id = root_id;
2350 backref->inum = inum;
2351 backref->file_pos = offset;
2352 backref->num_bytes = num_bytes;
2353 backref->extent_offset = extent_offset;
2354 backref->generation = btrfs_file_extent_generation(leaf, extent);
2356 backref_insert(&new->root, backref);
2359 btrfs_release_path(path);
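/*
 * Walk all old extents recorded for this defragged range and collect
 * the backrefs that still point at them via
 * iterate_inodes_from_logical(). Old extents with no backrefs left are
 * dropped from the list. Returns false on error or when there is
 * nothing left to relink.
 */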
2364 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2365 struct new_sa_defrag_extent *new)
2367 struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2368 struct old_sa_defrag_extent *old, *tmp;
2373 list_for_each_entry_safe(old, tmp, &new->head, list) {
2374 ret = iterate_inodes_from_logical(old->bytenr +
2375 old->extent_offset, fs_info,
2376 path, record_one_backref,
2378 if (ret < 0 && ret != -ENOENT)
2381 /* no backref to be processed for this extent */
2383 list_del(&old->list);
2388 if (list_empty(&new->head))
2389 return false;
2391 return true;
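/*
 * Check whether a neighbouring file extent item can be merged with the
 * extent we are relinking: same disk bytenr, regular (non-inline)
 * type, same compression, and no encryption or other encoding.
 */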
2394 static int relink_is_mergable(struct extent_buffer *leaf,
2395 struct btrfs_file_extent_item *fi,
2396 struct new_sa_defrag_extent *new)
2398 if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2401 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2404 if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2407 if (btrfs_file_extent_encryption(leaf, fi) ||
2408 btrfs_file_extent_other_encoding(leaf, fi))
2415 * Note the backref might have changed, and in this case we just return 0.
2416 */
2417 static noinline int relink_extent_backref(struct btrfs_path *path,
2418 struct sa_defrag_extent_backref *prev,
2419 struct sa_defrag_extent_backref *backref)
2421 struct btrfs_file_extent_item *extent;
2422 struct btrfs_file_extent_item *item;
2423 struct btrfs_ordered_extent *ordered;
2424 struct btrfs_trans_handle *trans;
2425 struct btrfs_fs_info *fs_info;
2426 struct btrfs_root *root;
2427 struct btrfs_key key;
2428 struct extent_buffer *leaf;
2429 struct old_sa_defrag_extent *old = backref->old;
2430 struct new_sa_defrag_extent *new = old->new;
2431 struct inode *src_inode = new->inode;
2432 struct inode *inode;
2433 struct extent_state *cached = NULL;
2442 if (prev && prev->root_id == backref->root_id &&
2443 prev->inum == backref->inum &&
2444 prev->file_pos + prev->num_bytes == backref->file_pos)
2447 /* step 1: get root */
2448 key.objectid = backref->root_id;
2449 key.type = BTRFS_ROOT_ITEM_KEY;
2450 key.offset = (u64)-1;
2452 fs_info = BTRFS_I(src_inode)->root->fs_info;
2453 index = srcu_read_lock(&fs_info->subvol_srcu);
2455 root = btrfs_read_fs_root_no_name(fs_info, &key);
2457 srcu_read_unlock(&fs_info->subvol_srcu, index);
2458 if (PTR_ERR(root) == -ENOENT)
2460 return PTR_ERR(root);
2463 if (btrfs_root_readonly(root)) {
2464 srcu_read_unlock(&fs_info->subvol_srcu, index);
2468 /* step 2: get inode */
2469 key.objectid = backref->inum;
2470 key.type = BTRFS_INODE_ITEM_KEY;
2473 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2474 if (IS_ERR(inode)) {
2475 srcu_read_unlock(&fs_info->subvol_srcu, index);
2479 srcu_read_unlock(&fs_info->subvol_srcu, index);
2481 /* step 3: relink backref */
2482 lock_start = backref->file_pos;
2483 lock_end = backref->file_pos + backref->num_bytes - 1;
2484 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2487 ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2489 btrfs_put_ordered_extent(ordered);
2493 trans = btrfs_join_transaction(root);
2494 if (IS_ERR(trans)) {
2495 ret = PTR_ERR(trans);
2499 key.objectid = backref->inum;
2500 key.type = BTRFS_EXTENT_DATA_KEY;
2501 key.offset = backref->file_pos;
2503 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2506 } else if (ret > 0) {
2511 extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2512 struct btrfs_file_extent_item);
2514 if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2515 backref->generation)
2518 btrfs_release_path(path);
2520 start = backref->file_pos;
2521 if (backref->extent_offset < old->extent_offset + old->offset)
2522 start += old->extent_offset + old->offset -
2523 backref->extent_offset;
2525 len = min(backref->extent_offset + backref->num_bytes,
2526 old->extent_offset + old->offset + old->len);
2527 len -= max(backref->extent_offset, old->extent_offset + old->offset);
2529 ret = btrfs_drop_extents(trans, root, inode, start,
2534 key.objectid = btrfs_ino(inode);
2535 key.type = BTRFS_EXTENT_DATA_KEY;
2538 path->leave_spinning = 1;
2540 struct btrfs_file_extent_item *fi;
2542 struct btrfs_key found_key;
2544 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2549 leaf = path->nodes[0];
2550 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2552 fi = btrfs_item_ptr(leaf, path->slots[0],
2553 struct btrfs_file_extent_item);
2554 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2556 if (extent_len + found_key.offset == start &&
2557 relink_is_mergable(leaf, fi, new)) {
2558 btrfs_set_file_extent_num_bytes(leaf, fi,
2560 btrfs_mark_buffer_dirty(leaf);
2561 inode_add_bytes(inode, len);
2567 btrfs_release_path(path);
2572 ret = btrfs_insert_empty_item(trans, root, path, &key,
2575 btrfs_abort_transaction(trans, root, ret);
2579 leaf = path->nodes[0];
2580 item = btrfs_item_ptr(leaf, path->slots[0],
2581 struct btrfs_file_extent_item);
2582 btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2583 btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2584 btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2585 btrfs_set_file_extent_num_bytes(leaf, item, len);
2586 btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2587 btrfs_set_file_extent_generation(leaf, item, trans->transid);
2588 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2589 btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2590 btrfs_set_file_extent_encryption(leaf, item, 0);
2591 btrfs_set_file_extent_other_encoding(leaf, item, 0);
2593 btrfs_mark_buffer_dirty(leaf);
2594 inode_add_bytes(inode, len);
2595 btrfs_release_path(path);
2597 ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2599 backref->root_id, backref->inum,
2600 new->file_pos); /* start - extent_offset */
2602 btrfs_abort_transaction(trans, root, ret);
2608 btrfs_release_path(path);
2609 path->leave_spinning = 0;
2610 btrfs_end_transaction(trans, root);
2612 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2613 &cached, GFP_NOFS);
2614 iput(inode);
2615 return ret;
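/*
 * Free a new_sa_defrag_extent along with all the old extent records
 * still hanging off its list.
 */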
2618 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2620 struct old_sa_defrag_extent *old, *tmp;
2625 list_for_each_entry_safe(old, tmp, &new->head, list) {
2626 list_del(&old->list);
2627 kfree(old);
2628 }
2629 kfree(new);
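/*
 * Driver for the relink phase: collect the remaining backrefs, then
 * walk the sorted rb-tree and relink each one to the new extent;
 * 'prev' lets us skip backrefs contiguous with the one just processed.
 * Drops defrag_running and wakes transaction waiters when done.
 */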
2631 static void relink_file_extents(struct new_sa_defrag_extent *new)
2633 struct btrfs_path *path;
2634 struct sa_defrag_extent_backref *backref;
2635 struct sa_defrag_extent_backref *prev = NULL;
2636 struct inode *inode;
2637 struct btrfs_root *root;
2638 struct rb_node *node;
2642 root = BTRFS_I(inode)->root;
2644 path = btrfs_alloc_path();
2648 if (!record_extent_backrefs(path, new)) {
2649 btrfs_free_path(path);
2652 btrfs_release_path(path);
2655 node = rb_first(&new->root);
2658 rb_erase(node, &new->root);
2660 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2662 ret = relink_extent_backref(path, prev, backref);
2675 btrfs_free_path(path);
2677 free_sa_defrag_extent(new);
2679 atomic_dec(&root->fs_info->defrag_running);
2680 wake_up(&root->fs_info->transaction_wait);
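/*
 * Called when a defragged ordered extent completes: record every old
 * file extent overlapping the ordered range so relink_file_extents()
 * can later repoint their backrefs at the newly written extent. Bumps
 * defrag_running on success and returns NULL on failure.
 */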
2683 static struct new_sa_defrag_extent *
2684 record_old_file_extents(struct inode *inode,
2685 struct btrfs_ordered_extent *ordered)
2687 struct btrfs_root *root = BTRFS_I(inode)->root;
2688 struct btrfs_path *path;
2689 struct btrfs_key key;
2690 struct old_sa_defrag_extent *old;
2691 struct new_sa_defrag_extent *new;
2694 new = kmalloc(sizeof(*new), GFP_NOFS);
2695 if (!new)
2696 return NULL;
2698 new->inode = inode;
2699 new->file_pos = ordered->file_offset;
2700 new->len = ordered->len;
2701 new->bytenr = ordered->start;
2702 new->disk_len = ordered->disk_len;
2703 new->compress_type = ordered->compress_type;
2704 new->root = RB_ROOT;
2705 INIT_LIST_HEAD(&new->head);
2707 path = btrfs_alloc_path();
2708 if (!path)
2709 goto out_kfree;
2711 key.objectid = btrfs_ino(inode);
2712 key.type = BTRFS_EXTENT_DATA_KEY;
2713 key.offset = new->file_pos;
2715 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2718 if (ret > 0 && path->slots[0] > 0)
2721 /* find out all the old extents for the file range */
2723 struct btrfs_file_extent_item *extent;
2724 struct extent_buffer *l;
2733 slot = path->slots[0];
2735 if (slot >= btrfs_header_nritems(l)) {
2736 ret = btrfs_next_leaf(root, path);
2744 btrfs_item_key_to_cpu(l, &key, slot);
2746 if (key.objectid != btrfs_ino(inode))
2748 if (key.type != BTRFS_EXTENT_DATA_KEY)
2750 if (key.offset >= new->file_pos + new->len)
2753 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2755 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2756 if (key.offset + num_bytes < new->file_pos)
2759 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2763 extent_offset = btrfs_file_extent_offset(l, extent);
2765 old = kmalloc(sizeof(*old), GFP_NOFS);
2769 offset = max(new->file_pos, key.offset);
2770 end = min(new->file_pos + new->len, key.offset + num_bytes);
2772 old->bytenr = disk_bytenr;
2773 old->extent_offset = extent_offset;
2774 old->offset = offset - key.offset;
2775 old->len = end - offset;
2778 list_add_tail(&old->list, &new->head);
2784 btrfs_free_path(path);
2785 atomic_inc(&root->fs_info->defrag_running);
2786 return new;
2788 out_free_path:
2789 btrfs_release_path(path);
2790 btrfs_free_path(path);
2791 out_kfree:
2792 free_sa_defrag_extent(new);
2793 return NULL;
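/*
 * Drop 'len' bytes from the delalloc accounting of the block group
 * containing 'start'.
 */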
2796 static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
2799 struct btrfs_block_group_cache *cache;
2801 cache = btrfs_lookup_block_group(root->fs_info, start);
2804 spin_lock(&cache->lock);
2805 cache->delalloc_bytes -= len;
2806 spin_unlock(&cache->lock);
2808 btrfs_put_block_group(cache);
2811 /* as ordered data IO finishes, this gets called so we can finish
2812 * an ordered extent if the range of bytes in the file it covers are
2813 * fully written.
2814 */
2815 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2817 struct inode *inode = ordered_extent->inode;
2818 struct btrfs_root *root = BTRFS_I(inode)->root;
2819 struct btrfs_trans_handle *trans = NULL;
2820 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2821 struct extent_state *cached_state = NULL;
2822 struct new_sa_defrag_extent *new = NULL;
2823 int compress_type = 0;
2825 u64 logical_len = ordered_extent->len;
2827 bool truncated = false;
2829 nolock = btrfs_is_free_space_inode(inode);
2831 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2836 btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
2837 ordered_extent->file_offset +
2838 ordered_extent->len - 1);
2840 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2842 logical_len = ordered_extent->truncated_len;
2843 /* Truncated the entire extent, don't bother adding */
2848 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2849 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2852 * For the mwrite (mmap + memset to write) case, we still reserve
2853 * space for the NOCOW range.
2854 * As NOCOW won't cause a new delayed ref, just free the space
2855 */
2856 btrfs_qgroup_free_data(inode, ordered_extent->file_offset,
2857 ordered_extent->len);
2858 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2860 trans = btrfs_join_transaction_nolock(root);
2862 trans = btrfs_join_transaction(root);
2863 if (IS_ERR(trans)) {
2864 ret = PTR_ERR(trans);
2868 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2869 ret = btrfs_update_inode_fallback(trans, root, inode);
2870 if (ret) /* -ENOMEM or corruption */
2871 btrfs_abort_transaction(trans, root, ret);
2875 lock_extent_bits(io_tree, ordered_extent->file_offset,
2876 ordered_extent->file_offset + ordered_extent->len - 1,
2879 ret = test_range_bit(io_tree, ordered_extent->file_offset,
2880 ordered_extent->file_offset + ordered_extent->len - 1,
2881 EXTENT_DEFRAG, 1, cached_state);
2882 if (ret) {
2883 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
/* note: snapshot-aware defrag is deliberately disabled here ("0 &&") */
2884 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2885 /* the inode is shared */
2886 new = record_old_file_extents(inode, ordered_extent);
2888 clear_extent_bit(io_tree, ordered_extent->file_offset,
2889 ordered_extent->file_offset + ordered_extent->len - 1,
2890 EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2894 trans = btrfs_join_transaction_nolock(root);
2896 trans = btrfs_join_transaction(root);
2897 if (IS_ERR(trans)) {
2898 ret = PTR_ERR(trans);
2903 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2905 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2906 compress_type = ordered_extent->compress_type;
2907 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2908 BUG_ON(compress_type);
2909 ret = btrfs_mark_extent_written(trans, inode,
2910 ordered_extent->file_offset,
2911 ordered_extent->file_offset +
2914 BUG_ON(root == root->fs_info->tree_root);
2915 ret = insert_reserved_file_extent(trans, inode,
2916 ordered_extent->file_offset,
2917 ordered_extent->start,
2918 ordered_extent->disk_len,
2919 logical_len, logical_len,
2920 compress_type, 0, 0,
2921 BTRFS_FILE_EXTENT_REG);
2923 btrfs_release_delalloc_bytes(root,
2924 ordered_extent->start,
2925 ordered_extent->disk_len);
2927 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2928 ordered_extent->file_offset, ordered_extent->len,
2931 btrfs_abort_transaction(trans, root, ret);
2935 add_pending_csums(trans, inode, ordered_extent->file_offset,
2936 &ordered_extent->list);
2938 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2939 ret = btrfs_update_inode_fallback(trans, root, inode);
2940 if (ret) { /* -ENOMEM or corruption */
2941 btrfs_abort_transaction(trans, root, ret);
2946 unlock_extent_cached(io_tree, ordered_extent->file_offset,
2947 ordered_extent->file_offset +
2948 ordered_extent->len - 1, &cached_state, GFP_NOFS);
2950 if (root != root->fs_info->tree_root)
2951 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2953 btrfs_end_transaction(trans, root);
2955 if (ret || truncated) {
2959 start = ordered_extent->file_offset + logical_len;
2961 start = ordered_extent->file_offset;
2962 end = ordered_extent->file_offset + ordered_extent->len - 1;
2963 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2965 /* Drop the cache for the part of the extent we didn't write. */
2966 btrfs_drop_extent_cache(inode, start, end, 0);
2969 * If the ordered extent had an IOERR or something else went
2970 * wrong we need to return the space for this ordered extent
2971 * back to the allocator. We only free the extent in the
2972 * truncated case if we didn't write out the extent at all.
2974 if ((ret || !logical_len) &&
2975 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2976 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2977 btrfs_free_reserved_extent(root, ordered_extent->start,
2978 ordered_extent->disk_len, 1);
2983 * This needs to be done to make sure anybody waiting knows we are done
2984 * updating everything for this ordered extent.
2986 btrfs_remove_ordered_extent(inode, ordered_extent);
2988 /* for snapshot-aware defrag */
2991 free_sa_defrag_extent(new);
2992 atomic_dec(&root->fs_info->defrag_running);
2994 relink_file_extents(new);
2999 btrfs_put_ordered_extent(ordered_extent);
3000 /* once for the tree */
3001 btrfs_put_ordered_extent(ordered_extent);
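/*
 * Work item wrapper so btrfs_finish_ordered_io() runs from a workqueue
 * once all pages of the ordered extent have been written.
 */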
3006 static void finish_ordered_fn(struct btrfs_work *work)
3008 struct btrfs_ordered_extent *ordered_extent;
3009 ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3010 btrfs_finish_ordered_io(ordered_extent);
3013 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3014 struct extent_state *state, int uptodate)
3016 struct inode *inode = page->mapping->host;
3017 struct btrfs_root *root = BTRFS_I(inode)->root;
3018 struct btrfs_ordered_extent *ordered_extent = NULL;
3019 struct btrfs_workqueue *wq;
3020 btrfs_work_func_t func;
3022 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3024 ClearPagePrivate2(page);
3025 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3026 end - start + 1, uptodate))
3029 if (btrfs_is_free_space_inode(inode)) {
3030 wq = root->fs_info->endio_freespace_worker;
3031 func = btrfs_freespace_write_helper;
3033 wq = root->fs_info->endio_write_workers;
3034 func = btrfs_endio_write_helper;
3037 btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3039 btrfs_queue_work(wq, &ordered_extent->work);
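/*
 * Verify one block's checksum against the csum array carried in the
 * btrfs_io_bio. On a mismatch the block is poisoned (memset to 1) so
 * stale data is never exposed, and -EIO is normally returned so the
 * read-repair code can try another mirror.
 */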
3044 static int __readpage_endio_check(struct inode *inode,
3045 struct btrfs_io_bio *io_bio,
3046 int icsum, struct page *page,
3047 int pgoff, u64 start, size_t len)
3053 csum_expected = *(((u32 *)io_bio->csum) + icsum);
3055 kaddr = kmap_atomic(page);
3056 csum = btrfs_csum_data(kaddr + pgoff, csum, len);
3057 btrfs_csum_final(csum, (char *)&csum);
3058 if (csum != csum_expected)
3061 kunmap_atomic(kaddr);
3064 btrfs_warn_rl(BTRFS_I(inode)->root->fs_info,
3065 "csum failed ino %llu off %llu csum %u expected csum %u",
3066 btrfs_ino(inode), start, csum, csum_expected);
3067 memset(kaddr + pgoff, 1, len);
3068 flush_dcache_page(page);
3069 kunmap_atomic(kaddr);
3070 if (csum_expected == 0)
3076 * when reads are done, we need to check csums to verify the data is correct.
3077 * If there's a match, we allow the bio to finish. If not, the code in
3078 * extent_io.c will try to find good copies for us.
3080 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3081 u64 phy_offset, struct page *page,
3082 u64 start, u64 end, int mirror)
3084 size_t offset = start - page_offset(page);
3085 struct inode *inode = page->mapping->host;
3086 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3087 struct btrfs_root *root = BTRFS_I(inode)->root;
3089 if (PageChecked(page)) {
3090 ClearPageChecked(page);
3094 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3097 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3098 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3099 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
3104 phy_offset >>= inode->i_sb->s_blocksize_bits;
3105 return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3106 start, (size_t)(end - start + 1));
3109 struct delayed_iput {
3110 struct list_head list;
3111 struct inode *inode;
3114 /* JDM: If this is fs-wide, why can't we add a pointer to
3115 * btrfs_inode instead and avoid the allocation? */
3116 void btrfs_add_delayed_iput(struct inode *inode)
3118 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3119 struct delayed_iput *delayed;
3121 if (atomic_add_unless(&inode->i_count, -1, 1))
3122 return;
3124 delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
3125 delayed->inode = inode;
3127 spin_lock(&fs_info->delayed_iput_lock);
3128 list_add_tail(&delayed->list, &fs_info->delayed_iputs);
3129 spin_unlock(&fs_info->delayed_iput_lock);
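/*
 * Drain the list of delayed iputs: splice it off under the lock and
 * drop each inode reference. delayed_iput_sem lets other code wait for
 * iputs that are already in flight.
 */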
3132 void btrfs_run_delayed_iputs(struct btrfs_root *root)
3135 struct btrfs_fs_info *fs_info = root->fs_info;
3136 struct delayed_iput *delayed;
3139 spin_lock(&fs_info->delayed_iput_lock);
3140 empty = list_empty(&fs_info->delayed_iputs);
3141 spin_unlock(&fs_info->delayed_iput_lock);
3143 if (empty)
3144 return;
3145 down_read(&fs_info->delayed_iput_sem);
3147 spin_lock(&fs_info->delayed_iput_lock);
3148 list_splice_init(&fs_info->delayed_iputs, &list);
3149 spin_unlock(&fs_info->delayed_iput_lock);
3151 while (!list_empty(&list)) {
3152 delayed = list_entry(list.next, struct delayed_iput, list);
3153 list_del(&delayed->list);
3154 iput(delayed->inode);
3158 up_read(&root->fs_info->delayed_iput_sem);
3162 * This is called at transaction commit time. If there are no orphan
3163 * files in the subvolume, it removes the orphan item and frees the
3164 * block_rsv structure.
3165 */
3166 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3167 struct btrfs_root *root)
3169 struct btrfs_block_rsv *block_rsv;
3172 if (atomic_read(&root->orphan_inodes) ||
3173 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3176 spin_lock(&root->orphan_lock);
3177 if (atomic_read(&root->orphan_inodes)) {
3178 spin_unlock(&root->orphan_lock);
3182 if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3183 spin_unlock(&root->orphan_lock);
3187 block_rsv = root->orphan_block_rsv;
3188 root->orphan_block_rsv = NULL;
3189 spin_unlock(&root->orphan_lock);
3191 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3192 btrfs_root_refs(&root->root_item) > 0) {
3193 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
3194 root->root_key.objectid);
3196 btrfs_abort_transaction(trans, root, ret);
3198 clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3203 WARN_ON(block_rsv->size > 0);
3204 btrfs_free_block_rsv(root, block_rsv);
3209 * This creates an orphan entry for the given inode in case something goes
3210 * wrong in the middle of an unlink/truncate.
3212 * NOTE: caller of this function should reserve 5 units of metadata for
3213 * this function.
3214 */
3215 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
3217 struct btrfs_root *root = BTRFS_I(inode)->root;
3218 struct btrfs_block_rsv *block_rsv = NULL;
3223 if (!root->orphan_block_rsv) {
3224 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3229 spin_lock(&root->orphan_lock);
3230 if (!root->orphan_block_rsv) {
3231 root->orphan_block_rsv = block_rsv;
3232 } else if (block_rsv) {
3233 btrfs_free_block_rsv(root, block_rsv);
3237 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3238 &BTRFS_I(inode)->runtime_flags)) {
3241 * For proper ENOSPC handling, we should do orphan
3242 * cleanup when mounting. But this introduces backward
3243 * compatibility issue.
3245 if (!xchg(&root->orphan_item_inserted, 1))
3251 atomic_inc(&root->orphan_inodes);
3254 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3255 &BTRFS_I(inode)->runtime_flags))
3257 spin_unlock(&root->orphan_lock);
3259 /* grab metadata reservation from transaction handle */
3261 ret = btrfs_orphan_reserve_metadata(trans, inode);
3262 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3265 /* insert an orphan item to track this unlinked/truncated file */
3267 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3269 atomic_dec(&root->orphan_inodes);
3271 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3272 &BTRFS_I(inode)->runtime_flags);
3273 btrfs_orphan_release_metadata(inode);
3275 if (ret != -EEXIST) {
3276 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3277 &BTRFS_I(inode)->runtime_flags);
3278 btrfs_abort_transaction(trans, root, ret);
3285 /* insert an orphan item to track subvolume contains orphan files */
3287 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3288 root->root_key.objectid);
3289 if (ret && ret != -EEXIST) {
3290 btrfs_abort_transaction(trans, root, ret);
3298 * We have done the truncate/delete so we can go ahead and remove the orphan
3299 * item for this particular inode.
3301 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3302 struct inode *inode)
3304 struct btrfs_root *root = BTRFS_I(inode)->root;
3305 int delete_item = 0;
3306 int release_rsv = 0;
3309 spin_lock(&root->orphan_lock);
3310 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3311 &BTRFS_I(inode)->runtime_flags))
3314 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3315 &BTRFS_I(inode)->runtime_flags))
3317 spin_unlock(&root->orphan_lock);
3320 atomic_dec(&root->orphan_inodes);
3322 ret = btrfs_del_orphan_item(trans, root,
3327 btrfs_orphan_release_metadata(inode);
3333 * this cleans up any orphans that may be left on the list from the last use
3334 * of this root.
3335 */
3336 int btrfs_orphan_cleanup(struct btrfs_root *root)
3338 struct btrfs_path *path;
3339 struct extent_buffer *leaf;
3340 struct btrfs_key key, found_key;
3341 struct btrfs_trans_handle *trans;
3342 struct inode *inode;
3343 u64 last_objectid = 0;
3344 int ret = 0, nr_unlink = 0, nr_truncate = 0;
3346 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3349 path = btrfs_alloc_path();
3356 key.objectid = BTRFS_ORPHAN_OBJECTID;
3357 key.type = BTRFS_ORPHAN_ITEM_KEY;
3358 key.offset = (u64)-1;
3361 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3366 * ret == 0 means we found what we were searching for, which
3367 * is weird, but possible, so only screw with path if we didn't
3368 * find the key and see if we have stuff that matches
3372 if (path->slots[0] == 0)
3377 /* pull out the item */
3378 leaf = path->nodes[0];
3379 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3381 /* make sure the item matches what we want */
3382 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3384 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3387 /* release the path since we're done with it */
3388 btrfs_release_path(path);
3391 * this is where we are basically btrfs_lookup, without the
3392 * crossing root thing. we store the inode number in the
3393 * offset of the orphan item.
3396 if (found_key.offset == last_objectid) {
3397 btrfs_err(root->fs_info,
3398 "Error removing orphan entry, stopping orphan cleanup");
3403 last_objectid = found_key.offset;
3405 found_key.objectid = found_key.offset;
3406 found_key.type = BTRFS_INODE_ITEM_KEY;
3407 found_key.offset = 0;
3408 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3409 ret = PTR_ERR_OR_ZERO(inode);
3410 if (ret && ret != -ESTALE)
3413 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3414 struct btrfs_root *dead_root;
3415 struct btrfs_fs_info *fs_info = root->fs_info;
3416 int is_dead_root = 0;
3419 * this is an orphan in the tree root. Currently these
3420 * could come from 2 sources:
3421 * a) a snapshot deletion in progress
3422 * b) a free space cache inode
3423 * We need to distinguish those two, as the snapshot
3424 * orphan must not get deleted.
3425 * find_dead_roots already ran before us, so if this
3426 * is a snapshot deletion, we should find the root
3427 * in the dead_roots list
3429 spin_lock(&fs_info->trans_lock);
3430 list_for_each_entry(dead_root, &fs_info->dead_roots,
3432 if (dead_root->root_key.objectid ==
3433 found_key.objectid) {
3438 spin_unlock(&fs_info->trans_lock);
3440 /* prevent this orphan from being found again */
3441 key.offset = found_key.objectid - 1;
3446 * Inode is already gone but the orphan item is still there,
3447 * kill the orphan item.
3449 if (ret == -ESTALE) {
3450 trans = btrfs_start_transaction(root, 1);
3451 if (IS_ERR(trans)) {
3452 ret = PTR_ERR(trans);
3455 btrfs_debug(root->fs_info, "auto deleting %Lu",
3456 found_key.objectid);
3457 ret = btrfs_del_orphan_item(trans, root,
3458 found_key.objectid);
3459 btrfs_end_transaction(trans, root);
3466 * add this inode to the orphan list so btrfs_orphan_del does
3467 * the proper thing when we hit it
3469 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3470 &BTRFS_I(inode)->runtime_flags);
3471 atomic_inc(&root->orphan_inodes);
3473 /* if we have links, this was a truncate, lets do that */
3474 if (inode->i_nlink) {
3475 if (WARN_ON(!S_ISREG(inode->i_mode))) {
3481 /* 1 for the orphan item deletion. */
3482 trans = btrfs_start_transaction(root, 1);
3483 if (IS_ERR(trans)) {
3485 ret = PTR_ERR(trans);
3488 ret = btrfs_orphan_add(trans, inode);
3489 btrfs_end_transaction(trans, root);
3495 ret = btrfs_truncate(inode);
3497 btrfs_orphan_del(NULL, inode);
3502 /* this will do delete_inode and everything for us */
3507 /* release the path since we're done with it */
3508 btrfs_release_path(path);
3510 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3512 if (root->orphan_block_rsv)
3513 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3516 if (root->orphan_block_rsv ||
3517 test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3518 trans = btrfs_join_transaction(root);
3520 btrfs_end_transaction(trans, root);
3524 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3526 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3530 btrfs_err(root->fs_info,
3531 "could not do orphan cleanup %d", ret);
3532 btrfs_free_path(path);
3537 * very simple check to peek ahead in the leaf looking for xattrs. If we
3538 * don't find any xattrs, we know there can't be any acls.
3540 * slot is the slot the inode is in, objectid is the objectid of the inode
3542 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3543 int slot, u64 objectid,
3544 int *first_xattr_slot)
3546 u32 nritems = btrfs_header_nritems(leaf);
3547 struct btrfs_key found_key;
3548 static u64 xattr_access = 0;
3549 static u64 xattr_default = 0;
3552 if (!xattr_access) {
3553 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3554 strlen(POSIX_ACL_XATTR_ACCESS));
3555 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3556 strlen(POSIX_ACL_XATTR_DEFAULT));
3560 *first_xattr_slot = -1;
3561 while (slot < nritems) {
3562 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3564 /* we found a different objectid, there must not be acls */
3565 if (found_key.objectid != objectid)
3568 /* we found an xattr, assume we've got an acl */
3569 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3570 if (*first_xattr_slot == -1)
3571 *first_xattr_slot = slot;
3572 if (found_key.offset == xattr_access ||
3573 found_key.offset == xattr_default)
3578 * we found a key greater than an xattr key, there can't
3579 * be any acls later on
3580 */
3581 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3588 * it goes inode, inode backrefs, xattrs, extents,
3589 * so if there are a ton of hard links to an inode there can
3590 * be a lot of backrefs. Don't waste time searching too hard,
3591 * this is just an optimization
3592 */
3593 if (scanned >= 8)
3594 break;
3596 /* we hit the end of the leaf before we found an xattr or
3597 * something larger than an xattr. We have to assume the inode
3598 * has acls
3599 */
3600 if (*first_xattr_slot == -1)
3601 *first_xattr_slot = slot;
3606 * read an inode from the btree into the in-memory inode
3608 static void btrfs_read_locked_inode(struct inode *inode)
3610 struct btrfs_path *path;
3611 struct extent_buffer *leaf;
3612 struct btrfs_inode_item *inode_item;
3613 struct btrfs_root *root = BTRFS_I(inode)->root;
3614 struct btrfs_key location;
3619 bool filled = false;
3620 int first_xattr_slot;
3622 ret = btrfs_fill_inode(inode, &rdev);
3626 path = btrfs_alloc_path();
3630 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3632 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3636 leaf = path->nodes[0];
3641 inode_item = btrfs_item_ptr(leaf, path->slots[0],
3642 struct btrfs_inode_item);
3643 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3644 set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3645 i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3646 i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3647 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3649 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3650 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3652 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3653 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3655 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3656 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3658 BTRFS_I(inode)->i_otime.tv_sec =
3659 btrfs_timespec_sec(leaf, &inode_item->otime);
3660 BTRFS_I(inode)->i_otime.tv_nsec =
3661 btrfs_timespec_nsec(leaf, &inode_item->otime);
3663 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3664 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3665 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3667 inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3668 inode->i_generation = BTRFS_I(inode)->generation;
3670 rdev = btrfs_inode_rdev(leaf, inode_item);
3672 BTRFS_I(inode)->index_cnt = (u64)-1;
3673 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3677 * If we were modified in the current generation and evicted from memory
3678 * and then re-read we need to do a full sync since we don't have any
3679 * idea about which extents were modified before we were evicted from
3682 * This is required for both inode re-read from disk and delayed inode
3683 * in delayed_nodes_tree.
3685 if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3686 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3687 &BTRFS_I(inode)->runtime_flags);
3690 * We don't persist the id of the transaction where an unlink operation
3691 * against the inode was last made. So here we assume the inode might
3692 * have been evicted, and therefore the exact value of last_unlink_trans
3693 * lost, and set it to last_trans to avoid metadata inconsistencies
3694 * between the inode and its parent if the inode is fsync'ed and the log
3695 * replayed. For example, in the scenario:
3698 * ln mydir/foo mydir/bar
3701 * echo 2 > /proc/sys/vm/drop_caches # evicts inode
3702 * xfs_io -c fsync mydir/foo
3704 * mount fs, triggers fsync log replay
3706 * We must make sure that when we fsync our inode foo we also log its
3707 * parent inode, otherwise after log replay the parent still has the
3708 * dentry with the "bar" name but our inode foo has a link count of 1
3709 * and doesn't have an inode ref with the name "bar" anymore.
3711 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3712 * but it guarantees correctness at the expense of occasional full
3713 * transaction commits on fsync if our inode is a directory, or if our
3714 * inode is not a directory, logging its parent unnecessarily.
3716 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3719 if (inode->i_nlink != 1 ||
3720 path->slots[0] >= btrfs_header_nritems(leaf))
3723 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3724 if (location.objectid != btrfs_ino(inode))
3727 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3728 if (location.type == BTRFS_INODE_REF_KEY) {
3729 struct btrfs_inode_ref *ref;
3731 ref = (struct btrfs_inode_ref *)ptr;
3732 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3733 } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3734 struct btrfs_inode_extref *extref;
3736 extref = (struct btrfs_inode_extref *)ptr;
3737 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3742 * try to precache a NULL acl entry for files that don't have
3743 * any xattrs or acls
3745 maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3746 btrfs_ino(inode), &first_xattr_slot);
3747 if (first_xattr_slot != -1) {
3748 path->slots[0] = first_xattr_slot;
3749 ret = btrfs_load_inode_props(inode, path);
3751 btrfs_err(root->fs_info,
3752 "error loading props for ino %llu (root %llu): %d",
3754 root->root_key.objectid, ret);
3756 btrfs_free_path(path);
3759 cache_no_acl(inode);
3761 switch (inode->i_mode & S_IFMT) {
3763 inode->i_mapping->a_ops = &btrfs_aops;
3764 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3765 inode->i_fop = &btrfs_file_operations;
3766 inode->i_op = &btrfs_file_inode_operations;
3769 inode->i_fop = &btrfs_dir_file_operations;
3770 if (root == root->fs_info->tree_root)
3771 inode->i_op = &btrfs_dir_ro_inode_operations;
3773 inode->i_op = &btrfs_dir_inode_operations;
3776 inode->i_op = &btrfs_symlink_inode_operations;
3777 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3780 inode->i_op = &btrfs_special_inode_operations;
3781 init_special_inode(inode, inode->i_mode, rdev);
3785 btrfs_update_iflags(inode);
3789 btrfs_free_path(path);
3790 make_bad_inode(inode);
3794 * given a leaf and an inode, copy the inode fields into the leaf
3796 static void fill_inode_item(struct btrfs_trans_handle *trans,
3797 struct extent_buffer *leaf,
3798 struct btrfs_inode_item *item,
3799 struct inode *inode)
3801 struct btrfs_map_token token;
3803 btrfs_init_map_token(&token);
3805 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3806 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3807 btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3809 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3810 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3812 btrfs_set_token_timespec_sec(leaf, &item->atime,
3813 inode->i_atime.tv_sec, &token);
3814 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3815 inode->i_atime.tv_nsec, &token);
3817 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3818 inode->i_mtime.tv_sec, &token);
3819 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3820 inode->i_mtime.tv_nsec, &token);
3822 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3823 inode->i_ctime.tv_sec, &token);
3824 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3825 inode->i_ctime.tv_nsec, &token);
3827 btrfs_set_token_timespec_sec(leaf, &item->otime,
3828 BTRFS_I(inode)->i_otime.tv_sec, &token);
3829 btrfs_set_token_timespec_nsec(leaf, &item->otime,
3830 BTRFS_I(inode)->i_otime.tv_nsec, &token);
3832 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3834 btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3836 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3837 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3838 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3839 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3840 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3844 * copy everything in the in-memory inode into the btree.
3846 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3847 struct btrfs_root *root, struct inode *inode)
3849 struct btrfs_inode_item *inode_item;
3850 struct btrfs_path *path;
3851 struct extent_buffer *leaf;
3854 path = btrfs_alloc_path();
3858 path->leave_spinning = 1;
3859 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3867 leaf = path->nodes[0];
3868 inode_item = btrfs_item_ptr(leaf, path->slots[0],
3869 struct btrfs_inode_item);
3871 fill_inode_item(trans, leaf, inode_item, inode);
3872 btrfs_mark_buffer_dirty(leaf);
3873 btrfs_set_inode_last_trans(trans, inode);
3876 btrfs_free_path(path);
3881 * copy everything in the in-memory inode into the btree.
3883 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3884 struct btrfs_root *root, struct inode *inode)
3889 * If the inode is a free space inode, we can deadlock during commit
3890 * if we put it into the delayed code.
3892 * The data relocation inode should also be directly updated
3895 if (!btrfs_is_free_space_inode(inode)
3896 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3897 && !root->fs_info->log_root_recovering) {
3898 btrfs_update_root_times(trans, root);
3900 ret = btrfs_delayed_update_inode(trans, root, inode);
3902 btrfs_set_inode_last_trans(trans, inode);
3906 return btrfs_update_inode_item(trans, root, inode);
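/*
 * Like btrfs_update_inode(), but if the delayed-inode path fails (e.g.
 * with -ENOSPC) fall back to updating the inode item directly.
 */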
3909 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3910 struct btrfs_root *root,
3911 struct inode *inode)
3915 ret = btrfs_update_inode(trans, root, inode);
3917 return btrfs_update_inode_item(trans, root, inode);
3922 * unlink helper that gets used here in inode.c and in the tree logging
3923 * recovery code. It removes a link in a directory with a given name, and
3924 * also drops the back refs in the inode to the directory
3926 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3927 struct btrfs_root *root,
3928 struct inode *dir, struct inode *inode,
3929 const char *name, int name_len)
3931 struct btrfs_path *path;
3933 struct extent_buffer *leaf;
3934 struct btrfs_dir_item *di;
3935 struct btrfs_key key;
3937 u64 ino = btrfs_ino(inode);
3938 u64 dir_ino = btrfs_ino(dir);
3940 path = btrfs_alloc_path();
3946 path->leave_spinning = 1;
3947 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3948 name, name_len, -1);
3957 leaf = path->nodes[0];
3958 btrfs_dir_item_key_to_cpu(leaf, di, &key);
3959 ret = btrfs_delete_one_dir_name(trans, root, path, di);
3962 btrfs_release_path(path);
3965 * If we don't have a dir index, we have to get it by looking up
3966 * the inode ref; since we then have the inode ref in hand, remove it
3967 * directly, there is no point in a delayed deletion.
3969 * But if we do have the dir index, there is no need to search the
3970 * inode ref to get it. Since the inode ref is close to the inode
3971 * item, it is better to delay its deletion and just do it when
3972 * we update the inode item.
3974 if (BTRFS_I(inode)->dir_index) {
3975 ret = btrfs_delayed_delete_inode_ref(inode);
3977 index = BTRFS_I(inode)->dir_index;
3982 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3985 btrfs_info(root->fs_info,
3986 "failed to delete reference to %.*s, inode %llu parent %llu",
3987 name_len, name, ino, dir_ino);
3988 btrfs_abort_transaction(trans, root, ret);
3992 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3994 btrfs_abort_transaction(trans, root, ret);
3998 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
4000 if (ret != 0 && ret != -ENOENT) {
4001 btrfs_abort_transaction(trans, root, ret);
4005 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
4010 btrfs_abort_transaction(trans, root, ret);
4012 btrfs_free_path(path);
4016 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4017 inode_inc_iversion(inode);
4018 inode_inc_iversion(dir);
4019 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4020 ret = btrfs_update_inode(trans, root, dir);
4025 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4026 struct btrfs_root *root,
4027 struct inode *dir, struct inode *inode,
4028 const char *name, int name_len)
4031 ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4034 ret = btrfs_update_inode(trans, root, inode);
4040 * helper to start transaction for unlink and rmdir.
4042 * unlink and rmdir are special in btrfs: they do not always free space, so
4043 * if we cannot make our reservations the normal way, try to see if there is
4044 * plenty of slack room in the global reserve to migrate; otherwise we cannot
4045 * allow the unlink to occur.
4047 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4049 struct btrfs_trans_handle *trans;
4050 struct btrfs_root *root = BTRFS_I(dir)->root;
4054 * 1 for the possible orphan item
4055 * 1 for the dir item
4056 * 1 for the dir index
4057 * 1 for the inode ref
4060 trans = btrfs_start_transaction(root, 5);
4061 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
4064 if (PTR_ERR(trans) == -ENOSPC) {
4065 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4067 trans = btrfs_start_transaction(root, 0);
4070 ret = btrfs_cond_migrate_bytes(root->fs_info,
4071 &root->fs_info->trans_block_rsv,
4074 btrfs_end_transaction(trans, root);
4075 return ERR_PTR(ret);
4077 trans->block_rsv = &root->fs_info->trans_block_rsv;
4078 trans->bytes_reserved = num_bytes;
4079 }
4080 return trans;
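/*
 * VFS unlink: remove the directory entry and inode back refs, and if
 * this drops the last link, add an orphan item so the inode can still
 * be cleaned up if we crash before the final iput truncates it.
 */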
4083 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4085 struct btrfs_root *root = BTRFS_I(dir)->root;
4086 struct btrfs_trans_handle *trans;
4087 struct inode *inode = d_inode(dentry);
4090 trans = __unlink_start_trans(dir);
4092 return PTR_ERR(trans);
4094 btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
4096 ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4097 dentry->d_name.name, dentry->d_name.len);
4101 if (inode->i_nlink == 0) {
4102 ret = btrfs_orphan_add(trans, inode);
4108 btrfs_end_transaction(trans, root);
4109 btrfs_btree_balance_dirty(root);
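/*
 * Remove a directory entry that points at a subvolume root: the dir
 * item references a root (BTRFS_ROOT_ITEM_KEY) rather than an inode,
 * so we delete the dir item, the root ref and the dir index, then
 * update the parent directory's size and times.
 */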
4113 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4114 struct btrfs_root *root,
4115 struct inode *dir, u64 objectid,
4116 const char *name, int name_len)
4118 struct btrfs_path *path;
4119 struct extent_buffer *leaf;
4120 struct btrfs_dir_item *di;
4121 struct btrfs_key key;
4124 u64 dir_ino = btrfs_ino(dir);
4126 path = btrfs_alloc_path();
4130 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4131 name, name_len, -1);
4132 if (IS_ERR_OR_NULL(di)) {
4140 leaf = path->nodes[0];
4141 btrfs_dir_item_key_to_cpu(leaf, di, &key);
4142 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4143 ret = btrfs_delete_one_dir_name(trans, root, path, di);
4145 btrfs_abort_transaction(trans, root, ret);
4148 btrfs_release_path(path);
4150 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4151 objectid, root->root_key.objectid,
4152 dir_ino, &index, name, name_len);
4154 if (ret != -ENOENT) {
4155 btrfs_abort_transaction(trans, root, ret);
4158 di = btrfs_search_dir_index_item(root, path, dir_ino,
4160 if (IS_ERR_OR_NULL(di)) {
4165 btrfs_abort_transaction(trans, root, ret);
4169 leaf = path->nodes[0];
4170 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4171 btrfs_release_path(path);
4174 btrfs_release_path(path);
4176 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4178 btrfs_abort_transaction(trans, root, ret);
4182 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4183 inode_inc_iversion(dir);
4184 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4185 ret = btrfs_update_inode_fallback(trans, root, dir);
4187 btrfs_abort_transaction(trans, root, ret);
4189 btrfs_free_path(path);
4193 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4195 struct inode *inode = d_inode(dentry);
4197 struct btrfs_root *root = BTRFS_I(dir)->root;
4198 struct btrfs_trans_handle *trans;
4200 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4202 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
4205 trans = __unlink_start_trans(dir);
4207 return PTR_ERR(trans);
4209 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4210 err = btrfs_unlink_subvol(trans, root, dir,
4211 BTRFS_I(inode)->location.objectid,
4212 dentry->d_name.name,
4213 dentry->d_name.len);
4217 err = btrfs_orphan_add(trans, inode);
4221 /* now the directory is empty */
4222 err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4223 dentry->d_name.name, dentry->d_name.len);
4225 btrfs_i_size_write(inode, 0);
4227 btrfs_end_transaction(trans, root);
4228 btrfs_btree_balance_dirty(root);
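/*
 * During a large truncate, try to refill the transaction block reserve
 * with enough space to cover the metadata freed so far (without
 * flushing); a nonzero return tells the truncate loop to stop and let
 * the transaction restart.
 */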
4233 static int truncate_space_check(struct btrfs_trans_handle *trans,
4234 struct btrfs_root *root,
4239 bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
4240 ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
4241 bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4243 trans->bytes_reserved += bytes_deleted;
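/*
 * Truncate an inline extent down to new_size. Uncompressed inline data
 * is shrunk in place via btrfs_truncate_item(); for compressed inline
 * data we instead zero the tail of the page, since re-compressing a
 * shortened payload in place would be far more complex.
 */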
4248 static int truncate_inline_extent(struct inode *inode,
4249 struct btrfs_path *path,
4250 struct btrfs_key *found_key,
4254 struct extent_buffer *leaf = path->nodes[0];
4255 int slot = path->slots[0];
4256 struct btrfs_file_extent_item *fi;
4257 u32 size = (u32)(new_size - found_key->offset);
4258 struct btrfs_root *root = BTRFS_I(inode)->root;
4260 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4262 if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4263 loff_t offset = new_size;
4264 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
4267 * Zero out the remainder of the last page of our inline extent,
4268 * instead of directly truncating our inline extent here - that
4269 * would be much more complex (decompressing all the data, then
4270 * compressing the truncated data, which might be bigger than
4271 * the size of the inline extent, resize the extent, etc).
4272 * We release the path because to get the page we might need to
4273 * read the extent item from disk (data not in the page cache).
4275 btrfs_release_path(path);
4276 return btrfs_truncate_page(inode, offset, page_end - offset, 0);
4279 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4280 size = btrfs_file_extent_calc_inline_size(size);
4281 btrfs_truncate_item(root, path, size, 1);
4283 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4284 inode_sub_bytes(inode, item_end + 1 - new_size);
4290 * this can truncate away extent items, csum items and directory items.
4291 * It starts at a high offset and removes keys until it can't find
4292 * any higher than new_size
4294 * csum items that cross the new i_size are truncated to the new size
4297 * min_type is the minimum key type to truncate down to. If set to 0, this
4298 * will kill all the items on this inode, including the INODE_ITEM_KEY.
4300 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4301 struct btrfs_root *root,
4302 struct inode *inode,
4303 u64 new_size, u32 min_type)
4305 struct btrfs_path *path;
4306 struct extent_buffer *leaf;
4307 struct btrfs_file_extent_item *fi;
4308 struct btrfs_key key;
4309 struct btrfs_key found_key;
4310 u64 extent_start = 0;
4311 u64 extent_num_bytes = 0;
4312 u64 extent_offset = 0;
4314 u64 last_size = new_size;
4315 u32 found_type = (u8)-1;
4318 int pending_del_nr = 0;
4319 int pending_del_slot = 0;
4320 int extent_type = -1;
4323 u64 ino = btrfs_ino(inode);
4324 u64 bytes_deleted = 0;
4326 bool should_throttle = false;
4327 bool should_end = false;
4329 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4332 * for non-free space inodes and ref cows, we want to back off from
4333 * time to time
4334 */
4335 if (!btrfs_is_free_space_inode(inode) &&
4336 test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4339 path = btrfs_alloc_path();
4345 * We want to drop from the next block forward in case this new size is
4346 * not block aligned since we will be keeping the last block of the
4347 * extent just the way it is.
4349 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4350 root == root->fs_info->tree_root)
4351 btrfs_drop_extent_cache(inode, ALIGN(new_size,
4352 root->sectorsize), (u64)-1, 0);
4355 * This function is also used to drop the items in the log tree before
4356 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4357 * it is used to drop the logged items. So we shouldn't kill the delayed
4360 if (min_type == 0 && root == BTRFS_I(inode)->root)
4361 btrfs_kill_delayed_inode_items(inode);
4364 key.offset = (u64)-1;
4369 * with a 16K leaf size and 128MB extents, you can actually queue
4370 * up a huge file in a single leaf. Most of the time that
4371 * bytes_deleted is > 0, it will be huge by the time we get here
4373 if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4374 if (btrfs_should_end_transaction(trans, root)) {
4381 path->leave_spinning = 1;
4382 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4389 /* there are no items in the tree for us to truncate, we're
4390 * done
4391 */
4392 if (path->slots[0] == 0)
4399 leaf = path->nodes[0];
4400 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4401 found_type = found_key.type;
4403 if (found_key.objectid != ino)
4406 if (found_type < min_type)
4409 item_end = found_key.offset;
4410 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4411 fi = btrfs_item_ptr(leaf, path->slots[0],
4412 struct btrfs_file_extent_item);
4413 extent_type = btrfs_file_extent_type(leaf, fi);
4414 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4416 btrfs_file_extent_num_bytes(leaf, fi);
4417 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4418 item_end += btrfs_file_extent_inline_len(leaf,
4419 path->slots[0], fi);
4423 if (found_type > min_type) {
4426 if (item_end < new_size)
4428 if (found_key.offset >= new_size)
4434 /* FIXME, shrink the extent if the ref count is only 1 */
4435 if (found_type != BTRFS_EXTENT_DATA_KEY)
4439 last_size = found_key.offset;
4441 last_size = new_size;
4443 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4445 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4447 u64 orig_num_bytes =
4448 btrfs_file_extent_num_bytes(leaf, fi);
4449 extent_num_bytes = ALIGN(new_size -
4452 btrfs_set_file_extent_num_bytes(leaf, fi,
4454 num_dec = (orig_num_bytes -
4456 if (test_bit(BTRFS_ROOT_REF_COWS,
4459 inode_sub_bytes(inode, num_dec);
4460 btrfs_mark_buffer_dirty(leaf);
4463 btrfs_file_extent_disk_num_bytes(leaf,
4465 extent_offset = found_key.offset -
4466 btrfs_file_extent_offset(leaf, fi);
4468 /* FIXME blocksize != 4096 */
4469 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4470 if (extent_start != 0) {
4472 if (test_bit(BTRFS_ROOT_REF_COWS,
4474 inode_sub_bytes(inode, num_dec);
4477 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4479 * we can't truncate inline items that have had
4483 btrfs_file_extent_encryption(leaf, fi) == 0 &&
4484 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4487 * Need to release path in order to truncate a
4488 * compressed extent. So delete any accumulated
4489 * extent items so far.
4491 if (btrfs_file_extent_compression(leaf, fi) !=
4492 BTRFS_COMPRESS_NONE && pending_del_nr) {
4493 err = btrfs_del_items(trans, root, path,
4497 btrfs_abort_transaction(trans,
4505 err = truncate_inline_extent(inode, path,
4510 btrfs_abort_transaction(trans,
4514 } else if (test_bit(BTRFS_ROOT_REF_COWS,
4516 inode_sub_bytes(inode, item_end + 1 - new_size);
4521 if (!pending_del_nr) {
4522 /* no pending yet, add ourselves */
4523 pending_del_slot = path->slots[0];
4525 } else if (pending_del_nr &&
4526 path->slots[0] + 1 == pending_del_slot) {
4527 /* hop on the pending chunk */
4529 pending_del_slot = path->slots[0];
4536 should_throttle = false;
4539 (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4540 root == root->fs_info->tree_root)) {
4541 btrfs_set_path_blocking(path);
4542 bytes_deleted += extent_num_bytes;
4543 ret = btrfs_free_extent(trans, root, extent_start,
4544 extent_num_bytes, 0,
4545 btrfs_header_owner(leaf),
4546 ino, extent_offset);
4548 if (btrfs_should_throttle_delayed_refs(trans, root))
4549 btrfs_async_run_delayed_refs(root,
4550 trans->delayed_ref_updates * 2, 0);
4552 if (truncate_space_check(trans, root,
4553 extent_num_bytes)) {
4556 if (btrfs_should_throttle_delayed_refs(trans,
4558 should_throttle = true;
4563 if (found_type == BTRFS_INODE_ITEM_KEY)
4566 if (path->slots[0] == 0 ||
4567 path->slots[0] != pending_del_slot ||
4568 should_throttle || should_end) {
4569 if (pending_del_nr) {
4570 ret = btrfs_del_items(trans, root, path,
4574 btrfs_abort_transaction(trans,
4580 btrfs_release_path(path);
4581 if (should_throttle) {
4582 unsigned long updates = trans->delayed_ref_updates;
4584 trans->delayed_ref_updates = 0;
4585 ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4591 * if we failed to refill our space rsv, bail out
4592 * and let the transaction restart
4604 if (pending_del_nr) {
4605 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4608 btrfs_abort_transaction(trans, root, ret);
4611 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4612 btrfs_ordered_update_i_size(inode, last_size, NULL);
4614 btrfs_free_path(path);
4616 if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4617 unsigned long updates = trans->delayed_ref_updates;
4619 trans->delayed_ref_updates = 0;
4620 ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4629 * btrfs_truncate_page - read, zero a chunk and write a page
4630 * @inode - inode that we're zeroing
4631 * @from - the offset to start zeroing
4632 * @len - the length to zero, 0 to zero the entire range relative to the offset
4634 * @front - zero up to the offset instead of from the offset on
4636 * This will find the page for the "from" offset and cow the page and zero the
4637 * part we want to zero. This is used with truncate and hole punching.
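 *
 * Illustrative usage (a sketch, not lifted from a specific caller): when
 * truncating down to an unaligned 'newsize', the tail of the last page
 * can be zeroed with
 *
 *	ret = btrfs_truncate_page(inode, newsize, 0, 0);
 *
 * With len == 0 and front == 0 this zeroes from 'newsize' to the end of
 * the enclosing page, so no stale data is exposed past i_size.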
4639 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4642 struct address_space *mapping = inode->i_mapping;
4643 struct btrfs_root *root = BTRFS_I(inode)->root;
4644 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4645 struct btrfs_ordered_extent *ordered;
4646 struct extent_state *cached_state = NULL;
4648 u32 blocksize = root->sectorsize;
4649 pgoff_t index = from >> PAGE_CACHE_SHIFT;
4650 unsigned offset = from & (PAGE_CACHE_SIZE-1);
4652 gfp_t mask = btrfs_alloc_write_mask(mapping);
4657 if ((offset & (blocksize - 1)) == 0 &&
4658 (!len || ((len & (blocksize - 1)) == 0)))
4660 ret = btrfs_delalloc_reserve_space(inode,
4661 round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
4666 page = find_or_create_page(mapping, index, mask);
4668 btrfs_delalloc_release_space(inode,
4669 round_down(from, PAGE_CACHE_SIZE),
4675 page_start = page_offset(page);
4676 page_end = page_start + PAGE_CACHE_SIZE - 1;
4678 if (!PageUptodate(page)) {
4679 ret = btrfs_readpage(NULL, page);
4681 if (page->mapping != mapping) {
4683 page_cache_release(page);
4686 if (!PageUptodate(page)) {
4691 wait_on_page_writeback(page);
4693 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4694 set_page_extent_mapped(page);
4696 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4698 unlock_extent_cached(io_tree, page_start, page_end,
4699 &cached_state, GFP_NOFS);
4701 page_cache_release(page);
4702 btrfs_start_ordered_extent(inode, ordered, 1);
4703 btrfs_put_ordered_extent(ordered);
4707 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4708 EXTENT_DIRTY | EXTENT_DELALLOC |
4709 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4710 0, 0, &cached_state, GFP_NOFS);
4712 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4715 unlock_extent_cached(io_tree, page_start, page_end,
4716 &cached_state, GFP_NOFS);
4720 if (offset != PAGE_CACHE_SIZE) {
4722 len = PAGE_CACHE_SIZE - offset;
4725 memset(kaddr, 0, offset);
4727 memset(kaddr + offset, 0, len);
4728 flush_dcache_page(page);
4731 ClearPageChecked(page);
4732 set_page_dirty(page);
4733 unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4738 btrfs_delalloc_release_space(inode, page_start,
4741 page_cache_release(page);
4746 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4747 u64 offset, u64 len)
4749 struct btrfs_trans_handle *trans;
4753 * Still need to make sure the inode looks like it's been updated so
4754 * that any holes get logged if we fsync.
4756 if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
4757 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4758 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4759 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4764 * 1 - for the one we're dropping
4765 * 1 - for the one we're adding
4766 * 1 - for updating the inode.
4768 trans = btrfs_start_transaction(root, 3);
4770 return PTR_ERR(trans);
4772 ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4774 btrfs_abort_transaction(trans, root, ret);
4775 btrfs_end_transaction(trans, root);
4779 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
4780 0, 0, len, 0, len, 0, 0, 0);
4782 btrfs_abort_transaction(trans, root, ret);
4784 btrfs_update_inode(trans, root, inode);
4785 btrfs_end_transaction(trans, root);
4790 * This function puts in dummy file extents for the area we're creating a hole
4791 * for. So if we are truncating this file to a larger size we need to insert
4792 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
4793 * the range between oldsize and size
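 *
 * A worked example, assuming a 4096 byte sectorsize and page size:
 * expanding from oldsize == 6000 to size == 20000 gives
 *
 *	hole_start = ALIGN(6000, 4096)  = 8192
 *	block_end  = ALIGN(20000, 4096) = 20480
 *
 * so btrfs_truncate_page() zeroes file bytes 6000..8191 of the partial
 * page, and hole extents are inserted to cover 8192..20479.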
4795 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4797 struct btrfs_root *root = BTRFS_I(inode)->root;
4798 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4799 struct extent_map *em = NULL;
4800 struct extent_state *cached_state = NULL;
4801 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4802 u64 hole_start = ALIGN(oldsize, root->sectorsize);
4803 u64 block_end = ALIGN(size, root->sectorsize);
4810 * If our size started in the middle of a page we need to zero out the
4811 * rest of the page before we expand the i_size, otherwise we could
4812 * expose stale data.
4814 err = btrfs_truncate_page(inode, oldsize, 0, 0);
4818 if (size <= hole_start)
4822 struct btrfs_ordered_extent *ordered;
4824 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4826 ordered = btrfs_lookup_ordered_range(inode, hole_start,
4827 block_end - hole_start);
4830 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4831 &cached_state, GFP_NOFS);
4832 btrfs_start_ordered_extent(inode, ordered, 1);
4833 btrfs_put_ordered_extent(ordered);
4836 cur_offset = hole_start;
4838 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4839 block_end - cur_offset, 0);
4845 last_byte = min(extent_map_end(em), block_end);
4846 last_byte = ALIGN(last_byte, root->sectorsize);
4847 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4848 struct extent_map *hole_em;
4849 hole_size = last_byte - cur_offset;
4851 err = maybe_insert_hole(root, inode, cur_offset,
4855 btrfs_drop_extent_cache(inode, cur_offset,
4856 cur_offset + hole_size - 1, 0);
4857 hole_em = alloc_extent_map();
4859 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4860 &BTRFS_I(inode)->runtime_flags);
4863 hole_em->start = cur_offset;
4864 hole_em->len = hole_size;
4865 hole_em->orig_start = cur_offset;
4867 hole_em->block_start = EXTENT_MAP_HOLE;
4868 hole_em->block_len = 0;
4869 hole_em->orig_block_len = 0;
4870 hole_em->ram_bytes = hole_size;
4871 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4872 hole_em->compress_type = BTRFS_COMPRESS_NONE;
4873 hole_em->generation = root->fs_info->generation;
4876 write_lock(&em_tree->lock);
4877 err = add_extent_mapping(em_tree, hole_em, 1);
4878 write_unlock(&em_tree->lock);
4881 btrfs_drop_extent_cache(inode, cur_offset,
4885 free_extent_map(hole_em);
4888 free_extent_map(em);
4890 cur_offset = last_byte;
4891 if (cur_offset >= block_end)
4894 free_extent_map(em);
4895 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4900 static int wait_snapshoting_atomic_t(atomic_t *a)
4906 static void wait_for_snapshot_creation(struct btrfs_root *root)
4911 ret = btrfs_start_write_no_snapshoting(root);
4914 wait_on_atomic_t(&root->will_be_snapshoted,
4915 wait_snapshoting_atomic_t,
4916 TASK_UNINTERRUPTIBLE);
4920 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4922 struct btrfs_root *root = BTRFS_I(inode)->root;
4923 struct btrfs_trans_handle *trans;
4924 loff_t oldsize = i_size_read(inode);
4925 loff_t newsize = attr->ia_size;
4926 int mask = attr->ia_valid;
4930 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4931 * special case where we need to update the times despite not having
4932 * these flags set. For all other operations the VFS sets these flags
4933 * explicitly if it wants a timestamp update.
4935 if (newsize != oldsize) {
4936 inode_inc_iversion(inode);
4937 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4938 inode->i_ctime = inode->i_mtime =
4939 current_fs_time(inode->i_sb);
4942 if (newsize > oldsize) {
4943 truncate_pagecache(inode, newsize);
4945 * Don't do an expanding truncate while snapshotting is ongoing.
4946 * This is to ensure the snapshot captures a fully consistent
4947 * state of this file - if the snapshot captures this expanding
4948 * truncation, it must capture all writes that happened before this truncation.
4951 wait_for_snapshot_creation(root);
4952 ret = btrfs_cont_expand(inode, oldsize, newsize);
4954 btrfs_end_write_no_snapshoting(root);
4958 trans = btrfs_start_transaction(root, 1);
4959 if (IS_ERR(trans)) {
4960 btrfs_end_write_no_snapshoting(root);
4961 return PTR_ERR(trans);
4964 i_size_write(inode, newsize);
4965 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4966 ret = btrfs_update_inode(trans, root, inode);
4967 btrfs_end_write_no_snapshoting(root);
4968 btrfs_end_transaction(trans, root);
4972 * We're truncating a file that used to have good data down to
4973 * zero. Make sure it gets into the ordered flush list so that
4974 * any new writes get down to disk quickly.
4977 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4978 &BTRFS_I(inode)->runtime_flags);
4981 * 1 for the orphan item we're going to add
4982 * 1 for the orphan item deletion.
4984 trans = btrfs_start_transaction(root, 2);
4986 return PTR_ERR(trans);
4989 * We need to do this in case we fail at _any_ point during the
4990 * actual truncate. Once we do the truncate_setsize we could
4991 * invalidate pages which forces any outstanding ordered io to
4992 * be instantly completed which will give us extents that need
4993 * to be truncated. If we fail to add the orphan item, we
4994 * could have left over extents that were never meant to live,
4995 * so we need to guarantee from this point on that everything
4996 * will be consistent.
4998 ret = btrfs_orphan_add(trans, inode);
4999 btrfs_end_transaction(trans, root);
5003 /* we don't support swapfiles, so vmtruncate shouldn't fail */
5004 truncate_setsize(inode, newsize);
5006 /* Disable nonlocked read DIO to avoid the endless truncate */
5007 btrfs_inode_block_unlocked_dio(inode);
5008 inode_dio_wait(inode);
5009 btrfs_inode_resume_unlocked_dio(inode);
5011 ret = btrfs_truncate(inode);
5012 if (ret && inode->i_nlink) {
5016 * failed to truncate, disk_i_size is only adjusted down
5017 * as we remove extents, so it should represent the true
5018 * size of the inode. Reset the in-memory size and
5019 * delete our orphan entry.
5021 trans = btrfs_join_transaction(root);
5022 if (IS_ERR(trans)) {
5023 btrfs_orphan_del(NULL, inode);
5026 i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5027 err = btrfs_orphan_del(trans, inode);
5029 btrfs_abort_transaction(trans, root, err);
5030 btrfs_end_transaction(trans, root);
5037 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5039 struct inode *inode = d_inode(dentry);
5040 struct btrfs_root *root = BTRFS_I(inode)->root;
5043 if (btrfs_root_readonly(root))
5046 err = inode_change_ok(inode, attr);
5050 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5051 err = btrfs_setsize(inode, attr);
5056 if (attr->ia_valid) {
5057 setattr_copy(inode, attr);
5058 inode_inc_iversion(inode);
5059 err = btrfs_dirty_inode(inode);
5061 if (!err && attr->ia_valid & ATTR_MODE)
5062 err = posix_acl_chmod(inode, inode->i_mode);
5069 * While truncating the inode pages during eviction, we get the VFS calling
5070 * btrfs_invalidatepage() against each page of the inode. This is slow because
5071 * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5072 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5073 * extent_state structures over and over, wasting lots of time.
5075 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5076 * those expensive operations on a per-page basis and do only the ordered io
5077 * finishing, while we release the extent_map and extent_state structures here,
5078 * without the excessive merging and splitting.
5080 static void evict_inode_truncate_pages(struct inode *inode)
5082 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5083 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5084 struct rb_node *node;
5086 ASSERT(inode->i_state & I_FREEING);
5087 truncate_inode_pages_final(&inode->i_data);
5089 write_lock(&map_tree->lock);
5090 while (!RB_EMPTY_ROOT(&map_tree->map)) {
5091 struct extent_map *em;
5093 node = rb_first(&map_tree->map);
5094 em = rb_entry(node, struct extent_map, rb_node);
5095 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5096 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5097 remove_extent_mapping(map_tree, em);
5098 free_extent_map(em);
5099 if (need_resched()) {
5100 write_unlock(&map_tree->lock);
5102 write_lock(&map_tree->lock);
5105 write_unlock(&map_tree->lock);
5108 * Keep looping until we have no more ranges in the io tree.
5109 * We can have ongoing bios started by readpages (called from readahead)
5110 * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5111 * still in progress (unlocked the pages in the bio but did not yet
5112 * unlock the ranges in the io tree). This means some
5113 * ranges can still be locked and eviction started because before
5114 * submitting those bios, which are executed by a separate task (work
5115 * queue kthread), inode references (inode->i_count) were not taken
5116 * (which would be dropped in the end io callback of each bio).
5117 * Therefore here we effectively end up waiting for those bios and
5118 * anyone else holding locked ranges without having bumped the inode's
5119 * reference count - if we don't do it, when they access the inode's
5120 * io_tree to unlock a range it may be too late, leading to a
5121 * use-after-free issue.
5123 spin_lock(&io_tree->lock);
5124 while (!RB_EMPTY_ROOT(&io_tree->state)) {
5125 struct extent_state *state;
5126 struct extent_state *cached_state = NULL;
5130 node = rb_first(&io_tree->state);
5131 state = rb_entry(node, struct extent_state, rb_node);
5132 start = state->start;
5134 spin_unlock(&io_tree->lock);
5136 lock_extent_bits(io_tree, start, end, 0, &cached_state);
5139 * If the range still has the DELALLOC flag, the extent didn't reach disk,
5140 * and its reserved space won't be freed by delayed_ref.
5141 * So we need to free its reserved space here.
5142 * (Refer to comment in btrfs_invalidatepage, case 2)
5144 * Note: end is the bytenr of the last byte, so we need + 1 here.
5146 if (state->state & EXTENT_DELALLOC)
5147 btrfs_qgroup_free_data(inode, start, end - start + 1);
5149 clear_extent_bit(io_tree, start, end,
5150 EXTENT_LOCKED | EXTENT_DIRTY |
5151 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5152 EXTENT_DEFRAG, 1, 1,
5153 &cached_state, GFP_NOFS);
5156 spin_lock(&io_tree->lock);
5158 spin_unlock(&io_tree->lock);
5161 void btrfs_evict_inode(struct inode *inode)
5163 struct btrfs_trans_handle *trans;
5164 struct btrfs_root *root = BTRFS_I(inode)->root;
5165 struct btrfs_block_rsv *rsv, *global_rsv;
5166 int steal_from_global = 0;
5167 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
5170 trace_btrfs_inode_evict(inode);
5172 evict_inode_truncate_pages(inode);
5174 if (inode->i_nlink &&
5175 ((btrfs_root_refs(&root->root_item) != 0 &&
5176 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5177 btrfs_is_free_space_inode(inode)))
5180 if (is_bad_inode(inode)) {
5181 btrfs_orphan_del(NULL, inode);
5184 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5185 if (!special_file(inode->i_mode))
5186 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5188 btrfs_free_io_failure_record(inode, 0, (u64)-1);
5190 if (root->fs_info->log_root_recovering) {
5191 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5192 &BTRFS_I(inode)->runtime_flags));
5196 if (inode->i_nlink > 0) {
5197 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5198 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5202 ret = btrfs_commit_inode_delayed_inode(inode);
5204 btrfs_orphan_del(NULL, inode);
5208 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
5210 btrfs_orphan_del(NULL, inode);
5213 rsv->size = min_size;
5215 global_rsv = &root->fs_info->global_block_rsv;
5217 btrfs_i_size_write(inode, 0);
5220 * This is a bit simpler than btrfs_truncate since we've already
5221 * reserved our space for our orphan item in the unlink, so we just
5222 * need to reserve some slack space in case we add bytes and update
5223 * the inode item when doing the truncate.
5226 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5227 BTRFS_RESERVE_FLUSH_LIMIT);
5230 * Try and steal from the global reserve since we will
5231 * likely not use this space anyway; we want to try as
5232 * hard as possible to get this to work.
5235 steal_from_global++;
5237 steal_from_global = 0;
5241 * steal_from_global == 0: we reserved stuff, hooray!
5242 * steal_from_global == 1: we didn't reserve stuff, boo!
5243 * steal_from_global == 2: we've committed, still not a lot of
5244 * room but maybe we'll have room in the global reserve this time
5246 * steal_from_global == 3: abandon all hope!
5248 if (steal_from_global > 2) {
5249 btrfs_warn(root->fs_info,
5250 "Could not get space for a delete, will truncate on mount %d",
5252 btrfs_orphan_del(NULL, inode);
5253 btrfs_free_block_rsv(root, rsv);
5257 trans = btrfs_join_transaction(root);
5258 if (IS_ERR(trans)) {
5259 btrfs_orphan_del(NULL, inode);
5260 btrfs_free_block_rsv(root, rsv);
5265 * We can't just steal from the global reserve, we need to make
5266 * sure there is room to do it, if not we need to commit and try
5269 if (steal_from_global) {
5270 if (!btrfs_check_space_for_delayed_refs(trans, root))
5271 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5278 * Couldn't steal from the global reserve, we have too much
5279 * pending stuff built up, commit the transaction and try it
5283 ret = btrfs_commit_transaction(trans, root);
5285 btrfs_orphan_del(NULL, inode);
5286 btrfs_free_block_rsv(root, rsv);
5291 steal_from_global = 0;
5294 trans->block_rsv = rsv;
5296 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5297 if (ret != -ENOSPC && ret != -EAGAIN)
5300 trans->block_rsv = &root->fs_info->trans_block_rsv;
5301 btrfs_end_transaction(trans, root);
5303 btrfs_btree_balance_dirty(root);
5306 btrfs_free_block_rsv(root, rsv);
5309 * Errors here aren't a big deal, it just means we leave orphan items
5310 * in the tree. They will be cleaned up on the next mount.
5313 trans->block_rsv = root->orphan_block_rsv;
5314 btrfs_orphan_del(trans, inode);
5316 btrfs_orphan_del(NULL, inode);
5319 trans->block_rsv = &root->fs_info->trans_block_rsv;
5320 if (!(root == root->fs_info->tree_root ||
5321 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5322 btrfs_return_ino(root, btrfs_ino(inode));
5324 btrfs_end_transaction(trans, root);
5325 btrfs_btree_balance_dirty(root);
5327 btrfs_remove_delayed_node(inode);
5333 * this returns the key found in the dir entry in the location pointer.
5334 * If no dir entries were found, location->objectid is 0.
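 *
 * A sketch of the expected calling pattern (btrfs_lookup_dentry below
 * is the real caller):
 *
 *	ret = btrfs_inode_by_name(dir, dentry, &location);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	if (location.objectid == 0)
 *		return ERR_PTR(-ENOENT);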
5336 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5337 struct btrfs_key *location)
5339 const char *name = dentry->d_name.name;
5340 int namelen = dentry->d_name.len;
5341 struct btrfs_dir_item *di;
5342 struct btrfs_path *path;
5343 struct btrfs_root *root = BTRFS_I(dir)->root;
5346 path = btrfs_alloc_path();
5350 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
5355 if (IS_ERR_OR_NULL(di))
5358 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5360 btrfs_free_path(path);
5363 location->objectid = 0;
5368 * when we hit a tree root in a directory, the btrfs part of the inode
5369 * needs to be changed to reflect the root directory of the tree root. This
5370 * is kind of like crossing a mount point.
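 *
 * For illustration, the root ref that ties a subvolume to its parent
 * directory lives in the tree of tree roots under the key
 *
 *	key.objectid = objectid of the parent root
 *	key.type     = BTRFS_ROOT_REF_KEY
 *	key.offset   = objectid of the subvolume root
 *
 * which is exactly the key built below before searching tree_root.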
5372 static int fixup_tree_root_location(struct btrfs_root *root,
5374 struct dentry *dentry,
5375 struct btrfs_key *location,
5376 struct btrfs_root **sub_root)
5378 struct btrfs_path *path;
5379 struct btrfs_root *new_root;
5380 struct btrfs_root_ref *ref;
5381 struct extent_buffer *leaf;
5382 struct btrfs_key key;
5386 path = btrfs_alloc_path();
5393 key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5394 key.type = BTRFS_ROOT_REF_KEY;
5395 key.offset = location->objectid;
5397 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
5405 leaf = path->nodes[0];
5406 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5407 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5408 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5411 ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5412 (unsigned long)(ref + 1),
5413 dentry->d_name.len);
5417 btrfs_release_path(path);
5419 new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
5420 if (IS_ERR(new_root)) {
5421 err = PTR_ERR(new_root);
5425 *sub_root = new_root;
5426 location->objectid = btrfs_root_dirid(&new_root->root_item);
5427 location->type = BTRFS_INODE_ITEM_KEY;
5428 location->offset = 0;
5431 btrfs_free_path(path);
5435 static void inode_tree_add(struct inode *inode)
5437 struct btrfs_root *root = BTRFS_I(inode)->root;
5438 struct btrfs_inode *entry;
5440 struct rb_node *parent;
5441 struct rb_node *new = &BTRFS_I(inode)->rb_node;
5442 u64 ino = btrfs_ino(inode);
5444 if (inode_unhashed(inode))
5447 spin_lock(&root->inode_lock);
5448 p = &root->inode_tree.rb_node;
5451 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5453 if (ino < btrfs_ino(&entry->vfs_inode))
5454 p = &parent->rb_left;
5455 else if (ino > btrfs_ino(&entry->vfs_inode))
5456 p = &parent->rb_right;
5458 WARN_ON(!(entry->vfs_inode.i_state &
5459 (I_WILL_FREE | I_FREEING)));
5460 rb_replace_node(parent, new, &root->inode_tree);
5461 RB_CLEAR_NODE(parent);
5462 spin_unlock(&root->inode_lock);
5466 rb_link_node(new, parent, p);
5467 rb_insert_color(new, &root->inode_tree);
5468 spin_unlock(&root->inode_lock);
5471 static void inode_tree_del(struct inode *inode)
5473 struct btrfs_root *root = BTRFS_I(inode)->root;
5476 spin_lock(&root->inode_lock);
5477 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5478 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5479 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5480 empty = RB_EMPTY_ROOT(&root->inode_tree);
5482 spin_unlock(&root->inode_lock);
5484 if (empty && btrfs_root_refs(&root->root_item) == 0) {
5485 synchronize_srcu(&root->fs_info->subvol_srcu);
5486 spin_lock(&root->inode_lock);
5487 empty = RB_EMPTY_ROOT(&root->inode_tree);
5488 spin_unlock(&root->inode_lock);
5490 btrfs_add_dead_root(root);
5494 void btrfs_invalidate_inodes(struct btrfs_root *root)
5496 struct rb_node *node;
5497 struct rb_node *prev;
5498 struct btrfs_inode *entry;
5499 struct inode *inode;
5502 if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
5503 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5505 spin_lock(&root->inode_lock);
5507 node = root->inode_tree.rb_node;
5511 entry = rb_entry(node, struct btrfs_inode, rb_node);
5513 if (objectid < btrfs_ino(&entry->vfs_inode))
5514 node = node->rb_left;
5515 else if (objectid > btrfs_ino(&entry->vfs_inode))
5516 node = node->rb_right;
5522 entry = rb_entry(prev, struct btrfs_inode, rb_node);
5523 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
5527 prev = rb_next(prev);
5531 entry = rb_entry(node, struct btrfs_inode, rb_node);
5532 objectid = btrfs_ino(&entry->vfs_inode) + 1;
5533 inode = igrab(&entry->vfs_inode);
5535 spin_unlock(&root->inode_lock);
5536 if (atomic_read(&inode->i_count) > 1)
5537 d_prune_aliases(inode);
5539 * btrfs_drop_inode will have it removed from
5540 * the inode cache when its usage count hits zero.
5545 spin_lock(&root->inode_lock);
5549 if (cond_resched_lock(&root->inode_lock))
5552 node = rb_next(node);
5554 spin_unlock(&root->inode_lock);
5557 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5559 struct btrfs_iget_args *args = p;
5560 inode->i_ino = args->location->objectid;
5561 memcpy(&BTRFS_I(inode)->location, args->location,
5562 sizeof(*args->location));
5563 BTRFS_I(inode)->root = args->root;
5567 static int btrfs_find_actor(struct inode *inode, void *opaque)
5569 struct btrfs_iget_args *args = opaque;
5570 return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5571 args->root == BTRFS_I(inode)->root;
5574 static struct inode *btrfs_iget_locked(struct super_block *s,
5575 struct btrfs_key *location,
5576 struct btrfs_root *root)
5578 struct inode *inode;
5579 struct btrfs_iget_args args;
5580 unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5582 args.location = location;
5585 inode = iget5_locked(s, hashval, btrfs_find_actor,
5586 btrfs_init_locked_inode,
5591 /* Get an inode object given its location and corresponding root.
5592 * Returns in *new whether the inode was read from disk
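 *
 * A hypothetical usage sketch; the key layout mirrors what
 * btrfs_new_inode() stores in BTRFS_I(inode)->location:
 *
 *	struct btrfs_key location = {
 *		.objectid = ino,
 *		.type = BTRFS_INODE_ITEM_KEY,
 *		.offset = 0,
 *	};
 *	inode = btrfs_iget(sb, &location, root, NULL);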
5594 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5595 struct btrfs_root *root, int *new)
5597 struct inode *inode;
5599 inode = btrfs_iget_locked(s, location, root);
5601 return ERR_PTR(-ENOMEM);
5603 if (inode->i_state & I_NEW) {
5604 btrfs_read_locked_inode(inode);
5605 if (!is_bad_inode(inode)) {
5606 inode_tree_add(inode);
5607 unlock_new_inode(inode);
5611 unlock_new_inode(inode);
5613 inode = ERR_PTR(-ESTALE);
5620 static struct inode *new_simple_dir(struct super_block *s,
5621 struct btrfs_key *key,
5622 struct btrfs_root *root)
5624 struct inode *inode = new_inode(s);
5627 return ERR_PTR(-ENOMEM);
5629 BTRFS_I(inode)->root = root;
5630 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5631 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5633 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5634 inode->i_op = &btrfs_dir_ro_inode_operations;
5635 inode->i_fop = &simple_dir_operations;
5636 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5637 inode->i_mtime = CURRENT_TIME;
5638 inode->i_atime = inode->i_mtime;
5639 inode->i_ctime = inode->i_mtime;
5640 BTRFS_I(inode)->i_otime = inode->i_mtime;
5645 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5647 struct inode *inode;
5648 struct btrfs_root *root = BTRFS_I(dir)->root;
5649 struct btrfs_root *sub_root = root;
5650 struct btrfs_key location;
5654 if (dentry->d_name.len > BTRFS_NAME_LEN)
5655 return ERR_PTR(-ENAMETOOLONG);
5657 ret = btrfs_inode_by_name(dir, dentry, &location);
5659 return ERR_PTR(ret);
5661 if (location.objectid == 0)
5662 return ERR_PTR(-ENOENT);
5664 if (location.type == BTRFS_INODE_ITEM_KEY) {
5665 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5669 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5671 index = srcu_read_lock(&root->fs_info->subvol_srcu);
5672 ret = fixup_tree_root_location(root, dir, dentry,
5673 &location, &sub_root);
5676 inode = ERR_PTR(ret);
5678 inode = new_simple_dir(dir->i_sb, &location, sub_root);
5680 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5682 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5684 if (!IS_ERR(inode) && root != sub_root) {
5685 down_read(&root->fs_info->cleanup_work_sem);
5686 if (!(inode->i_sb->s_flags & MS_RDONLY))
5687 ret = btrfs_orphan_cleanup(sub_root);
5688 up_read(&root->fs_info->cleanup_work_sem);
5691 inode = ERR_PTR(ret);
5698 static int btrfs_dentry_delete(const struct dentry *dentry)
5700 struct btrfs_root *root;
5701 struct inode *inode = d_inode(dentry);
5703 if (!inode && !IS_ROOT(dentry))
5704 inode = d_inode(dentry->d_parent);
5707 root = BTRFS_I(inode)->root;
5708 if (btrfs_root_refs(&root->root_item) == 0)
5711 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5717 static void btrfs_dentry_release(struct dentry *dentry)
5719 kfree(dentry->d_fsdata);
5722 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5725 struct inode *inode;
5727 inode = btrfs_lookup_dentry(dir, dentry);
5728 if (IS_ERR(inode)) {
5729 if (PTR_ERR(inode) == -ENOENT)
5732 return ERR_CAST(inode);
5735 return d_splice_alias(inode, dentry);
5738 unsigned char btrfs_filetype_table[] = {
5739 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5742 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5744 struct inode *inode = file_inode(file);
5745 struct btrfs_root *root = BTRFS_I(inode)->root;
5746 struct btrfs_item *item;
5747 struct btrfs_dir_item *di;
5748 struct btrfs_key key;
5749 struct btrfs_key found_key;
5750 struct btrfs_path *path;
5751 struct list_head ins_list;
5752 struct list_head del_list;
5754 struct extent_buffer *leaf;
5756 unsigned char d_type;
5761 int key_type = BTRFS_DIR_INDEX_KEY;
5765 int is_curr = 0; /* ctx->pos points to the current index? */
5767 /* FIXME, use a real flag for deciding about the key type */
5768 if (root->fs_info->tree_root == root)
5769 key_type = BTRFS_DIR_ITEM_KEY;
5771 if (!dir_emit_dots(file, ctx))
5774 path = btrfs_alloc_path();
5780 if (key_type == BTRFS_DIR_INDEX_KEY) {
5781 INIT_LIST_HEAD(&ins_list);
5782 INIT_LIST_HEAD(&del_list);
5783 btrfs_get_delayed_items(inode, &ins_list, &del_list);
5786 key.type = key_type;
5787 key.offset = ctx->pos;
5788 key.objectid = btrfs_ino(inode);
5790 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5795 leaf = path->nodes[0];
5796 slot = path->slots[0];
5797 if (slot >= btrfs_header_nritems(leaf)) {
5798 ret = btrfs_next_leaf(root, path);
5806 item = btrfs_item_nr(slot);
5807 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5809 if (found_key.objectid != key.objectid)
5811 if (found_key.type != key_type)
5813 if (found_key.offset < ctx->pos)
5815 if (key_type == BTRFS_DIR_INDEX_KEY &&
5816 btrfs_should_delete_dir_index(&del_list,
5820 ctx->pos = found_key.offset;
5823 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5825 di_total = btrfs_item_size(leaf, item);
5827 while (di_cur < di_total) {
5828 struct btrfs_key location;
5830 if (verify_dir_item(root, leaf, di))
5833 name_len = btrfs_dir_name_len(leaf, di);
5834 if (name_len <= sizeof(tmp_name)) {
5835 name_ptr = tmp_name;
5837 name_ptr = kmalloc(name_len, GFP_NOFS);
5843 read_extent_buffer(leaf, name_ptr,
5844 (unsigned long)(di + 1), name_len);
5846 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5847 btrfs_dir_item_key_to_cpu(leaf, di, &location);
5850 /* is this a reference to our own snapshot? If so, skip it.
5853 * In contrast to old kernels, we insert the snapshot's
5854 * dir item and dir index after it has been created, so
5855 * we won't find a reference to our own snapshot. We
5856 * still keep the following code for backward compatibility.
5859 if (location.type == BTRFS_ROOT_ITEM_KEY &&
5860 location.objectid == root->root_key.objectid) {
5864 over = !dir_emit(ctx, name_ptr, name_len,
5865 location.objectid, d_type);
5868 if (name_ptr != tmp_name)
5873 di_len = btrfs_dir_name_len(leaf, di) +
5874 btrfs_dir_data_len(leaf, di) + sizeof(*di);
5876 di = (struct btrfs_dir_item *)((char *)di + di_len);
5882 if (key_type == BTRFS_DIR_INDEX_KEY) {
5885 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5890 /* Reached end of directory/root. Bump pos past the last item. */
5894 * Stop new entries from being returned after we return the last entry.
5897 * New directory entries are assigned a strictly increasing
5898 * offset. This means that new entries created during readdir
5899 * are *guaranteed* to be seen in the future by that readdir.
5900 * This has broken buggy programs which operate on names as
5901 * they're returned by readdir. Until we re-use freed offsets
5902 * we have this hack to stop new entries from being returned
5903 * under the assumption that they'll never reach this huge offset.
5906 * This is being careful not to overflow 32bit loff_t unless the
5907 * last entry requires it because doing so has broken 32bit apps in the past.
5910 if (key_type == BTRFS_DIR_INDEX_KEY) {
5911 if (ctx->pos >= INT_MAX)
5912 ctx->pos = LLONG_MAX;
5919 if (key_type == BTRFS_DIR_INDEX_KEY)
5920 btrfs_put_delayed_items(&ins_list, &del_list);
5921 btrfs_free_path(path);
5925 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5927 struct btrfs_root *root = BTRFS_I(inode)->root;
5928 struct btrfs_trans_handle *trans;
5930 bool nolock = false;
5932 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5935 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5938 if (wbc->sync_mode == WB_SYNC_ALL) {
5940 trans = btrfs_join_transaction_nolock(root);
5942 trans = btrfs_join_transaction(root);
5944 return PTR_ERR(trans);
5945 ret = btrfs_commit_transaction(trans, root);
5951 * This is somewhat expensive, updating the tree every time the
5952 * inode changes. But it is most likely to find the inode in cache.
5953 * FIXME: needs more benchmarking... there are no reasons other than performance
5954 * to keep or drop this code.
5956 static int btrfs_dirty_inode(struct inode *inode)
5958 struct btrfs_root *root = BTRFS_I(inode)->root;
5959 struct btrfs_trans_handle *trans;
5962 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5965 trans = btrfs_join_transaction(root);
5967 return PTR_ERR(trans);
5969 ret = btrfs_update_inode(trans, root, inode);
5970 if (ret == -ENOSPC) {
5971 /* whoops, let's try again with the full transaction */
5972 btrfs_end_transaction(trans, root);
5973 trans = btrfs_start_transaction(root, 1);
5975 return PTR_ERR(trans);
5977 ret = btrfs_update_inode(trans, root, inode);
5979 btrfs_end_transaction(trans, root);
5980 if (BTRFS_I(inode)->delayed_node)
5981 btrfs_balance_delayed_items(root);
5987 * This is a copy of file_update_time. We need it so we can return an error on
5988 * ENOSPC when updating the inode for file writes and mmap writes.
5990 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5993 struct btrfs_root *root = BTRFS_I(inode)->root;
5995 if (btrfs_root_readonly(root))
5998 if (flags & S_VERSION)
5999 inode_inc_iversion(inode);
6000 if (flags & S_CTIME)
6001 inode->i_ctime = *now;
6002 if (flags & S_MTIME)
6003 inode->i_mtime = *now;
6004 if (flags & S_ATIME)
6005 inode->i_atime = *now;
6006 return btrfs_dirty_inode(inode);
6010 * find the highest existing sequence number in a directory
6011 * and then set the in-memory index_cnt variable to the
6012 * next free sequence number
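 *
 * Worked example, grounded in the MAGIC NUMBER note below: an empty
 * directory yields index_cnt == 2 because f_pos 0 and 1 are taken by
 * '.' and '..', while a directory whose highest DIR_INDEX key offset
 * is N yields index_cnt == N + 1.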
6014 static int btrfs_set_inode_index_count(struct inode *inode)
6016 struct btrfs_root *root = BTRFS_I(inode)->root;
6017 struct btrfs_key key, found_key;
6018 struct btrfs_path *path;
6019 struct extent_buffer *leaf;
6022 key.objectid = btrfs_ino(inode);
6023 key.type = BTRFS_DIR_INDEX_KEY;
6024 key.offset = (u64)-1;
6026 path = btrfs_alloc_path();
6030 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6033 /* FIXME: we should be able to handle this */
6039 * MAGIC NUMBER EXPLANATION:
6040 * since we search a directory based on f_pos we have to start at 2
6041 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
6042 * else has to start at 2
6044 if (path->slots[0] == 0) {
6045 BTRFS_I(inode)->index_cnt = 2;
6051 leaf = path->nodes[0];
6052 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6054 if (found_key.objectid != btrfs_ino(inode) ||
6055 found_key.type != BTRFS_DIR_INDEX_KEY) {
6056 BTRFS_I(inode)->index_cnt = 2;
6060 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
6062 btrfs_free_path(path);
6067 * helper to find a free sequence number in a given directory. The current
6068 * code is very simple; later versions will do smarter things in the btree
6070 int btrfs_set_inode_index(struct inode *dir, u64 *index)
6074 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
6075 ret = btrfs_inode_delayed_dir_index_count(dir);
6077 ret = btrfs_set_inode_index_count(dir);
6083 *index = BTRFS_I(dir)->index_cnt;
6084 BTRFS_I(dir)->index_cnt++;
6089 static int btrfs_insert_inode_locked(struct inode *inode)
6091 struct btrfs_iget_args args;
6092 args.location = &BTRFS_I(inode)->location;
6093 args.root = BTRFS_I(inode)->root;
6095 return insert_inode_locked4(inode,
6096 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6097 btrfs_find_actor, &args);
6100 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6101 struct btrfs_root *root,
6103 const char *name, int name_len,
6104 u64 ref_objectid, u64 objectid,
6105 umode_t mode, u64 *index)
6107 struct inode *inode;
6108 struct btrfs_inode_item *inode_item;
6109 struct btrfs_key *location;
6110 struct btrfs_path *path;
6111 struct btrfs_inode_ref *ref;
6112 struct btrfs_key key[2];
6114 int nitems = name ? 2 : 1;
6118 path = btrfs_alloc_path();
6120 return ERR_PTR(-ENOMEM);
6122 inode = new_inode(root->fs_info->sb);
6124 btrfs_free_path(path);
6125 return ERR_PTR(-ENOMEM);
6129 * O_TMPFILE, set link count to 0, so that after this point,
6130 * we fill in an inode item with the correct link count.
6133 set_nlink(inode, 0);
6136 * we have to initialize this early, so we can reclaim the inode
6137 * number if we fail afterwards in this function.
6139 inode->i_ino = objectid;
6142 trace_btrfs_inode_request(dir);
6144 ret = btrfs_set_inode_index(dir, index);
6146 btrfs_free_path(path);
6148 return ERR_PTR(ret);
6154 * index_cnt is ignored for everything but a dir,
6155 * btrfs_set_inode_index_count has an explanation for the magic number
6158 BTRFS_I(inode)->index_cnt = 2;
6159 BTRFS_I(inode)->dir_index = *index;
6160 BTRFS_I(inode)->root = root;
6161 BTRFS_I(inode)->generation = trans->transid;
6162 inode->i_generation = BTRFS_I(inode)->generation;
6165 * We could have gotten an inode number from somebody who was fsynced
6166 * and then removed in this same transaction, so let's just set full
6167 * sync since it will be a full sync anyway and this will blow away the
6168 * old info in the log.
6170 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6172 key[0].objectid = objectid;
6173 key[0].type = BTRFS_INODE_ITEM_KEY;
6176 sizes[0] = sizeof(struct btrfs_inode_item);
6180 * Start new inodes with an inode_ref. This is slightly more
6181 * efficient for small numbers of hard links since they will
6182 * be packed into one item. Extended refs will kick in if we
6183 * add more hard links than can fit in the ref item.
6185 key[1].objectid = objectid;
6186 key[1].type = BTRFS_INODE_REF_KEY;
6187 key[1].offset = ref_objectid;
6189 sizes[1] = name_len + sizeof(*ref);
6192 location = &BTRFS_I(inode)->location;
6193 location->objectid = objectid;
6194 location->offset = 0;
6195 location->type = BTRFS_INODE_ITEM_KEY;
6197 ret = btrfs_insert_inode_locked(inode);
6201 path->leave_spinning = 1;
6202 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6206 inode_init_owner(inode, dir, mode);
6207 inode_set_bytes(inode, 0);
6209 inode->i_mtime = CURRENT_TIME;
6210 inode->i_atime = inode->i_mtime;
6211 inode->i_ctime = inode->i_mtime;
6212 BTRFS_I(inode)->i_otime = inode->i_mtime;
6214 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6215 struct btrfs_inode_item);
6216 memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
6217 sizeof(*inode_item));
6218 fill_inode_item(trans, path->nodes[0], inode_item, inode);
6221 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6222 struct btrfs_inode_ref);
6223 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6224 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6225 ptr = (unsigned long)(ref + 1);
6226 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6229 btrfs_mark_buffer_dirty(path->nodes[0]);
6230 btrfs_free_path(path);
6232 btrfs_inherit_iflags(inode, dir);
6234 if (S_ISREG(mode)) {
6235 if (btrfs_test_opt(root, NODATASUM))
6236 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6237 if (btrfs_test_opt(root, NODATACOW))
6238 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6239 BTRFS_INODE_NODATASUM;
6242 inode_tree_add(inode);
6244 trace_btrfs_inode_new(inode);
6245 btrfs_set_inode_last_trans(trans, inode);
6247 btrfs_update_root_times(trans, root);
6249 ret = btrfs_inode_inherit_props(trans, inode, dir);
6251 btrfs_err(root->fs_info,
6252 "error inheriting props for ino %llu (root %llu): %d",
6253 btrfs_ino(inode), root->root_key.objectid, ret);
6258 unlock_new_inode(inode);
6261 BTRFS_I(dir)->index_cnt--;
6262 btrfs_free_path(path);
6264 return ERR_PTR(ret);
6267 static inline u8 btrfs_inode_type(struct inode *inode)
6269 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6273 * utility function to add 'inode' into 'parent_inode' with
6274 * a given name and a given sequence number.
6275 * if 'add_backref' is true, also insert a backref from the
6276 * inode to the parent directory.
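 *
 * Roughly, one link creates these items (assuming add_backref is set
 * and the inode is not a subvolume root):
 *
 *	DIR_ITEM  keyed on (parent_ino, hash(name)) -> name + location key
 *	DIR_INDEX keyed on (parent_ino, index)      -> name + location key
 *	INODE_REF keyed on (ino, parent_ino)        -> name + index
 *
 * plus an i_size bump on the parent directory to account for the name.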
6278 int btrfs_add_link(struct btrfs_trans_handle *trans,
6279 struct inode *parent_inode, struct inode *inode,
6280 const char *name, int name_len, int add_backref, u64 index)
6283 struct btrfs_key key;
6284 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
6285 u64 ino = btrfs_ino(inode);
6286 u64 parent_ino = btrfs_ino(parent_inode);
6288 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6289 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
6292 key.type = BTRFS_INODE_ITEM_KEY;
6296 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6297 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
6298 key.objectid, root->root_key.objectid,
6299 parent_ino, index, name, name_len);
6300 } else if (add_backref) {
6301 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6305 /* Nothing to clean up yet */
6309 ret = btrfs_insert_dir_item(trans, root, name, name_len,
6311 btrfs_inode_type(inode), index);
6312 if (ret == -EEXIST || ret == -EOVERFLOW)
6315 btrfs_abort_transaction(trans, root, ret);
6319 btrfs_i_size_write(parent_inode, parent_inode->i_size +
6321 inode_inc_iversion(parent_inode);
6322 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
6323 ret = btrfs_update_inode(trans, root, parent_inode);
6325 btrfs_abort_transaction(trans, root, ret);
6329 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6332 err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
6333 key.objectid, root->root_key.objectid,
6334 parent_ino, &local_index, name, name_len);
6336 } else if (add_backref) {
6340 err = btrfs_del_inode_ref(trans, root, name, name_len,
6341 ino, parent_ino, &local_index);
6346 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6347 struct inode *dir, struct dentry *dentry,
6348 struct inode *inode, int backref, u64 index)
6350 int err = btrfs_add_link(trans, dir, inode,
6351 dentry->d_name.name, dentry->d_name.len,
6358 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6359 umode_t mode, dev_t rdev)
6361 struct btrfs_trans_handle *trans;
6362 struct btrfs_root *root = BTRFS_I(dir)->root;
6363 struct inode *inode = NULL;
6370 * 2 for inode item and ref
6372 * 1 for xattr if selinux is on
6374 trans = btrfs_start_transaction(root, 5);
6376 return PTR_ERR(trans);
6378 err = btrfs_find_free_ino(root, &objectid);
6382 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6383 dentry->d_name.len, btrfs_ino(dir), objectid,
6385 if (IS_ERR(inode)) {
6386 err = PTR_ERR(inode);
6391 * If the active LSM wants to access the inode during
6392 * d_instantiate it needs these. Smack checks to see
6393 * if the filesystem supports xattrs by looking at the ops vector.
6396 inode->i_op = &btrfs_special_inode_operations;
6397 init_special_inode(inode, inode->i_mode, rdev);
6399 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6401 goto out_unlock_inode;
6403 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6405 goto out_unlock_inode;
6407 btrfs_update_inode(trans, root, inode);
6408 unlock_new_inode(inode);
6409 d_instantiate(dentry, inode);
6413 btrfs_end_transaction(trans, root);
6414 btrfs_balance_delayed_items(root);
6415 btrfs_btree_balance_dirty(root);
6417 inode_dec_link_count(inode);
6424 unlock_new_inode(inode);
6429 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6430 umode_t mode, bool excl)
6432 struct btrfs_trans_handle *trans;
6433 struct btrfs_root *root = BTRFS_I(dir)->root;
6434 struct inode *inode = NULL;
6435 int drop_inode_on_err = 0;
6441 * 2 for inode item and ref
6443 * 1 for xattr if selinux is on
6445 trans = btrfs_start_transaction(root, 5);
6447 return PTR_ERR(trans);
6449 err = btrfs_find_free_ino(root, &objectid);
6453 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6454 dentry->d_name.len, btrfs_ino(dir), objectid,
6456 if (IS_ERR(inode)) {
6457 err = PTR_ERR(inode);
6460 drop_inode_on_err = 1;
6462 * If the active LSM wants to access the inode during
6463 * d_instantiate it needs these. Smack checks to see
6464 * if the filesystem supports xattrs by looking at the ops vector.
6467 inode->i_fop = &btrfs_file_operations;
6468 inode->i_op = &btrfs_file_inode_operations;
6469 inode->i_mapping->a_ops = &btrfs_aops;
6471 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6473 goto out_unlock_inode;
6475 err = btrfs_update_inode(trans, root, inode);
6477 goto out_unlock_inode;
6479 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6481 goto out_unlock_inode;
6483 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6484 unlock_new_inode(inode);
6485 d_instantiate(dentry, inode);
6488 btrfs_end_transaction(trans, root);
6489 if (err && drop_inode_on_err) {
6490 inode_dec_link_count(inode);
6493 btrfs_balance_delayed_items(root);
6494 btrfs_btree_balance_dirty(root);
6498 unlock_new_inode(inode);
6503 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6504 struct dentry *dentry)
6506 struct btrfs_trans_handle *trans;
6507 struct btrfs_root *root = BTRFS_I(dir)->root;
6508 struct inode *inode = d_inode(old_dentry);
6513 /* do not allow hard links across subvolumes of the same device */
6514 if (root->objectid != BTRFS_I(inode)->root->objectid)
6517 if (inode->i_nlink >= BTRFS_LINK_MAX)
6520 err = btrfs_set_inode_index(dir, &index);
6525 * 2 items for inode and inode ref
6526 * 2 items for dir items
6527 * 1 item for parent inode
6529 trans = btrfs_start_transaction(root, 5);
6530 if (IS_ERR(trans)) {
6531 err = PTR_ERR(trans);
6535 /* There are several dir indexes for this inode, clear the cache. */
6536 BTRFS_I(inode)->dir_index = 0ULL;
6538 inode_inc_iversion(inode);
6539 inode->i_ctime = CURRENT_TIME;
6541 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6543 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
6548 struct dentry *parent = dentry->d_parent;
6549 err = btrfs_update_inode(trans, root, inode);
6552 if (inode->i_nlink == 1) {
6554 * If the new hard link count is 1, it's a file created
6555 * with the open(2) O_TMPFILE flag.
6557 err = btrfs_orphan_del(trans, inode);
6561 d_instantiate(dentry, inode);
6562 btrfs_log_new_name(trans, inode, NULL, parent);
6565 btrfs_end_transaction(trans, root);
6566 btrfs_balance_delayed_items(root);
6569 inode_dec_link_count(inode);
6572 btrfs_btree_balance_dirty(root);
6576 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6578 struct inode *inode = NULL;
6579 struct btrfs_trans_handle *trans;
6580 struct btrfs_root *root = BTRFS_I(dir)->root;
6582 int drop_on_err = 0;
6587 * 2 items for inode and ref
6588 * 2 items for dir items
6589 * 1 for xattr if selinux is on
6591 trans = btrfs_start_transaction(root, 5);
6593 return PTR_ERR(trans);
6595 err = btrfs_find_free_ino(root, &objectid);
6599 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6600 dentry->d_name.len, btrfs_ino(dir), objectid,
6601 S_IFDIR | mode, &index);
6602 if (IS_ERR(inode)) {
6603 err = PTR_ERR(inode);
6608 /* these must be set before we unlock the inode */
6609 inode->i_op = &btrfs_dir_inode_operations;
6610 inode->i_fop = &btrfs_dir_file_operations;
6612 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6614 goto out_fail_inode;
6616 btrfs_i_size_write(inode, 0);
6617 err = btrfs_update_inode(trans, root, inode);
6619 goto out_fail_inode;
6621 err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
6622 dentry->d_name.len, 0, index);
6624 goto out_fail_inode;
6626 d_instantiate(dentry, inode);
6628 * mkdir is special. We're unlocking after we call d_instantiate
6629 * to avoid a race with nfsd calling d_instantiate.
6631 unlock_new_inode(inode);
6635 btrfs_end_transaction(trans, root);
6637 inode_dec_link_count(inode);
6640 btrfs_balance_delayed_items(root);
6641 btrfs_btree_balance_dirty(root);
6645 unlock_new_inode(inode);
6649 /* Find the next extent map after a given one; the caller must hold the proper locks */
6650 static struct extent_map *next_extent_map(struct extent_map *em)
6652 struct rb_node *next;
6654 next = rb_next(&em->rb_node);
6657 return container_of(next, struct extent_map, rb_node);
6660 static struct extent_map *prev_extent_map(struct extent_map *em)
6662 struct rb_node *prev;
6664 prev = rb_prev(&em->rb_node);
6667 return container_of(prev, struct extent_map, rb_node);
6670 /* helper for btrfs_get_extent. Given an existing extent in the tree
6671 * (the existing extent is the nearest extent to map_start)
6672 * and an extent that you want to insert, deal with overlap and insert
6673 * the best-fitting new extent into the tree.
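 *
 * Worked example with hypothetical numbers: if the extent to insert is
 * em = [0, 16K) and the nearest existing extent is [0, 8K), then em is
 * clipped to [8K, 16K); start_diff == 8K is also added to
 * em->block_start for uncompressed, on-disk extents so the logical and
 * disk offsets stay in step.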
6675 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6676 struct extent_map *existing,
6677 struct extent_map *em,
6680 struct extent_map *prev;
6681 struct extent_map *next;
6686 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6688 if (existing->start > map_start) {
6690 prev = prev_extent_map(next);
6693 next = next_extent_map(prev);
6696 start = prev ? extent_map_end(prev) : em->start;
6697 start = max_t(u64, start, em->start);
6698 end = next ? next->start : extent_map_end(em);
6699 end = min_t(u64, end, extent_map_end(em));
6700 start_diff = start - em->start;
6702 em->len = end - start;
6703 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6704 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6705 em->block_start += start_diff;
6706 em->block_len -= start_diff;
6708 return add_extent_mapping(em_tree, em, 0);
6711 static noinline int uncompress_inline(struct btrfs_path *path,
6712 struct inode *inode, struct page *page,
6713 size_t pg_offset, u64 extent_offset,
6714 struct btrfs_file_extent_item *item)
6717 struct extent_buffer *leaf = path->nodes[0];
6720 unsigned long inline_size;
6724 WARN_ON(pg_offset != 0);
6725 compress_type = btrfs_file_extent_compression(leaf, item);
6726 max_size = btrfs_file_extent_ram_bytes(leaf, item);
6727 inline_size = btrfs_file_extent_inline_item_len(leaf,
6728 btrfs_item_nr(path->slots[0]));
6729 tmp = kmalloc(inline_size, GFP_NOFS);
6732 ptr = btrfs_file_extent_inline_start(item);
6734 read_extent_buffer(leaf, tmp, ptr, inline_size);
6736 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
6737 ret = btrfs_decompress(compress_type, tmp, page,
6738 extent_offset, inline_size, max_size);
6744 * A bit scary: this does extent mapping from the logical file offset to the disk.
6745 * The ugly parts come from merging extents from the disk with the in-ram
6746 * representation. This gets more complex because of the data=ordered code,
6747 * where the in-ram extents might be locked pending data=ordered completion.
6749 * This also copies inline extents directly into the page.
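 *
 * A read-side usage sketch (create == 0, so nothing is allocated and
 * holes come back with block_start == EXTENT_MAP_HOLE):
 *
 *	em = btrfs_get_extent(inode, page, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	... consume em->block_start and em->len ...
 *	free_extent_map(em);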
6752 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6753 size_t pg_offset, u64 start, u64 len,
6758 u64 extent_start = 0;
6760 u64 objectid = btrfs_ino(inode);
6762 struct btrfs_path *path = NULL;
6763 struct btrfs_root *root = BTRFS_I(inode)->root;
6764 struct btrfs_file_extent_item *item;
6765 struct extent_buffer *leaf;
6766 struct btrfs_key found_key;
6767 struct extent_map *em = NULL;
6768 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6769 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6770 struct btrfs_trans_handle *trans = NULL;
6771 const bool new_inline = !page || create;
6774 read_lock(&em_tree->lock);
6775 em = lookup_extent_mapping(em_tree, start, len);
6777 em->bdev = root->fs_info->fs_devices->latest_bdev;
6778 read_unlock(&em_tree->lock);
6781 if (em->start > start || em->start + em->len <= start)
6782 free_extent_map(em);
6783 else if (em->block_start == EXTENT_MAP_INLINE && page)
6784 free_extent_map(em);
6788 em = alloc_extent_map();
6793 em->bdev = root->fs_info->fs_devices->latest_bdev;
6794 em->start = EXTENT_MAP_HOLE;
6795 em->orig_start = EXTENT_MAP_HOLE;
6797 em->block_len = (u64)-1;
6800 path = btrfs_alloc_path();
6806 * Chances are we'll be called again, so go ahead and do readahead.
6812 ret = btrfs_lookup_file_extent(trans, root, path,
6813 objectid, start, trans != NULL);
6820 if (path->slots[0] == 0)
6825 leaf = path->nodes[0];
6826 item = btrfs_item_ptr(leaf, path->slots[0],
6827 struct btrfs_file_extent_item);
6828 /* are we inside the extent that was found? */
6829 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6830 found_type = found_key.type;
6831 if (found_key.objectid != objectid ||
6832 found_type != BTRFS_EXTENT_DATA_KEY) {
6834 * If we back up past the first extent we want to move forward
6835 * and see if there is an extent in front of us, otherwise we'll
6836 * say there is a hole for our whole search range, which can cause problems.
6843 found_type = btrfs_file_extent_type(leaf, item);
6844 extent_start = found_key.offset;
6845 if (found_type == BTRFS_FILE_EXTENT_REG ||
6846 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6847 extent_end = extent_start +
6848 btrfs_file_extent_num_bytes(leaf, item);
6849 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6851 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6852 extent_end = ALIGN(extent_start + size, root->sectorsize);
6855 if (start >= extent_end) {
6857 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6858 ret = btrfs_next_leaf(root, path);
6865 leaf = path->nodes[0];
6867 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6868 if (found_key.objectid != objectid ||
6869 found_key.type != BTRFS_EXTENT_DATA_KEY)
6871 if (start + len <= found_key.offset)
6873 if (start > found_key.offset)
6876 em->orig_start = start;
6877 em->len = found_key.offset - start;
6881 btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em);
6883 if (found_type == BTRFS_FILE_EXTENT_REG ||
6884 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6886 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6890 size_t extent_offset;
6896 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6897 extent_offset = page_offset(page) + pg_offset - extent_start;
6898 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6899 size - extent_offset);
6900 em->start = extent_start + extent_offset;
6901 em->len = ALIGN(copy_size, root->sectorsize);
6902 em->orig_block_len = em->len;
6903 em->orig_start = em->start;
6904 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6905 if (create == 0 && !PageUptodate(page)) {
6906 if (btrfs_file_extent_compression(leaf, item) !=
6907 BTRFS_COMPRESS_NONE) {
6908 ret = uncompress_inline(path, inode, page,
6910 extent_offset, item);
6917 read_extent_buffer(leaf, map + pg_offset, ptr,
6919 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6920 memset(map + pg_offset + copy_size, 0,
6921 PAGE_CACHE_SIZE - pg_offset -
6926 flush_dcache_page(page);
6927 } else if (create && PageUptodate(page)) {
6931 free_extent_map(em);
6934 btrfs_release_path(path);
6935 trans = btrfs_join_transaction(root);
6938 return ERR_CAST(trans);
6942 write_extent_buffer(leaf, map + pg_offset, ptr,
6945 btrfs_mark_buffer_dirty(leaf);
6947 set_extent_uptodate(io_tree, em->start,
6948 extent_map_end(em) - 1, NULL, GFP_NOFS);
6953 em->orig_start = start;
6956 em->block_start = EXTENT_MAP_HOLE;
6957 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6959 btrfs_release_path(path);
6960 if (em->start > start || extent_map_end(em) <= start) {
6961 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6962 em->start, em->len, start, len);
6968 write_lock(&em_tree->lock);
6969 ret = add_extent_mapping(em_tree, em, 0);
6970 /* it is possible that someone inserted the extent into the tree
6971 * while we had the lock dropped. It is also possible that
6972 * an overlapping map exists in the tree
6974 if (ret == -EEXIST) {
6975 struct extent_map *existing;
6979 existing = search_extent_mapping(em_tree, start, len);
6981 * existing will always be non-NULL, since there must be
6982 * an extent causing the -EEXIST.
6984 if (start >= extent_map_end(existing) ||
6985 start <= existing->start) {
6987 * The existing extent map is the one nearest to
6988 * the [start, start + len) range which overlaps it.
6990 err = merge_extent_mapping(em_tree, existing,
6992 free_extent_map(existing);
6994 free_extent_map(em);
6998 free_extent_map(em);
7003 write_unlock(&em_tree->lock);
7006 trace_btrfs_get_extent(root, em);
7008 btrfs_free_path(path);
7010 ret = btrfs_end_transaction(trans, root);
7015 free_extent_map(em);
7016 return ERR_PTR(err);
7018 BUG_ON(!em); /* Error is always set */
7022 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
7023 size_t pg_offset, u64 start, u64 len,
7026 struct extent_map *em;
7027 struct extent_map *hole_em = NULL;
7028 u64 range_start = start;
7034 em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7041 * - a pre-alloc extent,
7042 * there might actually be delalloc bytes behind it.
7044 if (em->block_start != EXTENT_MAP_HOLE &&
7045 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7051 /* check to see if we've wrapped (len == -1 or similar) */
7060 /* ok, we didn't find anything, lets look for delalloc */
7061 found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
7062 end, len, EXTENT_DELALLOC, 1);
7063 found_end = range_start + found;
7064 if (found_end < range_start)
7065 found_end = (u64)-1;
/*
 * we didn't find anything useful, return
 * the original results from get_extent()
 */
7071 if (range_start > end || found_end <= start) {
/*
 * adjust the range_start to make sure it doesn't
 * go backwards from the start they passed in
 */
7080 range_start = max(start, range_start);
7081 found = found_end - range_start;
7084 u64 hole_start = start;
7087 em = alloc_extent_map();
/*
 * when btrfs_get_extent can't find anything it
 * returns one huge hole
 *
 * make sure what it found really fits our range, and
 * adjust to make sure it is based on the start from
 * the caller
 */
7101 u64 calc_end = extent_map_end(hole_em);
7103 if (calc_end <= start || (hole_em->start > end)) {
7104 free_extent_map(hole_em);
7107 hole_start = max(hole_em->start, start);
7108 hole_len = calc_end - hole_start;
7112 if (hole_em && range_start > hole_start) {
/*
 * our hole starts before our delalloc, so we
 * have to return just the parts of the hole
 * that go until the delalloc starts
 */
7117 em->len = min(hole_len,
7118 range_start - hole_start);
7119 em->start = hole_start;
7120 em->orig_start = hole_start;
/*
 * don't adjust block start at all,
 * it is fixed at EXTENT_MAP_HOLE
 */
7125 em->block_start = hole_em->block_start;
7126 em->block_len = hole_len;
7127 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7128 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7130 em->start = range_start;
7132 em->orig_start = range_start;
7133 em->block_start = EXTENT_MAP_DELALLOC;
7134 em->block_len = found;
7136 } else if (hole_em) {
7141 free_extent_map(hole_em);
7143 free_extent_map(em);
7144 return ERR_PTR(err);
7149 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7152 struct btrfs_root *root = BTRFS_I(inode)->root;
7153 struct extent_map *em;
7154 struct btrfs_key ins;
7158 alloc_hint = get_extent_allocation_hint(inode, start, len);
7159 ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
7160 alloc_hint, &ins, 1, 1);
7162 return ERR_PTR(ret);
7164 em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
7165 ins.offset, ins.offset, ins.offset, 0);
7167 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7171 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
7172 ins.offset, ins.offset, 0);
7174 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7175 free_extent_map(em);
7176 return ERR_PTR(ret);
/*
 * returns 1 when the nocow is safe, < 0 on error, 0 if the
 * block must be COWed
 */
7186 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7187 u64 *orig_start, u64 *orig_block_len,
7190 struct btrfs_trans_handle *trans;
7191 struct btrfs_path *path;
7193 struct extent_buffer *leaf;
7194 struct btrfs_root *root = BTRFS_I(inode)->root;
7195 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7196 struct btrfs_file_extent_item *fi;
7197 struct btrfs_key key;
7204 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7206 path = btrfs_alloc_path();
7210 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7215 slot = path->slots[0];
7218 /* can't find the item, must cow */
7225 leaf = path->nodes[0];
7226 btrfs_item_key_to_cpu(leaf, &key, slot);
7227 if (key.objectid != btrfs_ino(inode) ||
7228 key.type != BTRFS_EXTENT_DATA_KEY) {
7229 /* not our file or wrong item type, must cow */
7233 if (key.offset > offset) {
7234 /* Wrong offset, must cow */
7238 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7239 found_type = btrfs_file_extent_type(leaf, fi);
7240 if (found_type != BTRFS_FILE_EXTENT_REG &&
7241 found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7242 /* not a regular extent, must cow */
7246 if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7249 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7250 if (extent_end <= offset)
7253 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7254 if (disk_bytenr == 0)
7257 if (btrfs_file_extent_compression(leaf, fi) ||
7258 btrfs_file_extent_encryption(leaf, fi) ||
7259 btrfs_file_extent_other_encoding(leaf, fi))
7262 backref_offset = btrfs_file_extent_offset(leaf, fi);
7265 *orig_start = key.offset - backref_offset;
7266 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7267 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7270 if (btrfs_extent_readonly(root, disk_bytenr))
7273 num_bytes = min(offset + *len, extent_end) - offset;
7274 if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7277 range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
7278 ret = test_range_bit(io_tree, offset, range_end,
7279 EXTENT_DELALLOC, 0, NULL);
7286 btrfs_release_path(path);
/*
 * look for other files referencing this extent, if we
 * find any we must cow
 */
7292 trans = btrfs_join_transaction(root);
7293 if (IS_ERR(trans)) {
7298 ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
7299 key.offset - backref_offset, disk_bytenr);
7300 btrfs_end_transaction(trans, root);
/*
 * adjust disk_bytenr and num_bytes to cover just the bytes
 * in this extent we are about to write.  If there
 * are any csums in that range we have to cow in order
 * to keep the csums correct
 */
7312 disk_bytenr += backref_offset;
7313 disk_bytenr += offset - key.offset;
7314 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
/*
 * all of the above have passed, it is safe to overwrite this extent
 * without cow
 */
7323 btrfs_free_path(path);
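/*
 * Lockless probe for any page cache page inside [start, end].  This is
 * the RCU radix tree walk that find_get_page() does, except it only
 * answers "does a page exist" and always drops the page reference
 * before returning.
 */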
7327 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7329 struct radix_tree_root *root = &inode->i_mapping->page_tree;
7331 void **pagep = NULL;
7332 struct page *page = NULL;
7336 start_idx = start >> PAGE_CACHE_SHIFT;
/* end is the last byte in the last page.  end == start is legal */
7341 end_idx = end >> PAGE_CACHE_SHIFT;
7345 /* Most of the code in this while loop is lifted from
7346 * find_get_page. It's been modified to begin searching from a
7347 * page and return just the first page found in that range. If the
7348 * found idx is less than or equal to the end idx then we know that
7349 * a page exists. If no pages are found or if those pages are
7350 * outside of the range then we're fine (yay!) */
7351 while (page == NULL &&
7352 radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7353 page = radix_tree_deref_slot(pagep);
7354 if (unlikely(!page))
7357 if (radix_tree_exception(page)) {
7358 if (radix_tree_deref_retry(page)) {
/*
 * Otherwise, shmem/tmpfs must be storing a swap entry
 * here as an exceptional entry: so return it without
 * attempting to raise page count.
 */
7368 break; /* TODO: Is this relevant for this use case? */
7371 if (!page_cache_get_speculative(page)) {
/*
 * Has the page moved?
 * This is part of the lockless pagecache protocol.  See
 * include/linux/pagemap.h for details.
 */
7381 if (unlikely(page != *pagep)) {
7382 page_cache_release(page);
7388 if (page->index <= end_idx)
7390 page_cache_release(page);
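/*
 * Lock the extent range for direct IO, looping until the range is free
 * of ordered extents and, for writes, of buffered pages.  Each retry
 * drops the extent lock, flushes or waits as needed, and relocks.
 */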
7397 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7398 struct extent_state **cached_state, int writing)
7400 struct btrfs_ordered_extent *ordered;
7404 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
/*
 * We're concerned with the entire range that we're going to be
 * doing DIO to, so we need to make sure there are no ordered
 * extents in this range.
 */
7411 ordered = btrfs_lookup_ordered_range(inode, lockstart,
7412 lockend - lockstart + 1);
/*
 * We need to make sure there are no buffered pages in this range
 * either; we could have raced between the invalidate in
 * generic_file_direct_write and locking the extent, and the invalidate
 * must happen so that reads after a write do not see stale data.
 */
if (!ordered &&
    (!writing ||
     !btrfs_page_exists_in_range(inode, lockstart, lockend)))
	break;
7426 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7427 cached_state, GFP_NOFS);
7430 btrfs_start_ordered_extent(inode, ordered, 1);
7431 btrfs_put_ordered_extent(ordered);
7433 /* Screw you mmap */
7434 ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
7437 ret = filemap_fdatawait_range(inode->i_mapping,
/*
 * If we found a page that couldn't be invalidated just
 * fall back to buffered.
 */
7447 ret = invalidate_inode_pages2_range(inode->i_mapping,
7448 lockstart >> PAGE_CACHE_SHIFT,
7449 lockend >> PAGE_CACHE_SHIFT);
7460 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
7461 u64 len, u64 orig_start,
7462 u64 block_start, u64 block_len,
7463 u64 orig_block_len, u64 ram_bytes,
7466 struct extent_map_tree *em_tree;
7467 struct extent_map *em;
7468 struct btrfs_root *root = BTRFS_I(inode)->root;
7471 em_tree = &BTRFS_I(inode)->extent_tree;
7472 em = alloc_extent_map();
7474 return ERR_PTR(-ENOMEM);
7477 em->orig_start = orig_start;
7478 em->mod_start = start;
7481 em->block_len = block_len;
7482 em->block_start = block_start;
7483 em->bdev = root->fs_info->fs_devices->latest_bdev;
7484 em->orig_block_len = orig_block_len;
7485 em->ram_bytes = ram_bytes;
7486 em->generation = -1;
7487 set_bit(EXTENT_FLAG_PINNED, &em->flags);
7488 if (type == BTRFS_ORDERED_PREALLOC)
7489 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7492 btrfs_drop_extent_cache(inode, em->start,
7493 em->start + em->len - 1, 0);
7494 write_lock(&em_tree->lock);
7495 ret = add_extent_mapping(em_tree, em, 1);
7496 write_unlock(&em_tree->lock);
7497 } while (ret == -EEXIST);
7500 free_extent_map(em);
7501 return ERR_PTR(ret);
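/*
 * Per-call direct IO bookkeeping, stashed in current->journal_info for
 * the duration of btrfs_direct_IO() so that btrfs_get_blocks_direct()
 * can reach it without an extra argument.
 */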
7507 struct btrfs_dio_data {
7508 u64 outstanding_extents;
7512 static void adjust_dio_outstanding_extents(struct inode *inode,
7513 struct btrfs_dio_data *dio_data,
7516 unsigned num_extents;
7518 num_extents = (unsigned) div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
7519 BTRFS_MAX_EXTENT_SIZE);
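/*
 * num_extents is len divided by BTRFS_MAX_EXTENT_SIZE, rounded up: for
 * example, with the usual 128M max extent size, a 257M direct IO
 * accounts for three outstanding extents.
 */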
/*
 * If we have an outstanding_extents count still set then we're
 * within our reservation, otherwise we need to adjust our inode
 * counter appropriately.
 */
7525 if (dio_data->outstanding_extents) {
7526 dio_data->outstanding_extents -= num_extents;
7528 spin_lock(&BTRFS_I(inode)->lock);
7529 BTRFS_I(inode)->outstanding_extents += num_extents;
7530 spin_unlock(&BTRFS_I(inode)->lock);
7534 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7535 struct buffer_head *bh_result, int create)
7537 struct extent_map *em;
7538 struct btrfs_root *root = BTRFS_I(inode)->root;
7539 struct extent_state *cached_state = NULL;
7540 struct btrfs_dio_data *dio_data = NULL;
7541 u64 start = iblock << inode->i_blkbits;
7542 u64 lockstart, lockend;
7543 u64 len = bh_result->b_size;
7544 int unlock_bits = EXTENT_LOCKED;
7548 unlock_bits |= EXTENT_DIRTY;
7550 len = min_t(u64, len, root->sectorsize);
7553 lockend = start + len - 1;
7555 if (current->journal_info) {
/*
 * Need to pull our outstanding extents and set journal_info to NULL so
 * that anything that needs to check if there's a transaction doesn't
 * get confused.
 */
7561 dio_data = current->journal_info;
7562 current->journal_info = NULL;
/*
 * If this errors out it's because we couldn't invalidate pagecache for
 * this range and we need to fall back to buffered.
 */
7569 if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7575 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
/*
 * Ok for INLINE and COMPRESSED extents we need to fall back on buffered
 * io.  INLINE is special, and we could probably kludge it in here, but
 * it's still buffered so for safety let's just fall back to the generic
 * buffered path.
 *
 * For COMPRESSED we _have_ to read the entire extent in so we can
 * decompress it, so there will be buffering required no matter what we
 * do, so go ahead and fall back to buffered.
 *
 * We return -ENOTBLK because that's what makes DIO go ahead and go back
 * to buffered IO.  Don't blame me, this is the price we pay for using
 * the generic code.
 */
7595 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7596 em->block_start == EXTENT_MAP_INLINE) {
7597 free_extent_map(em);
7602 /* Just a good old fashioned hole, return */
7603 if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7604 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7605 free_extent_map(em);
/*
 * We don't allocate a new extent in the following cases
 *
 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
 * existing extent.
 * 2) The extent is marked as PREALLOC.  We're good to go here and can
 * just use the extent.
 */
7619 len = min(len, em->len - (start - em->start));
7620 lockstart = start + len;
7624 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7625 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7626 em->block_start != EXTENT_MAP_HOLE)) {
7628 u64 block_start, orig_start, orig_block_len, ram_bytes;
7630 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7631 type = BTRFS_ORDERED_PREALLOC;
7633 type = BTRFS_ORDERED_NOCOW;
7634 len = min(len, em->len - (start - em->start));
7635 block_start = em->block_start + (start - em->start);
7637 if (can_nocow_extent(inode, start, &len, &orig_start,
7638 &orig_block_len, &ram_bytes) == 1) {
7639 if (type == BTRFS_ORDERED_PREALLOC) {
7640 free_extent_map(em);
7641 em = create_pinned_em(inode, start, len,
7652 ret = btrfs_add_ordered_extent_dio(inode, start,
7653 block_start, len, len, type);
7655 free_extent_map(em);
/*
 * this will cow the extent, reset the len in case we changed
 * it above
 */
7666 len = bh_result->b_size;
7667 free_extent_map(em);
7668 em = btrfs_new_extent_direct(inode, start, len);
7673 len = min(len, em->len - (start - em->start));
7675 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7677 bh_result->b_size = len;
7678 bh_result->b_bdev = em->bdev;
7679 set_buffer_mapped(bh_result);
7681 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7682 set_buffer_new(bh_result);
/*
 * Need to update the i_size under the extent lock so buffered
 * readers will get the updated i_size when we unlock.
 */
7688 if (start + len > i_size_read(inode))
7689 i_size_write(inode, start + len);
7691 adjust_dio_outstanding_extents(inode, dio_data, len);
7692 btrfs_free_reserved_data_space(inode, start, len);
7693 WARN_ON(dio_data->reserve < len);
7694 dio_data->reserve -= len;
7695 current->journal_info = dio_data;
/*
 * In the case of write we need to clear and unlock the entire range,
 * in the case of read we need to unlock only the end area that we
 * aren't using if there is any leftover space.
 */
7703 if (lockstart < lockend) {
7704 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7705 lockend, unlock_bits, 1, 0,
7706 &cached_state, GFP_NOFS);
7708 free_extent_state(cached_state);
7711 free_extent_map(em);
7716 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7717 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7720 current->journal_info = dio_data;
/*
 * Compensate the delalloc release we do in btrfs_direct_IO() when we
 * write less data than expected, so that we don't underflow our inode's
 * outstanding extents counter.
 */
7726 if (create && dio_data)
7727 adjust_dio_outstanding_extents(inode, dio_data, len);
7732 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7733 int rw, int mirror_num)
7735 struct btrfs_root *root = BTRFS_I(inode)->root;
7738 BUG_ON(rw & REQ_WRITE);
7742 ret = btrfs_bio_wq_end_io(root->fs_info, bio,
7743 BTRFS_WQ_ENDIO_DIO_REPAIR);
7747 ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
7753 static int btrfs_check_dio_repairable(struct inode *inode,
7754 struct bio *failed_bio,
7755 struct io_failure_record *failrec,
7760 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
7761 failrec->logical, failrec->len);
7762 if (num_copies == 1) {
/*
 * we only have a single copy of the data, so don't bother with
 * all the retry and error correction code that follows.  no
 * matter what the error is, it is very likely to persist.
 */
7768 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
7769 num_copies, failrec->this_mirror, failed_mirror);
7773 failrec->failed_mirror = failed_mirror;
7774 failrec->this_mirror++;
7775 if (failrec->this_mirror == failed_mirror)
7776 failrec->this_mirror++;
7778 if (failrec->this_mirror > num_copies) {
7779 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
7780 num_copies, failrec->this_mirror, failed_mirror);
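/*
 * Build and submit a repair bio for a single failed page: record the
 * failure, pick the next mirror via btrfs_check_dio_repairable(), and
 * resubmit just that page as a synchronous read.
 */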
7787 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
7788 struct page *page, u64 start, u64 end,
7789 int failed_mirror, bio_end_io_t *repair_endio,
7792 struct io_failure_record *failrec;
7798 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
7800 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7804 ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7807 free_io_failure(inode, failrec);
7811 if (failed_bio->bi_vcnt > 1)
7812 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
7814 read_mode = READ_SYNC;
7816 isector = start - btrfs_io_bio(failed_bio)->logical;
7817 isector >>= inode->i_sb->s_blocksize_bits;
7818 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7819 0, isector, repair_endio, repair_arg);
7821 free_io_failure(inode, failrec);
7825 btrfs_debug(BTRFS_I(inode)->root->fs_info,
7826 "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
7827 read_mode, failrec->this_mirror, failrec->in_validation);
7829 ret = submit_dio_repair_bio(inode, bio, read_mode,
7830 failrec->this_mirror);
7832 free_io_failure(inode, failrec);
7839 struct btrfs_retry_complete {
7840 struct completion done;
7841 struct inode *inode;
7846 static void btrfs_retry_endio_nocsum(struct bio *bio)
7848 struct btrfs_retry_complete *done = bio->bi_private;
7849 struct bio_vec *bvec;
7856 bio_for_each_segment_all(bvec, bio, i)
7857 clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
7859 complete(&done->done);
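/*
 * Retry path for NODATASUM inodes: with no checksums to verify, simply
 * resubmit every page of the failed bio to another mirror and wait for
 * each repair to complete.
 */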
7863 static int __btrfs_correct_data_nocsum(struct inode *inode,
7864 struct btrfs_io_bio *io_bio)
7866 struct bio_vec *bvec;
7867 struct btrfs_retry_complete done;
7872 start = io_bio->logical;
7875 bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7879 init_completion(&done.done);
7881 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7882 start + bvec->bv_len - 1,
7884 btrfs_retry_endio_nocsum, &done);
7888 wait_for_completion(&done.done);
7890 if (!done.uptodate) {
7891 /* We might have another mirror, so try again */
7895 start += bvec->bv_len;
7901 static void btrfs_retry_endio(struct bio *bio)
7903 struct btrfs_retry_complete *done = bio->bi_private;
7904 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7905 struct bio_vec *bvec;
7914 bio_for_each_segment_all(bvec, bio, i) {
7915 ret = __readpage_endio_check(done->inode, io_bio, i,
7917 done->start, bvec->bv_len);
7919 clean_io_failure(done->inode, done->start,
7925 done->uptodate = uptodate;
7927 complete(&done->done);
7931 static int __btrfs_subio_endio_read(struct inode *inode,
7932 struct btrfs_io_bio *io_bio, int err)
7934 struct bio_vec *bvec;
7935 struct btrfs_retry_complete done;
7942 start = io_bio->logical;
7945 bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7946 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
7947 0, start, bvec->bv_len);
7953 init_completion(&done.done);
7955 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7956 start + bvec->bv_len - 1,
7958 btrfs_retry_endio, &done);
7964 wait_for_completion(&done.done);
7966 if (!done.uptodate) {
7967 /* We might have another mirror, so try again */
7971 offset += bvec->bv_len;
7972 start += bvec->bv_len;
7978 static int btrfs_subio_endio_read(struct inode *inode,
7979 struct btrfs_io_bio *io_bio, int err)
7981 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7985 return __btrfs_correct_data_nocsum(inode, io_bio);
7989 return __btrfs_subio_endio_read(inode, io_bio, err);
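/*
 * Completion handler for the cloned direct IO read bio: run the csum
 * checks and any mirror repairs, unlock the extent range we locked at
 * submit time, then complete the original dio_bio.
 */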
7993 static void btrfs_endio_direct_read(struct bio *bio)
7995 struct btrfs_dio_private *dip = bio->bi_private;
7996 struct inode *inode = dip->inode;
7997 struct bio *dio_bio;
7998 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7999 int err = bio->bi_error;
8001 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8002 err = btrfs_subio_endio_read(inode, io_bio, err);
8004 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8005 dip->logical_offset + dip->bytes - 1);
8006 dio_bio = dip->dio_bio;
8010 dio_end_io(dio_bio, bio->bi_error);
8013 io_bio->end_io(io_bio, err);
8017 static void btrfs_endio_direct_write(struct bio *bio)
8019 struct btrfs_dio_private *dip = bio->bi_private;
8020 struct inode *inode = dip->inode;
8021 struct btrfs_root *root = BTRFS_I(inode)->root;
8022 struct btrfs_ordered_extent *ordered = NULL;
8023 u64 ordered_offset = dip->logical_offset;
8024 u64 ordered_bytes = dip->bytes;
8025 struct bio *dio_bio;
8029 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8036 btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
8037 finish_ordered_fn, NULL, NULL);
8038 btrfs_queue_work(root->fs_info->endio_write_workers,
/*
 * our bio might span multiple ordered extents.  If we haven't
 * completed the accounting for the whole dio, go back and try again
 */
8045 if (ordered_offset < dip->logical_offset + dip->bytes) {
8046 ordered_bytes = dip->logical_offset + dip->bytes -
8051 dio_bio = dip->dio_bio;
8055 dio_end_io(dio_bio, bio->bi_error);
8059 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
8060 struct bio *bio, int mirror_num,
8061 unsigned long bio_flags, u64 offset)
8064 struct btrfs_root *root = BTRFS_I(inode)->root;
8065 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
8066 BUG_ON(ret); /* -ENOMEM */
8070 static void btrfs_end_dio_bio(struct bio *bio)
8072 struct btrfs_dio_private *dip = bio->bi_private;
8073 int err = bio->bi_error;
8076 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8077 "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
8078 btrfs_ino(dip->inode), bio->bi_rw,
8079 (unsigned long long)bio->bi_iter.bi_sector,
8080 bio->bi_iter.bi_size, err);
8082 if (dip->subio_endio)
8083 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
/*
 * before the atomic variable goes to zero, we must make sure
 * dip->errors is perceived to be set.
 */
8092 smp_mb__before_atomic();
8095 /* if there are more bios still pending for this dio, just exit */
8096 if (!atomic_dec_and_test(&dip->pending_bios))
8100 bio_io_error(dip->orig_bio);
8102 dip->dio_bio->bi_error = 0;
8103 bio_endio(dip->orig_bio);
8109 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
8110 u64 first_sector, gfp_t gfp_flags)
8113 bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
8115 bio_associate_current(bio);
8119 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
8120 struct inode *inode,
8121 struct btrfs_dio_private *dip,
8125 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8126 struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
/*
 * We load all the csum data we need when we submit
 * the first bio to reduce the csum tree search and
 * contention.
 */
8134 if (dip->logical_offset == file_offset) {
8135 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
8141 if (bio == dip->orig_bio)
8144 file_offset -= dip->logical_offset;
8145 file_offset >>= inode->i_sb->s_blocksize_bits;
8146 io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8151 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
8152 int rw, u64 file_offset, int skip_sum,
8155 struct btrfs_dio_private *dip = bio->bi_private;
8156 int write = rw & REQ_WRITE;
8157 struct btrfs_root *root = BTRFS_I(inode)->root;
8161 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8166 ret = btrfs_bio_wq_end_io(root->fs_info, bio,
8167 BTRFS_WQ_ENDIO_DATA);
8175 if (write && async_submit) {
8176 ret = btrfs_wq_submit_bio(root->fs_info,
8177 inode, rw, bio, 0, 0,
8179 __btrfs_submit_bio_start_direct_io,
8180 __btrfs_submit_bio_done);
/*
 * If we aren't doing async submit, calculate the csum of the
 * bio now.
 */
8187 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
8191 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
8197 ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
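/*
 * Walk the original bio and split it into child bios whenever adding
 * another page would cross a chunk stripe boundary, as reported by
 * btrfs_map_block().  Each child bio is submitted through
 * __btrfs_submit_dio_bio().
 */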
8203 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
8206 struct inode *inode = dip->inode;
8207 struct btrfs_root *root = BTRFS_I(inode)->root;
8209 struct bio *orig_bio = dip->orig_bio;
8210 struct bio_vec *bvec = orig_bio->bi_io_vec;
8211 u64 start_sector = orig_bio->bi_iter.bi_sector;
8212 u64 file_offset = dip->logical_offset;
8217 int async_submit = 0;
8219 map_length = orig_bio->bi_iter.bi_size;
8220 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
8221 &map_length, NULL, 0);
8225 if (map_length >= orig_bio->bi_iter.bi_size) {
8227 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8231 /* async crcs make it difficult to collect full stripe writes. */
8232 if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8237 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
8241 bio->bi_private = dip;
8242 bio->bi_end_io = btrfs_end_dio_bio;
8243 btrfs_io_bio(bio)->logical = file_offset;
8244 atomic_inc(&dip->pending_bios);
8246 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
8247 if (map_length < submit_len + bvec->bv_len ||
8248 bio_add_page(bio, bvec->bv_page, bvec->bv_len,
8249 bvec->bv_offset) < bvec->bv_len) {
/*
 * inc the count before we submit the bio so
 * we know the end IO handler won't happen before
 * we inc the count.  Otherwise, the dip might get freed
 * before we're done setting it up
 */
8256 atomic_inc(&dip->pending_bios);
8257 ret = __btrfs_submit_dio_bio(bio, inode, rw,
8258 file_offset, skip_sum,
8262 atomic_dec(&dip->pending_bios);
8266 start_sector += submit_len >> 9;
8267 file_offset += submit_len;
8272 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
8273 start_sector, GFP_NOFS);
8276 bio->bi_private = dip;
8277 bio->bi_end_io = btrfs_end_dio_bio;
8278 btrfs_io_bio(bio)->logical = file_offset;
8280 map_length = orig_bio->bi_iter.bi_size;
8281 ret = btrfs_map_block(root->fs_info, rw,
8283 &map_length, NULL, 0);
8289 submit_len += bvec->bv_len;
8296 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
/*
 * before the atomic variable goes to zero, we must
 * make sure dip->errors is perceived to be set.
 */
8308 smp_mb__before_atomic();
8309 if (atomic_dec_and_test(&dip->pending_bios))
8310 bio_io_error(dip->orig_bio);
/* bio_end_io() will handle the error, so we needn't return it */
8316 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8317 struct inode *inode, loff_t file_offset)
8319 struct btrfs_dio_private *dip = NULL;
8320 struct bio *io_bio = NULL;
8321 struct btrfs_io_bio *btrfs_bio;
8323 int write = rw & REQ_WRITE;
8326 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8328 io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
8334 dip = kzalloc(sizeof(*dip), GFP_NOFS);
8340 dip->private = dio_bio->bi_private;
8342 dip->logical_offset = file_offset;
8343 dip->bytes = dio_bio->bi_iter.bi_size;
8344 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8345 io_bio->bi_private = dip;
8346 dip->orig_bio = io_bio;
8347 dip->dio_bio = dio_bio;
8348 atomic_set(&dip->pending_bios, 0);
8349 btrfs_bio = btrfs_io_bio(io_bio);
8350 btrfs_bio->logical = file_offset;
8353 io_bio->bi_end_io = btrfs_endio_direct_write;
8355 io_bio->bi_end_io = btrfs_endio_direct_read;
8356 dip->subio_endio = btrfs_subio_endio_read;
8359 ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
8363 if (btrfs_bio->end_io)
8364 btrfs_bio->end_io(btrfs_bio, ret);
/*
 * If we arrived here it means we either failed to submit the dip,
 * failed to clone the dio_bio, or failed to allocate the dip.  If we
 * cloned the dio_bio and allocated the dip, we can just call bio_endio
 * against our io_bio so that we get proper resource cleanup if we fail
 * to submit the dip, otherwise, we must do the same as
 * btrfs_endio_direct_[write|read] because we can't call these
 * callbacks - they require an allocated dip and a clone of dio_bio.
 */
8376 if (io_bio && dip) {
8377 io_bio->bi_error = -EIO;
8380 * The end io callbacks free our dip, do the final put on io_bio
8381 * and all the cleanup and final put for dio_bio (through
8388 struct btrfs_ordered_extent *ordered;
8390 ordered = btrfs_lookup_ordered_extent(inode,
8392 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
/*
 * Decrements our ref on the ordered extent and removes
 * the ordered extent from the inode's ordered tree,
 * doing all the proper resource cleanup such as for the
 * reserved space and waking up any waiters for this
 * ordered extent (through btrfs_remove_ordered_extent).
 */
8400 btrfs_finish_ordered_io(ordered);
8402 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8403 file_offset + dio_bio->bi_iter.bi_size - 1);
8405 dio_bio->bi_error = -EIO;
/*
 * Releases and cleans up our dio_bio, no need to bio_put()
 * nor bio_endio()/bio_io_error() against dio_bio.
 */
8410 dio_end_io(dio_bio, ret);
8417 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
8418 const struct iov_iter *iter, loff_t offset)
8422 unsigned blocksize_mask = root->sectorsize - 1;
8423 ssize_t retval = -EINVAL;
8425 if (offset & blocksize_mask)
8428 if (iov_iter_alignment(iter) & blocksize_mask)
8431 /* If this is a write we don't need to check anymore */
8432 if (iov_iter_rw(iter) == WRITE)
/*
 * Check to make sure we don't have duplicate iov_base's in this
 * iovec, if so return -EINVAL, otherwise we'll get csum errors
 * when reading back.
 */
8439 for (seg = 0; seg < iter->nr_segs; seg++) {
8440 for (i = seg + 1; i < iter->nr_segs; i++) {
8441 if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8450 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8453 struct file *file = iocb->ki_filp;
8454 struct inode *inode = file->f_mapping->host;
8455 struct btrfs_root *root = BTRFS_I(inode)->root;
8456 struct btrfs_dio_data dio_data = { 0 };
8460 bool relock = false;
8463 if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
8466 inode_dio_begin(inode);
8467 smp_mb__after_atomic();
/*
 * The generic stuff only does filemap_write_and_wait_range, which
 * isn't enough if we've written compressed pages to this area, so
 * we need to flush the dirty pages again to make absolutely sure
 * that any outstanding dirty pages are on disk.
 */
8475 count = iov_iter_count(iter);
8476 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8477 &BTRFS_I(inode)->runtime_flags))
8478 filemap_fdatawrite_range(inode->i_mapping, offset,
8479 offset + count - 1);
8481 if (iov_iter_rw(iter) == WRITE) {
/*
 * If the write DIO is beyond the EOF, we need to update
 * the isize, but it is protected by i_mutex, so we can
 * not unlock the i_mutex in this case.
 */
8487 if (offset + count <= inode->i_size) {
8488 mutex_unlock(&inode->i_mutex);
8491 ret = btrfs_delalloc_reserve_space(inode, offset, count);
8494 dio_data.outstanding_extents = div64_u64(count +
8495 BTRFS_MAX_EXTENT_SIZE - 1,
8496 BTRFS_MAX_EXTENT_SIZE);
/*
 * We need to know how many extents we reserved so that we can
 * do the accounting properly if we go over the number we
 * originally calculated.  Abuse current->journal_info for this.
 */
8503 dio_data.reserve = round_up(count, root->sectorsize);
8504 current->journal_info = &dio_data;
8505 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8506 &BTRFS_I(inode)->runtime_flags)) {
8507 inode_dio_end(inode);
8508 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8512 ret = __blockdev_direct_IO(iocb, inode,
8513 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
8514 iter, offset, btrfs_get_blocks_direct, NULL,
8515 btrfs_submit_direct, flags);
8516 if (iov_iter_rw(iter) == WRITE) {
8517 current->journal_info = NULL;
8518 if (ret < 0 && ret != -EIOCBQUEUED) {
8519 if (dio_data.reserve)
8520 btrfs_delalloc_release_space(inode, offset,
8522 } else if (ret >= 0 && (size_t)ret < count)
8523 btrfs_delalloc_release_space(inode, offset,
8524 count - (size_t)ret);
8528 inode_dio_end(inode);
8530 mutex_lock(&inode->i_mutex);
8535 #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
8537 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8538 __u64 start, __u64 len)
8542 ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8546 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8549 int btrfs_readpage(struct file *file, struct page *page)
8551 struct extent_io_tree *tree;
8552 tree = &BTRFS_I(page->mapping->host)->io_tree;
8553 return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8556 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8558 struct extent_io_tree *tree;
8561 if (current->flags & PF_MEMALLOC) {
8562 redirty_page_for_writepage(wbc, page);
8566 tree = &BTRFS_I(page->mapping->host)->io_tree;
8567 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
8570 static int btrfs_writepages(struct address_space *mapping,
8571 struct writeback_control *wbc)
8573 struct extent_io_tree *tree;
8575 tree = &BTRFS_I(mapping->host)->io_tree;
8576 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
8580 btrfs_readpages(struct file *file, struct address_space *mapping,
8581 struct list_head *pages, unsigned nr_pages)
8583 struct extent_io_tree *tree;
8584 tree = &BTRFS_I(mapping->host)->io_tree;
8585 return extent_readpages(tree, mapping, pages, nr_pages,
8588 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8590 struct extent_io_tree *tree;
8591 struct extent_map_tree *map;
8594 tree = &BTRFS_I(page->mapping->host)->io_tree;
8595 map = &BTRFS_I(page->mapping->host)->extent_tree;
8596 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8598 ClearPagePrivate(page);
8599 set_page_private(page, 0);
8600 page_cache_release(page);
8605 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8607 if (PageWriteback(page) || PageDirty(page))
8609 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
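/*
 * Called when part or all of a page is being dropped from the page
 * cache.  Besides releasing the extent state, we must settle any
 * ordered extent covering the page and return qgroup-reserved space
 * that will never be written out.
 */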
8612 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8613 unsigned int length)
8615 struct inode *inode = page->mapping->host;
8616 struct extent_io_tree *tree;
8617 struct btrfs_ordered_extent *ordered;
8618 struct extent_state *cached_state = NULL;
8619 u64 page_start = page_offset(page);
8620 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
8621 int inode_evicting = inode->i_state & I_FREEING;
/*
 * we have the page locked, so new writeback can't start,
 * and the dirty bit won't be cleared while we are here.
 *
 * Wait for IO on this page so that we can safely clear
 * the PagePrivate2 bit and do ordered accounting
 */
8630 wait_on_page_writeback(page);
8632 tree = &BTRFS_I(inode)->io_tree;
8634 btrfs_releasepage(page, GFP_NOFS);
8638 if (!inode_evicting)
8639 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
8640 ordered = btrfs_lookup_ordered_extent(inode, page_start);
/*
 * IO on this page will never be started, so we need
 * to account for any ordered extents now
 */
8646 if (!inode_evicting)
8647 clear_extent_bit(tree, page_start, page_end,
8648 EXTENT_DIRTY | EXTENT_DELALLOC |
8649 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8650 EXTENT_DEFRAG, 1, 0, &cached_state,
/*
 * whoever cleared the private bit is responsible
 * for the finish_ordered_io
 */
8656 if (TestClearPagePrivate2(page)) {
8657 struct btrfs_ordered_inode_tree *tree;
8660 tree = &BTRFS_I(inode)->ordered_tree;
8662 spin_lock_irq(&tree->lock);
8663 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8664 new_len = page_start - ordered->file_offset;
8665 if (new_len < ordered->truncated_len)
8666 ordered->truncated_len = new_len;
8667 spin_unlock_irq(&tree->lock);
8669 if (btrfs_dec_test_ordered_pending(inode, &ordered,
8671 PAGE_CACHE_SIZE, 1))
8672 btrfs_finish_ordered_io(ordered);
8674 btrfs_put_ordered_extent(ordered);
8675 if (!inode_evicting) {
8676 cached_state = NULL;
8677 lock_extent_bits(tree, page_start, page_end, 0,
/*
 * Qgroup reserved space handler
 * Page here will be either
 * 1) Already written to disk
 *    In this case, its reserved space is released from data rsv map
 *    and will be freed by delayed_ref handler finally.
 *    So even if we call qgroup_free_data(), it won't decrease reserved
 *    space.
 * 2) Not written to disk
 *    This means the reserved space should be freed here.
 */
8693 btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE);
8694 if (!inode_evicting) {
8695 clear_extent_bit(tree, page_start, page_end,
8696 EXTENT_LOCKED | EXTENT_DIRTY |
8697 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8698 EXTENT_DEFRAG, 1, 1,
8699 &cached_state, GFP_NOFS);
8701 __btrfs_releasepage(page, GFP_NOFS);
8704 ClearPageChecked(page);
8705 if (PagePrivate(page)) {
8706 ClearPagePrivate(page);
8707 set_page_private(page, 0);
8708 page_cache_release(page);
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied.  Hence we must
 * be careful to check for EOF conditions here.  We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF.  If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
8727 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8729 struct page *page = vmf->page;
8730 struct inode *inode = file_inode(vma->vm_file);
8731 struct btrfs_root *root = BTRFS_I(inode)->root;
8732 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8733 struct btrfs_ordered_extent *ordered;
8734 struct extent_state *cached_state = NULL;
8736 unsigned long zero_start;
8743 sb_start_pagefault(inode->i_sb);
8744 page_start = page_offset(page);
8745 page_end = page_start + PAGE_CACHE_SIZE - 1;
8747 ret = btrfs_delalloc_reserve_space(inode, page_start,
8750 ret = file_update_time(vma->vm_file);
8756 else /* -ENOSPC, -EIO, etc */
8757 ret = VM_FAULT_SIGBUS;
8763 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8766 size = i_size_read(inode);
8768 if ((page->mapping != inode->i_mapping) ||
8769 (page_start >= size)) {
8770 /* page got truncated out from underneath us */
8773 wait_on_page_writeback(page);
8775 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
8776 set_page_extent_mapped(page);
/*
 * we can't set the delalloc bits if there are pending ordered
 * extents.  Drop our locks and wait for them to finish
 */
8782 ordered = btrfs_lookup_ordered_extent(inode, page_start);
8784 unlock_extent_cached(io_tree, page_start, page_end,
8785 &cached_state, GFP_NOFS);
8787 btrfs_start_ordered_extent(inode, ordered, 1);
8788 btrfs_put_ordered_extent(ordered);
/*
 * XXX - page_mkwrite gets called every time the page is dirtied, even
 * if it was already dirty, so for space accounting reasons we need to
 * clear any delalloc bits for the range we are fixing to save.  There
 * is probably a better way to do this, but for now keep consistent with
 * prepare_pages in the normal write path.
 */
8799 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
8800 EXTENT_DIRTY | EXTENT_DELALLOC |
8801 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
8802 0, 0, &cached_state, GFP_NOFS);
8804 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
8807 unlock_extent_cached(io_tree, page_start, page_end,
8808 &cached_state, GFP_NOFS);
8809 ret = VM_FAULT_SIGBUS;
8814 /* page is wholly or partially inside EOF */
8815 if (page_start + PAGE_CACHE_SIZE > size)
8816 zero_start = size & ~PAGE_CACHE_MASK;
8818 zero_start = PAGE_CACHE_SIZE;
8820 if (zero_start != PAGE_CACHE_SIZE) {
8822 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
8823 flush_dcache_page(page);
8826 ClearPageChecked(page);
8827 set_page_dirty(page);
8828 SetPageUptodate(page);
8830 BTRFS_I(inode)->last_trans = root->fs_info->generation;
8831 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
8832 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
8834 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
8838 sb_end_pagefault(inode->i_sb);
8839 return VM_FAULT_LOCKED;
8843 btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
8845 sb_end_pagefault(inode->i_sb);
8849 static int btrfs_truncate(struct inode *inode)
8851 struct btrfs_root *root = BTRFS_I(inode)->root;
8852 struct btrfs_block_rsv *rsv;
8855 struct btrfs_trans_handle *trans;
8856 u64 mask = root->sectorsize - 1;
8857 u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
8859 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
/*
 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
 * 3 things going on here
 *
 * 1) We need to reserve space for our orphan item and the space to
 * delete our orphan item.  Lord knows we don't want to have a dangling
 * orphan item because we didn't reserve space to remove it.
 *
 * 2) We need to reserve space to update our inode.
 *
 * 3) We need to have something to cache all the space that is going to
 * be freed up by the truncate operation, but also have some slack
 * space reserved in case it uses space during the truncate (thank you
 * very much snapshotting).
 *
 * And we need these to all be separate.  The fact is we can use a lot
 * of space doing the truncate, and we have no earthly idea how much
 * space we will use, so we need the truncate reservation to be separate
 * so it doesn't end up using space reserved for updating the inode or
 * removing the orphan item.  We also need to be able to stop the
 * transaction and start a new one, which means we need to be able to
 * update the inode several times, and we have no way of knowing how
 * many times that will be, so we can't just reserve 1 item for the
 * entirety of the operation, so that has to be done separately as well.
 * Then there is the orphan item, which does indeed need to be held on
 * to for the whole operation, and we need nobody to touch this reserved
 * space except the orphan code.
 *
 * So that leaves us with
 *
 * 1) root->orphan_block_rsv - for the orphan deletion.
 * 2) rsv - for the truncate reservation, which we will steal from the
 * transaction reservation.
 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
 * updating the inode.
 */
8900 rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
8903 rsv->size = min_size;
/*
 * 1 for the truncate slack space
 * 1 for updating the inode.
 */
8910 trans = btrfs_start_transaction(root, 2);
8911 if (IS_ERR(trans)) {
8912 err = PTR_ERR(trans);
8916 /* Migrate the slack space for the truncate to our reserve */
8917 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
/*
 * So if we truncate and then write and fsync we normally would just
 * write the extents that changed, which is a problem if we need to
 * first truncate that entire inode.  So set this flag so we write out
 * all of the extents in the inode to the sync log so we're completely
 * safe.
 */
8929 trans->block_rsv = rsv;
8932 ret = btrfs_truncate_inode_items(trans, root, inode,
8934 BTRFS_EXTENT_DATA_KEY);
8935 if (ret != -ENOSPC && ret != -EAGAIN) {
8940 trans->block_rsv = &root->fs_info->trans_block_rsv;
8941 ret = btrfs_update_inode(trans, root, inode);
8947 btrfs_end_transaction(trans, root);
8948 btrfs_btree_balance_dirty(root);
8950 trans = btrfs_start_transaction(root, 2);
8951 if (IS_ERR(trans)) {
8952 ret = err = PTR_ERR(trans);
8957 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
8959 BUG_ON(ret); /* shouldn't happen */
8960 trans->block_rsv = rsv;
8963 if (ret == 0 && inode->i_nlink > 0) {
8964 trans->block_rsv = root->orphan_block_rsv;
8965 ret = btrfs_orphan_del(trans, inode);
8971 trans->block_rsv = &root->fs_info->trans_block_rsv;
8972 ret = btrfs_update_inode(trans, root, inode);
8976 ret = btrfs_end_transaction(trans, root);
8977 btrfs_btree_balance_dirty(root);
8981 btrfs_free_block_rsv(root, rsv);
/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
8992 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
8993 struct btrfs_root *new_root,
8994 struct btrfs_root *parent_root,
8997 struct inode *inode;
9001 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9002 new_dirid, new_dirid,
9003 S_IFDIR | (~current_umask() & S_IRWXUGO),
9006 return PTR_ERR(inode);
9007 inode->i_op = &btrfs_dir_inode_operations;
9008 inode->i_fop = &btrfs_dir_file_operations;
9010 set_nlink(inode, 1);
9011 btrfs_i_size_write(inode, 0);
9012 unlock_new_inode(inode);
9014 err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9016 btrfs_err(new_root->fs_info,
9017 "error inheriting subvolume %llu properties: %d",
9018 new_root->root_key.objectid, err);
9020 err = btrfs_update_inode(trans, new_root, inode);
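/*
 * Allocate a btrfs_inode from the inode slab and initialize its
 * runtime fields; the embedded VFS inode is what the VFS gets back.
 * Counterpart of btrfs_destroy_inode() below.
 */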
9026 struct inode *btrfs_alloc_inode(struct super_block *sb)
9028 struct btrfs_inode *ei;
9029 struct inode *inode;
9031 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
9038 ei->last_sub_trans = 0;
9039 ei->logged_trans = 0;
9040 ei->delalloc_bytes = 0;
9041 ei->defrag_bytes = 0;
9042 ei->disk_i_size = 0;
9045 ei->index_cnt = (u64)-1;
9047 ei->last_unlink_trans = 0;
9048 ei->last_log_commit = 0;
9050 spin_lock_init(&ei->lock);
9051 ei->outstanding_extents = 0;
9052 ei->reserved_extents = 0;
9054 ei->runtime_flags = 0;
9055 ei->force_compress = BTRFS_COMPRESS_NONE;
9057 ei->delayed_node = NULL;
9059 ei->i_otime.tv_sec = 0;
9060 ei->i_otime.tv_nsec = 0;
9062 inode = &ei->vfs_inode;
9063 extent_map_tree_init(&ei->extent_tree);
9064 extent_io_tree_init(&ei->io_tree, &inode->i_data);
9065 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
9066 ei->io_tree.track_uptodate = 1;
9067 ei->io_failure_tree.track_uptodate = 1;
9068 atomic_set(&ei->sync_writers, 0);
9069 mutex_init(&ei->log_mutex);
9070 mutex_init(&ei->delalloc_mutex);
9071 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9072 INIT_LIST_HEAD(&ei->delalloc_inodes);
9073 RB_CLEAR_NODE(&ei->rb_node);
9078 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9079 void btrfs_test_destroy_inode(struct inode *inode)
9081 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9082 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9086 static void btrfs_i_callback(struct rcu_head *head)
9088 struct inode *inode = container_of(head, struct inode, i_rcu);
9089 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9092 void btrfs_destroy_inode(struct inode *inode)
9094 struct btrfs_ordered_extent *ordered;
9095 struct btrfs_root *root = BTRFS_I(inode)->root;
9097 WARN_ON(!hlist_empty(&inode->i_dentry));
9098 WARN_ON(inode->i_data.nrpages);
9099 WARN_ON(BTRFS_I(inode)->outstanding_extents);
9100 WARN_ON(BTRFS_I(inode)->reserved_extents);
9101 WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9102 WARN_ON(BTRFS_I(inode)->csum_bytes);
9103 WARN_ON(BTRFS_I(inode)->defrag_bytes);
/*
 * This can happen where we create an inode, but somebody else also
 * created the same inode and we need to destroy the one we already
 * created.
 */
9113 if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9114 &BTRFS_I(inode)->runtime_flags)) {
9115 btrfs_info(root->fs_info, "inode %llu still on the orphan list",
9117 atomic_dec(&root->orphan_inodes);
9121 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9125 btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
9126 ordered->file_offset, ordered->len);
9127 btrfs_remove_ordered_extent(inode, ordered);
9128 btrfs_put_ordered_extent(ordered);
9129 btrfs_put_ordered_extent(ordered);
9132 btrfs_qgroup_check_reserved_leak(inode);
9133 inode_tree_del(inode);
9134 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9136 call_rcu(&inode->i_rcu, btrfs_i_callback);
9139 int btrfs_drop_inode(struct inode *inode)
9141 struct btrfs_root *root = BTRFS_I(inode)->root;
/* the snap/subvol tree is being deleted */
9147 if (btrfs_root_refs(&root->root_item) == 0)
9150 return generic_drop_inode(inode);
9153 static void init_once(void *foo)
9155 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9157 inode_init_once(&ei->vfs_inode);
9160 void btrfs_destroy_cachep(void)
/*
 * Make sure all delayed rcu free inodes are flushed before we
 * destroy cache.
 */
rcu_barrier();
9167 if (btrfs_inode_cachep)
9168 kmem_cache_destroy(btrfs_inode_cachep);
9169 if (btrfs_trans_handle_cachep)
9170 kmem_cache_destroy(btrfs_trans_handle_cachep);
9171 if (btrfs_transaction_cachep)
9172 kmem_cache_destroy(btrfs_transaction_cachep);
9173 if (btrfs_path_cachep)
9174 kmem_cache_destroy(btrfs_path_cachep);
9175 if (btrfs_free_space_cachep)
9176 kmem_cache_destroy(btrfs_free_space_cachep);
9177 if (btrfs_delalloc_work_cachep)
9178 kmem_cache_destroy(btrfs_delalloc_work_cachep);
9181 int btrfs_init_cachep(void)
9183 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9184 sizeof(struct btrfs_inode), 0,
9185 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
9186 if (!btrfs_inode_cachep)
9189 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9190 sizeof(struct btrfs_trans_handle), 0,
9191 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9192 if (!btrfs_trans_handle_cachep)
9195 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9196 sizeof(struct btrfs_transaction), 0,
9197 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9198 if (!btrfs_transaction_cachep)
9201 btrfs_path_cachep = kmem_cache_create("btrfs_path",
9202 sizeof(struct btrfs_path), 0,
9203 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9204 if (!btrfs_path_cachep)
9207 btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9208 sizeof(struct btrfs_free_space), 0,
9209 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9210 if (!btrfs_free_space_cachep)
9213 btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
9214 sizeof(struct btrfs_delalloc_work), 0,
9215 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
9217 if (!btrfs_delalloc_work_cachep)
9222 btrfs_destroy_cachep();
9226 static int btrfs_getattr(struct vfsmount *mnt,
9227 struct dentry *dentry, struct kstat *stat)
9230 struct inode *inode = d_inode(dentry);
9231 u32 blocksize = inode->i_sb->s_blocksize;
9233 generic_fillattr(inode, stat);
9234 stat->dev = BTRFS_I(inode)->root->anon_dev;
9235 stat->blksize = PAGE_CACHE_SIZE;
9237 spin_lock(&BTRFS_I(inode)->lock);
9238 delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
9239 spin_unlock(&BTRFS_I(inode)->lock);
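/*
 * Include not-yet-allocated delalloc bytes in st_blocks so buffered
 * writes show up in stat()/du before the extents are allocated at
 * writeback time.
 */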
9240 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9241 ALIGN(delalloc_bytes, blocksize)) >> 9;
9245 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9246 struct inode *new_dir, struct dentry *new_dentry)
9248 struct btrfs_trans_handle *trans;
9249 struct btrfs_root *root = BTRFS_I(old_dir)->root;
9250 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9251 struct inode *new_inode = d_inode(new_dentry);
9252 struct inode *old_inode = d_inode(old_dentry);
9253 struct timespec ctime = CURRENT_TIME;
9257 u64 old_ino = btrfs_ino(old_inode);
9259 if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
/* we only allow renaming subvolume links between subvolumes */
9263 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9266 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9267 (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
9270 if (S_ISDIR(old_inode->i_mode) && new_inode &&
9271 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9275 /* check for collisions, even if the name isn't there */
9276 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9277 new_dentry->d_name.name,
9278 new_dentry->d_name.len);
9281 if (ret == -EEXIST) {
/* we shouldn't get -EEXIST without a new_inode */
9284 if (WARN_ON(!new_inode)) {
9288 /* maybe -EOVERFLOW */
/*
 * we're using rename to replace one file with another.  Start IO on it
 * now so we don't add too much work to the end of the transaction
 */
9298 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9299 filemap_flush(old_inode->i_mapping);
9301 /* close the racy window with snapshot create/destroy ioctl */
9302 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9303 down_read(&root->fs_info->subvol_sem);
/*
 * We want to reserve the absolute worst case amount of items.  So if
 * both inodes are subvols and we need to unlink them then that would
 * require 4 item modifications, but if they are both normal inodes it
 * would require 5 item modifications, so we'll assume they're normal
 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
 * should cover the worst case number of items we'll modify.
 */
9312 trans = btrfs_start_transaction(root, 11);
9313 if (IS_ERR(trans)) {
9314 ret = PTR_ERR(trans);
9319 btrfs_record_root_in_trans(trans, dest);
9321 ret = btrfs_set_inode_index(new_dir, &index);
9325 BTRFS_I(old_inode)->dir_index = 0ULL;
9326 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9327 /* force full log commit if subvolume involved. */
9328 btrfs_set_log_full_commit(root->fs_info, trans);
9330 ret = btrfs_insert_inode_ref(trans, dest,
9331 new_dentry->d_name.name,
9332 new_dentry->d_name.len,
9334 btrfs_ino(new_dir), index);
/*
 * this is an ugly little race, but the rename is required
 * to make sure that if we crash, the inode is either at the
 * old name or the new one.  pinning the log transaction lets
 * us make sure we don't allow a log commit to come in after
 * we unlink the name but before we add the new name back in.
 */
9344 btrfs_pin_log_trans(root);
9347 inode_inc_iversion(old_dir);
9348 inode_inc_iversion(new_dir);
9349 inode_inc_iversion(old_inode);
9350 old_dir->i_ctime = old_dir->i_mtime = ctime;
9351 new_dir->i_ctime = new_dir->i_mtime = ctime;
9352 old_inode->i_ctime = ctime;
9354 if (old_dentry->d_parent != new_dentry->d_parent)
9355 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
9357 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9358 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9359 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
9360 old_dentry->d_name.name,
9361 old_dentry->d_name.len);
9363 ret = __btrfs_unlink_inode(trans, root, old_dir,
9364 d_inode(old_dentry),
9365 old_dentry->d_name.name,
9366 old_dentry->d_name.len);
9368 ret = btrfs_update_inode(trans, root, old_inode);
9371 btrfs_abort_transaction(trans, root, ret);
9376 inode_inc_iversion(new_inode);
9377 new_inode->i_ctime = CURRENT_TIME;
9378 if (unlikely(btrfs_ino(new_inode) ==
9379 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9380 root_objectid = BTRFS_I(new_inode)->location.objectid;
9381 ret = btrfs_unlink_subvol(trans, dest, new_dir,
9383 new_dentry->d_name.name,
9384 new_dentry->d_name.len);
9385 BUG_ON(new_inode->i_nlink == 0);
9387 ret = btrfs_unlink_inode(trans, dest, new_dir,
9388 d_inode(new_dentry),
9389 new_dentry->d_name.name,
9390 new_dentry->d_name.len);
9392 if (!ret && new_inode->i_nlink == 0)
9393 ret = btrfs_orphan_add(trans, d_inode(new_dentry));
9395 btrfs_abort_transaction(trans, root, ret);
9400 ret = btrfs_add_link(trans, new_dir, old_inode,
9401 new_dentry->d_name.name,
9402 new_dentry->d_name.len, 0, index);
9404 btrfs_abort_transaction(trans, root, ret);
9408 if (old_inode->i_nlink == 1)
9409 BTRFS_I(old_inode)->dir_index = index;
9411 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
9412 struct dentry *parent = new_dentry->d_parent;
9413 btrfs_log_new_name(trans, old_inode, old_dir, parent);
9414 btrfs_end_log_trans(root);
9417 btrfs_end_transaction(trans, root);
9419 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9420 up_read(&root->fs_info->subvol_sem);
9425 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9426 struct inode *new_dir, struct dentry *new_dentry,
9429 if (flags & ~RENAME_NOREPLACE)
9432 return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry);
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	if (delalloc_work->wait) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
	} else {
		filemap_flush(inode->i_mapping);
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}

	if (delalloc_work->delay_iput)
		btrfs_add_delayed_iput(inode);
	else
		iput(inode);
	complete(&delalloc_work->completion);
}
struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
						    int wait, int delay_iput)
{
	struct btrfs_delalloc_work *work;

	work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	work->wait = wait;
	work->delay_iput = delay_iput;
	WARN_ON_ONCE(!inode);
	btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
			btrfs_run_delalloc_work, NULL, NULL);

	return work;
}
void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
{
	wait_for_completion(&work->completion);
	kmem_cache_free(btrfs_delalloc_work_cachep, work);
}
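
/*
 * Illustrative sketch, not part of the original file: the intended
 * lifecycle of the three helpers above. A caller allocates a work item,
 * queues it on the fs_info->flush_workers workqueue, and then blocks on
 * the completion that btrfs_run_delalloc_work() signals. The inode is
 * assumed to already be pinned with igrab(), exactly as
 * __start_delalloc_inodes() below does it.
 */
#if 0
static int example_flush_one_inode(struct btrfs_root *root,
				   struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	/* wait == 0: just flush pages; delay_iput == 0: iput() directly */
	work = btrfs_alloc_delalloc_work(inode, 0, 0);
	if (!work)
		return -ENOMEM;
	btrfs_queue_work(root->fs_info->flush_workers, &work->work);
	/* returns once btrfs_run_delalloc_work() calls complete() */
	btrfs_wait_and_free_delalloc_work(work);
	return 0;
}
#endif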
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
				   int nr)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	struct list_head works;
	struct list_head splice;
	int ret = 0;

	INIT_LIST_HEAD(&works);
	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
		if (!work) {
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &work->work);

		ret++;
		if (nr != -1 && ret >= nr)
			goto out;
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}

	if (!list_empty_careful(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return -EROFS;

	ret = __start_delalloc_inodes(root, delay_iput, -1);
	if (ret > 0)
		ret = 0;
	/*
	 * the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
			    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return ret;
}
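
/*
 * Note (added for clarity, not in the original): while
 * async_submit_draining is elevated, the async submission paths elsewhere
 * in btrfs wake async_submit_wait whenever nr_async_submits or
 * async_delalloc_pages reaches zero, so the wait_event() loop above
 * cannot sleep through the transition it is waiting for.
 */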
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
			       int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = __start_delalloc_inodes(root, delay_iput, nr);
		btrfs_put_fs_root(root);
		if (ret < 0)
			goto out;

		if (nr != -1) {
			nr -= ret;
			WARN_ON(nr < 0);
		}
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
	atomic_inc(&fs_info->async_submit_draining);
	while (atomic_read(&fs_info->nr_async_submits) ||
	       atomic_read(&fs_info->async_delalloc_pages)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0 &&
			    atomic_read(&fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&fs_info->async_submit_draining);
out:
	if (!list_empty_careful(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
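
/*
 * Illustrative sketch, not part of the original file: how a
 * space-flushing caller might drive the helper above -- flush delalloc
 * for up to @nr inodes across every root that currently has dirty
 * delalloc ranges. The value 64 is an arbitrary example, not something
 * the original code uses.
 */
#if 0
static void example_flush_some_delalloc(struct btrfs_fs_info *fs_info)
{
	/* delay_iput == 1 defers the final iput() via btrfs_add_delayed_iput() */
	btrfs_start_delalloc_roots(fs_info, 1, 64);
}
#endif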
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(dir), objectid,
				S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock_inode;

	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
	if (err)
		goto out_unlock_inode;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock_inode;
	}
	key.objectid = btrfs_ino(inode);
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		btrfs_free_path(path);
		goto out_unlock_inode;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len);
	err = btrfs_update_inode(trans, root, inode);
	if (err) {
		drop_inode = 1;
		goto out_unlock_inode;
	}

	unlock_new_inode(inode);
	d_instantiate(dentry, inode);

out_unlock:
	btrfs_end_transaction(trans, root);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root);
	return err;

out_unlock_inode:
	drop_inode = 1;
	unlock_new_inode(inode);
	goto out_unlock;
}
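
/*
 * Note (added for clarity, not in the original): the symlink target is
 * stored entirely inside the inline file extent item created above, which
 * is why btrfs_symlink() rejects targets longer than
 * BTRFS_MAX_INLINE_DATA_SIZE(root) with -ENAMETOOLONG rather than
 * comparing against PATH_MAX.
 */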
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks lets make its job easier by only searching for those
		 * sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
					   *alloc_hint, &ins, 1, 0);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		last_alloc = ins.offset;
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		if (ret) {
			btrfs_free_reserved_extent(root, ins.objectid,
						   ins.offset, 0);
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		btrfs_drop_extent_cache(inode, cur_offset,
					cur_offset + ins.offset - 1, 0);

		em = alloc_extent_map();
		if (!em) {
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		em->generation = trans->transid;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST)
				break;
			btrfs_drop_extent_cache(inode, cur_offset,
						cur_offset + ins.offset - 1,
						0);
		}
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode->i_ctime = CURRENT_TIME;
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			if (own_trans)
				btrfs_end_transaction(trans, root);
			break;
		}

		if (own_trans)
			btrfs_end_transaction(trans, root);
	}
	return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}
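
/*
 * Illustrative sketch, not part of the original file: a fallocate-style
 * caller preallocating one megabyte at the start of a file through the
 * NULL-transaction wrapper, so __btrfs_prealloc_file_range() starts and
 * ends its own transactions per extent. FALLOC_FL_KEEP_SIZE leaves
 * i_size untouched; the block size is used as min_size, matching how the
 * real fallocate path sizes its minimum extent.
 */
#if 0
static int example_prealloc_first_meg(struct inode *inode)
{
	u64 alloc_hint = 0;
	u64 len = 1024 * 1024;

	return btrfs_prealloc_file_range(inode, FALLOC_FL_KEEP_SIZE,
					 0, len, 1 << inode->i_blkbits,
					 len, &alloc_hint);
}
#endif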
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}
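
/*
 * Note (added for clarity, not in the original): for regular files,
 * directories and symlinks inside a read-only subvolume, any MAY_WRITE
 * access fails above with -EROFS before generic_permission() is ever
 * consulted; the per-inode BTRFS_INODE_READONLY flag yields -EACCES the
 * same way.
 */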
static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int ret = 0;
	u64 objectid;
	u64 index;

	/*
	 * 5 units required for adding orphan entry
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		goto out;

	inode = btrfs_new_inode(trans, root, dir, NULL, 0,
				btrfs_ino(dir), objectid, mode, &index);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}

	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

	ret = btrfs_init_inode_security(trans, inode, dir, NULL);
	if (ret)
		goto out_inode;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		goto out_inode;
	ret = btrfs_orphan_add(trans, inode);
	if (ret)
		goto out_inode;

	/*
	 * We set number of links to 0 in btrfs_new_inode(), and here we set
	 * it to 1 because d_tmpfile() will issue a warning if the count is 0,
	 * through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);
	unlock_new_inode(inode);
	d_tmpfile(dentry, inode);
	mark_inode_dirty(inode);
out:
	btrfs_end_transaction(trans, root);
	if (ret)
		iput(inode);
	btrfs_balance_delayed_items(root);
	btrfs_btree_balance_dirty(root);
	return ret;

out_inode:
	unlock_new_inode(inode);
	goto out;
}
/* Inspired by filemap_check_errors() */
int btrfs_inode_check_errors(struct inode *inode)
{
	int ret = 0;

	if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
		ret = -EIO;
	return ret;
}
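
/*
 * Note (added for clarity, not in the original): like
 * filemap_check_errors(), the helper above clears the AS_ENOSPC/AS_EIO
 * bits it reports, so a second call returns 0 unless new writeback
 * errors have been recorded on the mapping in the meantime.
 */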
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr = btrfs_getattr,
	.lookup = btrfs_lookup,
	.create = btrfs_create,
	.unlink = btrfs_unlink,
	.link = btrfs_link,
	.mkdir = btrfs_mkdir,
	.rmdir = btrfs_rmdir,
	.rename2 = btrfs_rename2,
	.symlink = btrfs_symlink,
	.setattr = btrfs_setattr,
	.mknod = btrfs_mknod,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.permission = btrfs_permission,
	.get_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
	.tmpfile = btrfs_tmpfile,
};

static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup = btrfs_lookup,
	.permission = btrfs_permission,
	.get_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.iterate = btrfs_real_readdir,
	.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = btrfs_ioctl,
#endif
	.release = btrfs_release_file,
	.fsync = btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
	.merge_extent_hook = btrfs_merge_extent_hook,
	.split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage = btrfs_readpage,
	.writepage = btrfs_writepage,
	.writepages = btrfs_writepages,
	.readpages = btrfs_readpages,
	.direct_IO = btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage = btrfs_releasepage,
	.set_page_dirty = btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
	.readpage = btrfs_readpage,
	.writepage = btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage = btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.permission = btrfs_permission,
	.fiemap = btrfs_fiemap,
	.get_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.get_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.readlink = generic_readlink,
	.follow_link = page_follow_link_light,
	.put_link = page_put_link,
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.setxattr = btrfs_setxattr,
	.getxattr = btrfs_getxattr,
	.listxattr = btrfs_listxattr,
	.removexattr = btrfs_removexattr,
	.update_time = btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete = btrfs_dentry_delete,
	.d_release = btrfs_dentry_release,
};