/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);
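	/*
	 * Example (illustrative, assuming 4K pages): a write starting at
	 * pos 6144 begins 2048 bytes into its first page, because
	 * 6144 & (4096 - 1) == 2048.  Only the first page is copied at
	 * a non-zero offset; every later page starts at offset 0.
	 */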
	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty.  Clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 start_pos;
	u64 num_bytes;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
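	/*
	 * Worked example (assuming 4K sectors): for pos 6000 and
	 * write_bytes 3000, start_pos rounds down to 4096 and num_bytes
	 * rounds 3000 + 6000 - 4096 = 4904 up to 8192, so the dirtied
	 * range covers two full sectors.
	 */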
	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);

	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos,
		       write_bytes, end_of_last_block);
	}
	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);
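		/*
		 * The mapping is gone from the tree; if it sticks out of
		 * [start, end] on either side, put the surviving pieces
		 * back as up to two new mappings (illustrative sketch):
		 *
		 *	em->start              em->start + em->len
		 *	|---------------em----------------|
		 *	        |----dropped-----|
		 *	|-split-|                |-split2-|
		 */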
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
/*
 * debugging helper: walks all the file extent items for an inode and
 * verifies their offsets are contiguous and non-overlapping.
 */
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	int ret = 0;
	int err = 0;
	int slot;
	int found_type;
	u32 nritems;
	u64 last_offset = 0;
	u64 extent_end = 0;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while (1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk("inode %lu found offset %Lu expected %Lu\n",
			       inode->i_ino, found_key.offset, last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, extent);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	/* this final i_size check is disabled */
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
		       last_offset, inode->i_size);
		err = 1;
	}
out:
	btrfs_free_path(path);
	return err;
}
/*
 * this is very complex, but the basic idea is to drop all the extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
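/*
 * The three overlap cases, sketched for a single existing extent
 * (illustrative only):
 *
 *	keep |--extent--|            drop range starts inside the
 *	           |----drop----|    extent: truncate the front piece
 *
 *	     |-----extent-----|      drop range ends inside the extent:
 *	|----drop----| keep          a "bookend" item covers the tail
 *
 *	      |--extent--|           extent entirely inside the range:
 *	|--------drop--------|       delete the item outright
 */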
int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	u64 leaf_start;
	u64 ram_bytes = 0;
	u64 disk_bytenr = 0;
	u64 root_gen;
	u64 root_owner;
	u8 compression = 0;
	u8 encryption = 0;
	u16 other_encoding = 0;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		leaf_start = 0;
		root_gen = 0;
		root_owner = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;
		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			/* stash a private copy of the extent item and the
			 * leaf's location; the path is released below and
			 * the leaf may be cowed or freed before the bookend
			 * is created
			 */
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			leaf_start = leaf->start;
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}
		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf, extent,
								new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_truncate_item(trans, root, path,
						    new_size, 1);
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			ret = btrfs_truncate_item(trans, root, path,
						  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							  encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							  other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but its the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent,
						   BTRFS_FILE_EXTENT_REG);

			btrfs_mark_buffer_dirty(path->nodes[0]);

			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes),
					   leaf->start,
					   root->root_key.objectid,
					   trans->transid, ins.objectid);
				BUG_ON(ret);
			}
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	btrfs_check_file(root, inode);
	return ret;
}
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
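	/*
	 * Worked example (assuming 4K pages and 8-byte pointers): a 10000
	 * byte write needs ceil(10000 / 4096) = 3 page pointers, well under
	 * the cap of 4096 / 8 = 512, so nrptrs is 3.  The cap keeps the
	 * pointer array allocated below within a single page.
	 */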
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto out_nolock;
	}

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * if this is a nodatasum mount, force summing off for the inode
	 * all the time.  That way a later mount with summing on won't
	 * get confused.
	 */
	if (btrfs_test_opt(root, NODATASUM))
		btrfs_set_flag(inode, NODATASUM);

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;
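		/*
		 * Example (illustrative, assuming 4K pages): with pos 6144
		 * and a large count, offset is 2048, so this pass copies at
		 * most nrptrs * 4096 - 2048 bytes; after pos advances, every
		 * later pass starts page aligned and can use all nrptrs
		 * pages.
		 */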
		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_free_space(root, write_bytes, 0);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret)
			goto out;

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret)
			goto out;

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_NONE);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				btrfs_sync_log(trans, root);
				btrfs_end_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates in
 * the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->fs_info->tree_log_batch++;
	filemap_fdatawait(inode->i_mapping);
	root->fs_info->tree_log_batch++;

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		btrfs_sync_log(trans, root);
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&file->f_dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}
static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};