/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "tree-log.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_ALL 2
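
/*
 * A rough sketch of how replay is expected to drive these stages (the
 * actual recovery driver is not shown in this excerpt and may differ):
 *
 *	wc.stage = LOG_WALK_PIN_ONLY;      walk_log_tree(trans, log, &wc);
 *	wc.stage = LOG_WALK_REPLAY_INODES; walk_log_tree(trans, log, &wc);
 *	wc.stage = LOG_WALK_REPLAY_ALL;    walk_log_tree(trans, log, &wc);
 */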
static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct inode *inode,
			     int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
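
/*
 * As a rough sketch (the exact caller shape lives in the fsync path in
 * file.c and may differ), an fsync drives this code along the lines of:
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	ret = btrfs_log_dentry_safe(trans, root, dentry);
 *	if (ret == 0)
 *		btrfs_sync_log(trans, root);
 *	btrfs_end_transaction(trans, root);
 */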
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root)
{
	int ret;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		atomic_inc(&root->log_writers);
		mutex_unlock(&root->log_mutex);
		return 0;
	}

	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree) {
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
		BUG_ON(ret);
	}
	if (!root->log_root) {
		ret = btrfs_add_log_tree(trans, root);
		BUG_ON(ret);
	}
	mutex_unlock(&root->fs_info->tree_log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return 0;
}
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
static int end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		smp_mb();
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
	return 0;
}
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	if (wc->pin) {
		mutex_lock(&log->fs_info->pinned_mutex);
		btrfs_update_pinned_extents(log->fs_info->extent_root,
					    eb->start, eb->len, 1);
		mutex_unlock(&log->fs_info->pinned_mutex);
	}

	if (btrfs_buffer_uptodate(eb, gen)) {
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return 0;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);
	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(root, path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(root, path);
			return 0;
		}
	}
insert:
	btrfs_release_path(root, path);
	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size) {
			btrfs_truncate_item(trans, root, path, item_size, 1);
		} else if (found_size < item_size) {
			ret = btrfs_extend_item(trans, root, path,
						item_size - found_size);
			BUG_ON(ret);
		}
	} else if (ret) {
		BUG();
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);
	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)
			goto no_copy;

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(root, path);
	return 0;
}
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;

	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		BTRFS_I(inode)->location.objectid = objectid;
		BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
		BTRFS_I(inode)->location.offset = 0;
		btrfs_read_locked_inode(inode);
		unlock_new_inode(inode);
	}
	if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 mask = root->sectorsize - 1;
	u64 extent_end;
	u64 alloc_hint;
	u64 start = key->offset;
	u64 saved_nbytes;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC)
		extent_end = start + btrfs_file_extent_num_bytes(eb, item);
	else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, item);
		extent_end = (start + size + mask) & ~mask;
	} else {
		ret = 0;
		goto out;
	}
	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(root, path);
			goto out;
		}
	}
	btrfs_release_path(root, path);
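	/*
	 * the logged inode item already carries the correct nbytes, but
	 * btrfs_drop_extents and the extent insertion below adjust the
	 * byte accounting; save the value here so it can be restored at
	 * the end
	 */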
	saved_nbytes = inode_get_bytes(inode);
	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode,
				 start, extent_end, start, &alloc_hint);
	BUG_ON(ret);
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		BUG_ON(ret);
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
						  ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						path->nodes[0]->start,
						root->root_key.objectid,
						trans->transid, key->objectid);
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_extent(trans, root,
						path->nodes[0]->start,
						root->root_key.objectid,
						trans->transid, key->objectid,
						&ins);
				BUG_ON(ret);
			}
			btrfs_release_path(root, path);
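			/* copy the csums for this extent out of the log
			 * tree's csum items and into the real fs csum tree
			 */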
			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						       csum_start, csum_end - 1,
						       &ordered_sums);
			BUG_ON(ret);
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
						sums);
				BUG_ON(ret);
				list_del(&sums->list);
				kfree(sums);
			}
		} else {
			btrfs_release_path(root, path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		BUG_ON(ret);
	}

	inode_set_bytes(inode, saved_nbytes);
	btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(root, path);

	inode = read_one_inode(root, location.objectid);
	BUG_ON(!inode);

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	BUG_ON(ret);
	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	BUG_ON(ret);
	kfree(name);

	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(root, path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(root, path);
	return match;
}
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir;
	int ret;
	struct btrfs_key location;
	struct btrfs_inode_ref *ref;
	struct btrfs_dir_item *di;
	struct inode *inode;
	char *name;
	int namelen;
	unsigned long ref_ptr;
	unsigned long ref_end;

	location.objectid = key->objectid;
	location.type = BTRFS_INODE_ITEM_KEY;
	location.offset = 0;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, key->offset);
	if (!dir)
		return -ENOENT;

	inode = read_one_inode(root, key->objectid);
	BUG_ON(!inode);

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
again:
	ref = (struct btrfs_inode_ref *)ref_ptr;

	namelen = btrfs_inode_ref_name_len(eb, ref);
	name = kmalloc(namelen, GFP_NOFS);
	BUG_ON(!name);

	read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);

	/* if we already have a perfect match, we're done */
	if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
			 btrfs_inode_ref_index(eb, ref),
			 name, namelen)) {
		goto out;
	}
	/*
	 * look for a conflicting back reference in the metadata.
	 * if we find one we have to unlink that name of the file
	 * before we add our new link.  Later on, we overwrite any
	 * existing back reference, and we don't want to create
	 * dangling pointers in the directory.
	 */
conflict_again:
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		char *victim_name;
		int victim_name_len;
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;
		struct extent_buffer *leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (key->objectid == key->offset)
			goto out_nowrite;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			BUG_ON(!victim_name);

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log, key, victim_name,
					    victim_name_len)) {
				btrfs_inc_nlink(inode);
				btrfs_release_path(root, path);
				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				kfree(victim_name);
				btrfs_release_path(root, path);
				goto conflict_again;
			}
			kfree(victim_name);
			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}
	}
	btrfs_release_path(root, path);
	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 btrfs_inode_ref_index(eb, ref),
					 name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		BUG_ON(ret);
	}
	btrfs_release_path(root, path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		BUG_ON(ret);
	}
	btrfs_release_path(root, path);

	/* insert our name */
	ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
			     btrfs_inode_ref_index(eb, ref));
	BUG_ON(ret);

	btrfs_update_inode(trans, root, inode);
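	/* advance to the next name packed into this backref item, if any */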
out:
	ref_ptr = (unsigned long)(ref + 1) + namelen;
	kfree(name);
	if (ref_ptr < ref_end)
		goto again;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
	BUG_ON(ret);

out_nowrite:
	btrfs_release_path(root, path);
	iput(dir);
	iput(inode);
	return 0;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	u64 nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;

	key.objectid = inode->i_ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != inode->i_ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		key.offset--;
		btrfs_release_path(root, path);
	}
	btrfs_free_path(path);
	if (nlink != inode->i_nlink) {
		inode->i_nlink = nlink;
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	return 0;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		BUG_ON(ret);

		btrfs_release_path(root, path);
		inode = read_one_inode(root, key.offset);
		BUG_ON(!inode);

		ret = fixup_inode_link_count(trans, root, inode);
		BUG_ON(ret);

		iput(inode);
		key.offset = (u64)-1;
	}
	btrfs_release_path(root, path);
	return 0;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	BUG_ON(!inode);

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(root, path);
	if (ret == 0) {
		btrfs_inc_nlink(inode);
		btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG();
	}
	iput(inode);

	return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 dirid, u64 index,
				    char *name, int name_len, u8 type,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}
	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret;

	dir = read_one_inode(root, key->objectid);
	BUG_ON(!dir);

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(root, path);
	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		BUG();
	}
	if (!dst_di || IS_ERR(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
	BUG_ON(ret);

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(root, path);
	kfree(name);
	iput(dir);
	return 0;

insert:
	btrfs_release_path(root, path);
	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
			      name, name_len, log_type, &log_key);
	if (ret && ret != -ENOENT)
		BUG();
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		BUG_ON(ret);
		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	return 0;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
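
/*
 * Example with hypothetical offsets: if the log holds a range item
 * covering [3, 7] for a directory, and the subvolume copy of that
 * directory contains an index at offset 5 that is not in the log, the
 * offset 5 entry was deleted before the fsync and is removed in replay.
 */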
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(root, path);
	return ret;
}
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
							     log_path,
							     dir_key->objectid,
							     dir_key->offset,
							     name, name_len, 0);
		}
		if (!log_di || IS_ERR(log_di)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(root, path);
			btrfs_release_path(log, log_path);
			inode = read_one_inode(root, location.objectid);
			BUG_ON(!inode);

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			BUG_ON(ret);
			btrfs_inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			BUG_ON(ret);
			kfree(name);
			iput(inode);

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		}
		btrfs_release_path(log, log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_release_path(log, log_path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		ret = find_dir_range(log, path, dirid, key_type,
				     &range_start, &range_end);
		if (ret != 0)
			break;

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir, &found_key);
			BUG_ON(ret);
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(root, path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}
next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(root, path);
		goto again;
	}
out:
	btrfs_release_path(root, path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back references).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	u32 item_size;
	int level;
	int i;
	int ret;

	btrfs_read_buffer(eb, gen);

	level = btrfs_header_level(eb);
	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		item_size = btrfs_item_size_nr(eb, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct inode *inode;
			struct btrfs_inode_item *inode_item;
			u32 mode;
			inode_item = btrfs_item_ptr(eb, i,
						    struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid);
				BUG_ON(ret);
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);

			/* for regular files, truncate away
			 * extents past the new EOF
			 */
			if (S_ISREG(mode)) {
				inode = read_one_inode(root,
						       key.objectid);
				BUG_ON(!inode);

				ret = btrfs_truncate_inode_items(wc->trans,
					root, inode, inode->i_size,
					BTRFS_EXTENT_DATA_KEY);
				BUG_ON(ret);
				iput(inode);
			}
			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			BUG_ON(ret);
		}
		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_INODE_REF_KEY) {
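			/* a parent directory that was never logged shows
			 * up as -ENOENT here; the link count fixup code
			 * makes up for it after replay
			 */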
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			BUG_ON(ret && ret != -ENOENT);
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
			   key.type == BTRFS_DIR_INDEX_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			BUG_ON(ret);
		}
	}
	btrfs_free_path(path);
	return 0;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	u64 root_owner;
	u64 root_gen;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);
		root_gen = btrfs_header_generation(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);

		wc->process_func(root, next, wc, ptr_gen);

		if (*level == 1) {
			path->slots[*level]++;
			if (wc->free) {
				btrfs_read_buffer(next, ptr_gen);

				btrfs_tree_lock(next);
				clean_tree_block(trans, root, next);
				btrfs_set_lock_blocking(next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				ret = btrfs_drop_leaf_ref(trans, root, next);
				BUG_ON(ret);

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_reserved_extent(root,
							 bytenr, blocksize);
				BUG_ON(ret);
			}
			free_extent_buffer(next);
			continue;
		}
		btrfs_read_buffer(next, ptr_gen);
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	if (path->nodes[*level] == root->node)
		parent = path->nodes[*level];
	else
		parent = path->nodes[*level + 1];

	bytenr = path->nodes[*level]->start;

	blocksize = btrfs_level_size(root, *level);
	root_owner = btrfs_header_owner(parent);
	root_gen = btrfs_header_generation(parent);

	wc->process_func(root, path->nodes[*level], wc,
			 btrfs_header_generation(path->nodes[*level]));

	if (wc->free) {
		next = path->nodes[*level];
		btrfs_tree_lock(next);
		clean_tree_block(trans, root, next);
		btrfs_set_lock_blocking(next);
		btrfs_wait_tree_block_writeback(next);
		btrfs_tree_unlock(next);

		if (*level == 0) {
			ret = btrfs_drop_leaf_ref(trans, root, next);
			BUG_ON(ret);
		}
		WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
		ret = btrfs_free_reserved_extent(root, bytenr, blocksize);
		BUG_ON(ret);
	}
	free_extent_buffer(path->nodes[*level]);
	path->nodes[*level] = NULL;

	*level += 1;
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	u64 root_owner;
	u64 root_gen;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
			struct extent_buffer *node;
			node = path->nodes[i];
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);
			wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				btrfs_tree_lock(next);
				clean_tree_block(trans, root, next);
				btrfs_set_lock_blocking(next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				if (*level == 0) {
					ret = btrfs_drop_leaf_ref(trans, root,
								  next);
					BUG_ON(ret);
				}

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				BUG_ON(ret);
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;
	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			btrfs_tree_lock(next);
			clean_tree_block(trans, log, next);
			btrfs_set_lock_blocking(next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);
			if (orig_level == 0) {
				ret = btrfs_drop_leaf_ref(trans, log,
							  next);
				BUG_ON(ret);
			}
			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_reserved_extent(log, next->start,
							 next->len);
			BUG_ON(ret);
		}
	}

	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	}
	return ret;
}
static int wait_log_commit(struct btrfs_root *root, unsigned long transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (root->log_transid < transid + 2 &&
		    atomic_read(&root->log_commit[index]))
			schedule();
		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->log_transid < transid + 2 &&
		 atomic_read(&root->log_commit[index]));
	return 0;
}
static int wait_for_writer(struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	while (atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (atomic_read(&root->log_writers))
			schedule();
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
	}
	return 0;
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root)
{
	int index1;
	int index2;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;

	mutex_lock(&root->log_mutex);
	index1 = root->log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(root, root->log_transid);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, root->log_transid - 1);
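	/* writers can keep bumping log_batch while we sleep; loop until a
	 * full pass completes without any new batches showing up
	 */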
	while (1) {
		unsigned long batch = root->log_batch;
		mutex_unlock(&root->log_mutex);
		schedule_timeout_uninterruptible(1);
		mutex_lock(&root->log_mutex);
		wait_for_writer(root);
		if (batch == root->log_batch)
			break;
	}
	ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages);
	BUG_ON(ret);

	btrfs_set_root_bytenr(&log->root_item, log->node->start);
	btrfs_set_root_generation(&log->root_item, trans->transid);
	btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));

	root->log_batch = 0;
	root->log_transid++;
	log->log_transid = root->log_transid;
	smp_mb();
	/*
	 * log tree has been flushed to disk, new modifications of
	 * the log will be written to new positions.  so it's safe to
	 * allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_batch++;
	atomic_inc(&log_root_tree->log_writers);
	mutex_unlock(&log_root_tree->log_mutex);
	ret = update_log_root(trans, log);
	BUG_ON(ret);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	index2 = log_root_tree->log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		wait_log_commit(log_root_tree, log_root_tree->log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out;
	}
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2]))
		wait_log_commit(log_root_tree, log_root_tree->log_transid - 1);
	wait_for_writer(log_root_tree);

	ret = btrfs_write_and_wait_marked_extents(log_root_tree,
					&log_root_tree->dirty_log_pages);
	BUG_ON(ret);

	btrfs_set_super_log_root(&root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_batch = 0;
	log_root_tree->log_transid++;
	smp_mb();

	mutex_unlock(&log_root_tree->log_mutex);
	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	write_ctree_super(trans, root->fs_info->tree_root, 2);

	atomic_set(&log_root_tree->log_commit[index2], 0);
	smp_mb();
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	atomic_set(&root->log_commit[index1], 0);
	smp_mb();
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return 0;
}
/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	int ret;
	struct btrfs_root *log;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	if (!root->log_root || root->fs_info->log_root_recovering)
		return 0;

	log = root->log_root;
	ret = walk_log_tree(trans, log, &wc);
	BUG_ON(ret);
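	/* the log's dirty page tracking is no longer needed; clear any
	 * ranges still marked dirty so the pages can be released
	 */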
	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
					    0, &start, &end, EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_dirty(&log->dirty_log_pages,
				   start, end, GFP_NOFS);
	}

	if (log->log_transid > 0) {
		ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
				     &log->root_key);
		BUG_ON(ret);
	}
	root->log_root = NULL;
	free_extent_buffer(log->node);
	kfree(log);
	return 0;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int bytes_del = 0;

	if (BTRFS_I(dir)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&BTRFS_I(dir)->log_mutex);
	log = root->log_root;
	path = btrfs_alloc_path();
	di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
				   name, name_len, -1);
	if (di && !IS_ERR(di)) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		BUG_ON(ret);
	}
	btrfs_release_path(log, path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
					 index, name, name_len, -1);
	if (di && !IS_ERR(di)) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		BUG_ON(ret);
	}
	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir->i_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(log, path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(log, path);
	}
	btrfs_free_path(path);
	mutex_unlock(&BTRFS_I(dir)->log_mutex);
	end_log_trans(root);

	return 0;
}
/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (BTRFS_I(inode)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&BTRFS_I(inode)->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
				  dirid, &index);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);
	end_log_trans(root);

	return ret;
}
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	BUG_ON(ret);

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(log, path);
	return 0;
}
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct inode *inode,
				  struct btrfs_path *path,
				  struct btrfs_path *dst_path, int key_type,
				  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;

	max_key.objectid = inode->i_ino;
	max_key.offset = (u64)-1;
	max_key.type = key_type;

	min_key.objectid = inode->i_ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	path->keep_locks = 1;
	ret = btrfs_search_forward(root, &min_key, &max_key,
				   path, 0, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != inode->i_ino ||
	    min_key.type != key_type) {
		min_key.objectid = inode->i_ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(root, path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, inode->i_ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}
	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
		}
	}
	btrfs_release_path(root, path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (ret != 0) {
		WARN_ON(1);
		goto done;
	}
	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != inode->i_ino ||
			    min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			BUG_ON(ret);
		}
		path->slots[0] = nritems;
		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);

			last_offset = tmp.offset;
			goto done;
		}
	}
done:
	*last_offset_ret = last_offset;
	btrfs_release_path(root, path);
	btrfs_release_path(log, dst_path);

	/* insert the log range keys to indicate where the log is valid */
	ret = insert_dir_log_key(trans, log, path, key_type, inode->i_ino,
				 first_offset, last_offset);
	BUG_ON(ret);
	return 0;
}
/*
 * logging directories is very similar to logging inodes.  We find all the
 * items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct inode *inode,
					  struct btrfs_path *path,
					  struct btrfs_path *dst_path)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path,
				    dst_path, key_type, min_key,
				    &max_key);
		BUG_ON(ret);
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}
/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       u64 objectid, int max_key_type)
{
2396 struct btrfs_key key;
2397 struct btrfs_key found_key;
2399 key.objectid = objectid;
2400 key.type = max_key_type;
2401 key.offset = (u64)-1;
2404 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
2409 if (path->slots[0] == 0)
2413 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2416 if (found_key.objectid != objectid)
2419 ret = btrfs_del_item(trans, log, path);
2421 btrfs_release_path(log, path);
2423 btrfs_release_path(log, path);
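
/*
 * Illustrative sketch, not part of btrfs: a hypothetical caller dropping
 * every previously logged item of an inode up to and including xattrs,
 * the way __btrfs_log_inode() below does for a directory logged with
 * LOG_INODE_EXISTS.  Safe only because none of the removed item types
 * own data extents.
 */
static int __maybe_unused example_drop_logged_items(
		struct btrfs_trans_handle *trans, struct btrfs_root *log,
		struct btrfs_path *path, u64 ino)
{
	return drop_objectid_items(trans, log, path, ino,
				   BTRFS_XATTR_ITEM_KEY);
}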
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *dst_path,
			       struct extent_buffer *src,
			       int start_slot, int nr, int inode_only)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;
	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	BUG_ON(ret);

	for (i = 0; i < nr; i++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);
		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
				   src_offset, ins_sizes[i]);

		if (inode_only == LOG_INODE_EXISTS &&
		    ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);

			/* set the generation to zero so the recovery code
			 * can tell the difference between an inode logged
			 * just to say 'this inode exists' and a logging
			 * to say 'update this inode with these values'
			 * (the replay-side test is sketched after this
			 * function)
			 */
			btrfs_set_inode_generation(dst_path->nodes[0],
						   inode_item, 0);
		}
		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				u64 ds = btrfs_file_extent_disk_bytenr(src,
								       extent);
				u64 dl = btrfs_file_extent_disk_num_bytes(src,
								       extent);
				u64 cs = btrfs_file_extent_offset(src, extent);
				u64 cl = btrfs_file_extent_num_bytes(src,
								     extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}
				/* ds == 0 is a hole */
				if (ds != 0) {
					ret = btrfs_inc_extent_ref(trans, log,
						ds, dl,
						dst_path->nodes[0]->start,
						BTRFS_TREE_LOG_OBJECTID,
						trans->transid,
						ins_keys[i].objectid);
					BUG_ON(ret);
					ret = btrfs_lookup_csums_range(
						log->fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums);
					BUG_ON(ret);
				}
			}
		}
		dst_path->slots[0]++;
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(log, dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		ret = btrfs_csum_file_blocks(trans, log, sums);
		BUG_ON(ret);
		list_del(&sums->list);
		kfree(sums);
	}
	return 0;
}
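
/*
 * Illustrative sketch, not part of btrfs: the zeroed generation written by
 * copy_items() above lets the recovery side distinguish a bare "this inode
 * exists" record from a full "update this inode" record.  Hypothetical
 * helper; leaf and slot would come from the log tree during replay.
 */
static int __maybe_unused example_is_exists_only_record(
		struct extent_buffer *leaf, int slot)
{
	struct btrfs_inode_item *item;

	item = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
	/* generation 0 means the item was logged only to mark existence */
	return btrfs_inode_generation(leaf, item) == 0;
}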
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree.  An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.  (The key window each mode walks is sketched after this function.)
 *
 * This handles both files and directories.
 */
static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct inode *inode,
			     int inode_only)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src = NULL;
	u32 size;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;

	path = btrfs_alloc_path();
	dst_path = btrfs_alloc_path();

	min_key.objectid = inode->i_ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = inode->i_ino;
	if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/*
	 * if this inode has already been logged and we're in inode_only
	 * mode, we don't want to delete the things that have already
	 * been written to the log.
	 *
	 * But, if the inode has been through an inode_only log,
	 * the logged_trans field is not set.  This allows us to catch
	 * any new names for this inode in the backrefs by logging it
	 * again
	 */
	if (inode_only == LOG_INODE_EXISTS &&
	    BTRFS_I(inode)->logged_trans == trans->transid) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		goto out;
	}
	mutex_lock(&BTRFS_I(inode)->log_mutex);

	/*
	 * a brute force approach to making sure we get the most up-to-date
	 * copies of everything.
	 */
	if (S_ISDIR(inode->i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path,
					  inode->i_ino, max_key_type);
	} else {
		ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
	}
	BUG_ON(ret);
	path->keep_locks = 1;

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key, &max_key,
					   path, 0, trans->transid);
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != inode->i_ino)
			break;
		if (min_key.type > max_key.type)
			break;

		src = path->nodes[0];
		size = btrfs_item_size_nr(src, path->slots[0]);
		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, log, dst_path, src, ins_start_slot,
				 ins_nr, inode_only);
		BUG_ON(ret);
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:
		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, log, dst_path, src,
					 ins_start_slot,
					 ins_nr, inode_only);
			BUG_ON(ret);
			ins_nr = 0;
		}
		btrfs_release_path(root, path);

		if (min_key.offset < (u64)-1)
			min_key.offset++;
		else if (min_key.type < (u8)-1)
			min_key.type++;
		else if (min_key.objectid < (u64)-1)
			min_key.objectid++;
		else
			break;
	}
	if (ins_nr) {
		ret = copy_items(trans, log, dst_path, src,
				 ins_start_slot,
				 ins_nr, inode_only);
		BUG_ON(ret);
		ins_nr = 0;
	}
	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		btrfs_release_path(root, path);
		btrfs_release_path(log, dst_path);
		BTRFS_I(inode)->log_dirty_trans = 0;
		ret = log_directory_changes(trans, root, inode, path, dst_path);
		BUG_ON(ret);
	}
	BTRFS_I(inode)->logged_trans = trans->transid;
	mutex_unlock(&BTRFS_I(inode)->log_mutex);
out:
	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return 0;
}
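
/*
 * Illustrative sketch, not part of btrfs: the [min_key, max_key] window the
 * search in __btrfs_log_inode() walks.  LOG_INODE_EXISTS (and directories)
 * cap the window at xattrs so file extent items are never copied, while
 * LOG_INODE_ALL on a regular file opens it to every item type.  The helper
 * is hypothetical and simply mirrors the setup above.
 */
static void __maybe_unused example_log_key_window(u64 ino, int inode_only,
						  int is_dir,
						  struct btrfs_key *min_key,
						  struct btrfs_key *max_key)
{
	min_key->objectid = ino;
	min_key->type = BTRFS_INODE_ITEM_KEY;	/* lowest type for an inode */
	min_key->offset = 0;

	max_key->objectid = ino;
	if (inode_only == LOG_INODE_EXISTS || is_dir)
		max_key->type = BTRFS_XATTR_ITEM_KEY;	/* skip data extents */
	else
		max_key->type = (u8)-1;			/* everything */
	max_key->offset = (u64)-1;
}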
int btrfs_log_inode(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct inode *inode,
		    int inode_only)
{
	int ret;

	start_log_trans(trans, root);
	ret = __btrfs_log_inode(trans, root, inode, inode_only);
	end_log_trans(root);
	return ret;
}
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log.  A minimal inode-and-backref
 * only logging is done for any parent directories that are newer than
 * the last committed transaction.  (The stopping condition is sketched
 * after this function.)
 */
int btrfs_log_dentry(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root, struct dentry *dentry)
{
	int inode_only = LOG_INODE_ALL;
	struct super_block *sb;
	int ret;

	start_log_trans(trans, root);
	sb = dentry->d_inode->i_sb;
	while (1) {
		ret = __btrfs_log_inode(trans, root, dentry->d_inode,
					inode_only);
		BUG_ON(ret);
		inode_only = LOG_INODE_EXISTS;

		dentry = dentry->d_parent;
		if (!dentry || !dentry->d_inode || sb != dentry->d_inode->i_sb)
			break;

		if (BTRFS_I(dentry->d_inode)->generation <=
		    root->fs_info->last_trans_committed)
			break;
	}
	end_log_trans(root);
	return 0;
}
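
/*
 * Illustrative sketch, not part of btrfs: the parent walk above only has to
 * climb while parents were created in an uncommitted transaction, because a
 * parent already on disk can be trusted at replay time.  Hypothetical
 * predicate spelling out the stopping condition:
 */
static int __maybe_unused example_parent_needs_exists_log(
		struct btrfs_root *root, struct inode *parent)
{
	/* a generation newer than the last commit only exists in the log */
	return BTRFS_I(parent)->generation >
	       root->fs_info->last_trans_committed;
}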
/*
 * it is not safe to log a dentry if the chunk root has added new
 * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.  (A caller sketch follows this function.)
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry)
{
	u64 gen;

	gen = root->fs_info->last_trans_new_blockgroup;
	if (gen > root->fs_info->last_trans_committed)
		return 1;

	return btrfs_log_dentry(trans, root, dentry);
}
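
/*
 * Illustrative sketch, not part of btrfs: one way a hypothetical fsync path
 * could consume the return convention above.  It assumes btrfs_sync_log()
 * is available earlier in this file and that the caller holds a started
 * transaction handle.
 */
static int __maybe_unused example_fsync_via_log(
		struct btrfs_trans_handle *trans, struct btrfs_root *root,
		struct dentry *dentry)
{
	int ret = btrfs_log_dentry_safe(trans, root, dentry);

	if (ret == 0) {
		/* the log covers this dentry; writing the log is enough */
		btrfs_sync_log(trans, root);
		return btrfs_end_transaction(trans, root);
	}
	/* new chunks were added this transaction; only a commit is safe */
	return btrfs_commit_transaction(trans, root);
}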
/*
 * should be called during mount to replay any log trees
 * from the FS.  (A mount-time caller sketch follows this function.)
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	u64 highest_inode;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	fs_info->log_root_recovering = 1;
	path = btrfs_alloc_path();
	BUG_ON(!path);

	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	wc.trans = trans;
	wc.pin = 1;

	walk_log_tree(trans, log_root_tree, &wc);

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(log_root_tree, path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root_no_radix(log_root_tree,
						  &found_key);
		BUG_ON(!log);

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		BUG_ON(!wc.replay_dest);

		wc.replay_dest->log_root = log;
		mutex_lock(&fs_info->trans_mutex);
		btrfs_record_root_in_trans(wc.replay_dest);
		mutex_unlock(&fs_info->trans_mutex);
		ret = walk_log_tree(trans, log, &wc);
		BUG_ON(ret);

		if (wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
			BUG_ON(ret);
		}
		ret = btrfs_find_highest_inode(wc.replay_dest, &highest_inode);
		if (ret == 0) {
			wc.replay_dest->highest_inode = highest_inode;
			wc.replay_dest->last_inode_alloc = highest_inode;
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		kfree(log);

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(log_root_tree, path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;

	/* step 4: commit the transaction, which also unpins the blocks */
	btrfs_commit_transaction(trans, fs_info->tree_root);

	kfree(log_root_tree);
	return 0;
}
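
/*
 * Illustrative sketch, not part of btrfs: a hypothetical mount path would
 * invoke the recovery above only when a log root was found on disk.  It
 * assumes fs_info->log_root_tree has already been read from the superblock.
 */
static int __maybe_unused example_mount_replay(struct btrfs_fs_info *fs_info)
{
	/* no log root on disk means the filesystem was cleanly committed */
	if (fs_info->log_root_tree)
		return btrfs_recover_log_trees(fs_info->log_root_tree);
	return 0;
}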