2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/gfp.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
23 #include "transaction.h"
24 #include "btrfs_inode.h"
30 struct rb_node rb_node;
34 * returns > 0 if the (root_objectid, objectid) pair passed in is greater
35 * than entry, < 0 if the pair is less than entry, and zero if they are equal
/*
 * Compare a (root_objectid, objectid) key against @entry, root id first,
 * then objectid.
 * NOTE(review): the return statements of each branch are not visible in
 * this excerpt; presumably -1 / 1 per branch and 0 on fall-through — confirm.
 */
37 static int comp_entry(struct tree_entry *entry, u64 root_objectid,
40 if (root_objectid < entry->root_objectid)
42 if (root_objectid > entry->root_objectid)
44 if (objectid < entry->objectid)
46 if (objectid > entry->objectid)
/*
 * Insert @node keyed by (root_objectid, objectid) into the rbtree @root.
 * Caller must hold the tree lock in write mode.
 * NOTE(review): the descent loop and the duplicate-key handling are not
 * visible in this excerpt; presumably an existing matching node is
 * returned without inserting — confirm against the full source.
 */
51 static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid,
52 u64 objectid, struct rb_node *node)
54 struct rb_node ** p = &root->rb_node;
55 struct rb_node * parent = NULL;
56 struct tree_entry *entry;
/* compare the key against the node at the current descent position */
61 entry = rb_entry(parent, struct tree_entry, rb_node);
63 comp = comp_entry(entry, root_objectid, objectid);
/* link the new node under the parent found by the descent and rebalance */
72 rb_link_node(node, parent, p);
73 rb_insert_color(node, root);
/*
 * Search the rbtree for the entry with key (root_objectid, objectid).
 * On a miss, *prev_ret is advanced (via rb_next) past all entries that
 * compare >= the key, so the caller gets the first entry after the key.
 * NOTE(review): the descent loop body, the exact-match return, and the
 * prev_ret NULL-check are not visible in this excerpt — confirm that
 * passing prev_ret == NULL (as btrfs_add_ordered_inode does) is safe.
 */
77 static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid,
78 u64 objectid, struct rb_node **prev_ret)
80 struct rb_node * n = root->rb_node;
81 struct rb_node *prev = NULL;
82 struct tree_entry *entry;
83 struct tree_entry *prev_entry = NULL;
/* compare the key against the node at the current descent position */
87 entry = rb_entry(n, struct tree_entry, rb_node);
90 comp = comp_entry(entry, root_objectid, objectid);
/* walk prev forward past entries that compare >= the search key */
102 while(prev && comp_entry(prev_entry, root_objectid, objectid) >= 0) {
103 prev = rb_next(prev);
104 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
/*
 * Convenience wrapper around __tree_search that supplies a prev pointer.
 * NOTE(review): the lines choosing between ret and prev for the return
 * value are not visible in this excerpt — presumably prev is returned
 * when there is no exact match; confirm.
 */
110 static inline struct rb_node *tree_search(struct rb_root *root,
111 u64 root_objectid, u64 objectid)
113 struct rb_node *prev;
115 ret = __tree_search(root, root_objectid, objectid, &prev);
/*
 * Record @inode in the running transaction's ordered-inode rbtree so it
 * can be flushed at commit time.  An inode already recorded for this
 * transaction (ordered_trans >= transid) is skipped up front.
 */
121 int btrfs_add_ordered_inode(struct inode *inode)
123 struct btrfs_root *root = BTRFS_I(inode)->root;
124 u64 root_objectid = root->root_key.objectid;
125 u64 transid = root->fs_info->running_transaction->transid;
126 struct tree_entry *entry;
127 struct rb_node *node;
128 struct btrfs_ordered_inode_tree *tree;
/* already tracked for this (or a newer) transaction: nothing to do */
130 if (transid <= BTRFS_I(inode)->ordered_trans)
133 tree = &root->fs_info->running_transaction->ordered_inode_tree;
/* cheap read-locked probe before allocating a new entry */
135 read_lock(&tree->lock);
136 node = __tree_search(&tree->tree, root_objectid, inode->i_ino, NULL);
137 read_unlock(&tree->lock);
/* GFP_NOFS: we may be called from within fs writeback paths */
142 entry = kmalloc(sizeof(*entry), GFP_NOFS);
/*
 * NOTE(review): the lock is dropped between the lookup above and the
 * insert below, so a racing inserter can win; presumably tree_insert
 * detects the duplicate (and the kmalloc result is NULL-checked) in
 * lines not visible in this excerpt — confirm.
 */
146 write_lock(&tree->lock);
147 entry->objectid = inode->i_ino;
148 entry->root_objectid = root_objectid;
149 entry->inode = inode;
151 node = tree_insert(&tree->tree, root_objectid,
152 inode->i_ino, &entry->rb_node);
/* remember which transaction this inode was queued for */
154 BTRFS_I(inode)->ordered_trans = transid;
156 write_unlock(&tree->lock);
/*
 * Find the first ordered inode at or after (*root_objectid, *objectid)
 * without removing it from the tree.  On success the key outputs are
 * updated, *inode is set, and an inode reference is taken (i_count)
 * while still holding the tree lock so the entry cannot vanish first.
 * NOTE(review): return statements and NULL-node handling are not visible
 * in this excerpt.
 */
164 int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree,
165 u64 *root_objectid, u64 *objectid,
166 struct inode **inode)
168 struct tree_entry *entry;
169 struct rb_node *node;
171 write_lock(&tree->lock);
172 node = tree_search(&tree->tree, *root_objectid, *objectid);
/* nothing found: unlock and (presumably) return 0 — lines not visible */
174 write_unlock(&tree->lock);
177 entry = rb_entry(node, struct tree_entry, rb_node);
/* skip entries that are <= the requested key */
179 while(comp_entry(entry, *root_objectid, *objectid) >= 0) {
180 node = rb_next(node);
183 entry = rb_entry(node, struct tree_entry, rb_node);
186 write_unlock(&tree->lock);
/* pin the inode before dropping the lock; caller must drop the ref */
190 *root_objectid = entry->root_objectid;
191 *inode = entry->inode;
192 atomic_inc(&entry->inode->i_count);
193 *objectid = entry->objectid;
194 write_unlock(&tree->lock);
/*
 * Like btrfs_find_first_ordered_inode, but additionally erases the found
 * entry from the tree (rb_erase) before dropping the lock.  An inode
 * reference is taken while the lock is still held.
 * NOTE(review): return statements, NULL-node handling, and the kfree of
 * the erased entry are not visible in this excerpt — confirm the entry
 * is freed by the caller or in missing lines.
 */
198 int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree,
199 u64 *root_objectid, u64 *objectid,
200 struct inode **inode)
202 struct tree_entry *entry;
203 struct rb_node *node;
205 write_lock(&tree->lock);
206 node = tree_search(&tree->tree, *root_objectid, *objectid);
/* nothing found: unlock and (presumably) return 0 — lines not visible */
208 write_unlock(&tree->lock);
212 entry = rb_entry(node, struct tree_entry, rb_node);
/* skip entries that are <= the requested key */
213 while(comp_entry(entry, *root_objectid, *objectid) >= 0) {
214 node = rb_next(node);
217 entry = rb_entry(node, struct tree_entry, rb_node);
220 write_unlock(&tree->lock);
/* pin the inode, then unlink the entry, all under the write lock */
224 *root_objectid = entry->root_objectid;
225 *objectid = entry->objectid;
226 *inode = entry->inode;
227 atomic_inc(&entry->inode->i_count);
228 rb_erase(node, &tree->tree);
229 write_unlock(&tree->lock);
/*
 * Remove the entry for (root_objectid, objectid) from @tree, clear the
 * inode's ordered_trans marker, and drop the inode reference that was
 * taken when the entry was handed out.
 * NOTE(review): the not-found early return and the kfree of the erased
 * entry are not visible in this excerpt — confirm entry is freed after
 * the rb_entry() line.
 */
234 static void __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree,
236 u64 root_objectid, u64 objectid)
238 struct tree_entry *entry;
239 struct rb_node *node;
240 struct rb_node *prev;
242 write_lock(&tree->lock);
243 node = __tree_search(&tree->tree, root_objectid, objectid, &prev);
/* not found: unlock and (presumably) return — lines not visible */
245 write_unlock(&tree->lock);
248 rb_erase(node, &tree->tree);
/* allow the inode to be queued again for a later transaction */
249 BTRFS_I(inode)->ordered_trans = 0;
250 write_unlock(&tree->lock);
/* drop the ref taken when the entry was created/handed out */
251 atomic_dec(&inode->i_count);
252 entry = rb_entry(node, struct tree_entry, rb_node);
/*
 * Public removal entry point.  Unless @force, an inode that still has
 * dirty or in-flight writeback pages is left in the tree (it still needs
 * flushing at commit).  The running transaction is sampled under
 * new_trans_lock so the ordered tree cannot disappear underneath us.
 */
257 void btrfs_del_ordered_inode(struct inode *inode, int force)
259 struct btrfs_root *root = BTRFS_I(inode)->root;
260 u64 root_objectid = root->root_key.objectid;
/* never queued (or already cleared): nothing to remove */
262 if (!BTRFS_I(inode)->ordered_trans) {
/* keep the entry while pages are still dirty or under writeback */
266 if (!force && (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) ||
267 mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
270 spin_lock(&root->fs_info->new_trans_lock);
271 if (root->fs_info->running_transaction) {
272 struct btrfs_ordered_inode_tree *tree;
273 tree = &root->fs_info->running_transaction->ordered_inode_tree;
274 __btrfs_del_ordered_inode(tree, inode, root_objectid,
277 spin_unlock(&root->fs_info->new_trans_lock);
280 int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode)
282 struct btrfs_transaction *cur = root->fs_info->running_transaction;
283 while(cur == root->fs_info->running_transaction &&
284 atomic_read(&BTRFS_I(inode)->ordered_writeback)) {
285 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
286 congestion_wait(WRITE, HZ/20);
288 blk_congestion_wait(WRITE, HZ/20);