Btrfs: Add BH_Defrag to mark buffers that are in need of defragging
[linux-2.6-block.git] / fs / btrfs / transaction.c
... / ...
CommitLineData
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/fs.h>
20#include <linux/sched.h>
21#include "ctree.h"
22#include "disk-io.h"
23#include "transaction.h"
24
/* number of btrfs_transaction structs currently allocated (debug counter) */
static int total_trans = 0;

/* slab caches, defined in another file of this module */
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

/* workqueue backing the periodic transaction cleaner */
static struct workqueue_struct *trans_wq;

/* radix tree tags on fs_roots_radix: root modified in the running
 * transaction / root in need of defragging */
#define BTRFS_ROOT_TRANS_TAG 0
#define BTRFS_ROOT_DEFRAG_TAG 1
33
34static void put_transaction(struct btrfs_transaction *transaction)
35{
36 WARN_ON(transaction->use_count == 0);
37 transaction->use_count--;
38 if (transaction->use_count == 0) {
39 WARN_ON(total_trans == 0);
40 total_trans--;
41 list_del_init(&transaction->list);
42 memset(transaction, 0, sizeof(*transaction));
43 kmem_cache_free(btrfs_transaction_cachep, transaction);
44 }
45}
46
47static int join_transaction(struct btrfs_root *root)
48{
49 struct btrfs_transaction *cur_trans;
50 cur_trans = root->fs_info->running_transaction;
51 if (!cur_trans) {
52 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
53 GFP_NOFS);
54 total_trans++;
55 BUG_ON(!cur_trans);
56 root->fs_info->generation++;
57 root->fs_info->running_transaction = cur_trans;
58 cur_trans->num_writers = 0;
59 cur_trans->transid = root->fs_info->generation;
60 init_waitqueue_head(&cur_trans->writer_wait);
61 init_waitqueue_head(&cur_trans->commit_wait);
62 cur_trans->in_commit = 0;
63 cur_trans->use_count = 1;
64 cur_trans->commit_done = 0;
65 cur_trans->start_time = get_seconds();
66 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
67 init_bit_radix(&cur_trans->dirty_pages);
68 }
69 cur_trans->num_writers++;
70 return 0;
71}
72
73static int record_root_in_trans(struct btrfs_root *root)
74{
75 u64 running_trans_id = root->fs_info->running_transaction->transid;
76 if (root->ref_cows && root->last_trans < running_trans_id) {
77 WARN_ON(root == root->fs_info->extent_root);
78 if (root->root_item.refs != 0) {
79 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
80 (unsigned long)root->root_key.objectid,
81 BTRFS_ROOT_TRANS_TAG);
82 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
83 (unsigned long)root->root_key.objectid,
84 BTRFS_ROOT_DEFRAG_TAG);
85 root->commit_root = root->node;
86 get_bh(root->node);
87 } else {
88 WARN_ON(1);
89 }
90 root->last_trans = running_trans_id;
91 }
92 return 0;
93}
94
95struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
96 int num_blocks)
97{
98 struct btrfs_trans_handle *h =
99 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
100 int ret;
101
102 mutex_lock(&root->fs_info->trans_mutex);
103 ret = join_transaction(root);
104 BUG_ON(ret);
105
106 record_root_in_trans(root);
107 h->transid = root->fs_info->running_transaction->transid;
108 h->transaction = root->fs_info->running_transaction;
109 h->blocks_reserved = num_blocks;
110 h->blocks_used = 0;
111 h->block_group = NULL;
112 h->alloc_exclude_nr = 0;
113 h->alloc_exclude_start = 0;
114 root->fs_info->running_transaction->use_count++;
115 mutex_unlock(&root->fs_info->trans_mutex);
116 return h;
117}
118
119int btrfs_end_transaction(struct btrfs_trans_handle *trans,
120 struct btrfs_root *root)
121{
122 struct btrfs_transaction *cur_trans;
123
124 mutex_lock(&root->fs_info->trans_mutex);
125 cur_trans = root->fs_info->running_transaction;
126 WARN_ON(cur_trans != trans->transaction);
127 WARN_ON(cur_trans->num_writers < 1);
128 cur_trans->num_writers--;
129 if (waitqueue_active(&cur_trans->writer_wait))
130 wake_up(&cur_trans->writer_wait);
131 put_transaction(cur_trans);
132 mutex_unlock(&root->fs_info->trans_mutex);
133 memset(trans, 0, sizeof(*trans));
134 kmem_cache_free(btrfs_trans_handle_cachep, trans);
135 return 0;
136}
137
138
/*
 * Write out every btree page dirtied by this transaction and wait for
 * the I/O to finish.  Dirty pages are tracked as bits in the
 * transaction's dirty_pages radix tree, keyed by page index in the
 * btree inode's mapping.  Returns 0 or the first write/wait error seen.
 */
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	unsigned long gang[16];
	int ret;
	int i;
	int err;
	int werr = 0;
	struct page *page;
	struct radix_tree_root *dirty_pages;
	struct inode *btree_inode = root->fs_info->btree_inode;

	/* no transaction: just flush the whole btree mapping */
	if (!trans || !trans->transaction) {
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	dirty_pages = &trans->transaction->dirty_pages;
	while(1) {
		/* collect up to 16 dirty page indexes at a time */
		ret = find_first_radix_bit(dirty_pages, gang,
					   0, ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			/* FIXME EIO */
			clear_radix_bit(dirty_pages, gang[i]);
			page = find_lock_page(btree_inode->i_mapping,
					      gang[i]);
			if (!page)
				continue;
			if (PageWriteback(page)) {
				if (PageDirty(page))
					/* redirtied while in flight: wait so
					 * we can write the new contents */
					wait_on_page_writeback(page);
				else {
					/* already being written and clean:
					 * nothing left for us to do */
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			/* write_one_page unlocks the page */
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	/* wait for all outstanding btree writeback, not just our pages */
	err = filemap_fdatawait(btree_inode->i_mapping);
	if (err)
		werr = err;
	return werr;
}
187
/*
 * Push the extent root's new position into the tree root.  Updating the
 * extent tree allocates blocks, which can move the extent root again,
 * so loop until the root item's recorded blocknr matches the live node
 * (i.e. the update reaches a fixed point).  Always returns 0; per-step
 * failures are BUG_ON'd.
 */
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	int ret;
	u64 old_extent_block;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *extent_root = fs_info->extent_root;

	btrfs_write_dirty_block_groups(trans, extent_root);
	while(1) {
		old_extent_block = btrfs_root_blocknr(&extent_root->root_item);
		/* stable: the recorded blocknr matches the live node */
		if (old_extent_block == bh_blocknr(extent_root->node))
			break;
		btrfs_set_root_blocknr(&extent_root->root_item,
				       bh_blocknr(extent_root->node));
		ret = btrfs_update_root(trans, tree_root,
					&extent_root->root_key,
					&extent_root->root_item);
		BUG_ON(ret);
		/* the update may have dirtied block groups again */
		btrfs_write_dirty_block_groups(trans, extent_root);
	}
	return 0;
}
212
/*
 * Sleep until the given transaction's commit has completed.
 * commit->commit_done is checked under trans_mutex, and the mutex is
 * dropped around schedule() so the committing thread can make progress.
 * Always returns 0.
 */
static int wait_for_commit(struct btrfs_root *root,
			   struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while(!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		/* re-check after queueing on the waitqueue to avoid
		 * missing a wakeup */
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}
231
/* list node pairing a snapshotted/dead root with the dead-roots list
 * it is queued on; freed by drop_dirty_roots() */
struct dirty_root {
	struct list_head list;
	struct btrfs_root *root;
};
236
237int btrfs_add_dead_root(struct btrfs_root *root, struct list_head *dead_list)
238{
239 struct dirty_root *dirty;
240
241 dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
242 if (!dirty)
243 return -ENOMEM;
244 dirty->root = root;
245 list_add(&dirty->list, dead_list);
246 return 0;
247}
248
/*
 * For every root tagged as modified in this transaction: clear the tag,
 * insert the root's new location under a new key (offset = current
 * generation), and demote the old (commit) root to a dirty_root entry.
 * When the old root's refcount drops to 1 it is queued on *list for
 * later deletion by drop_dirty_roots().  Returns 0 or the first
 * btrfs_insert_root() error.
 */
static int add_dirty_roots(struct btrfs_trans_handle *trans,
			   struct radix_tree_root *radix,
			   struct list_head *list)
{
	struct dirty_root *dirty;
	struct btrfs_root *gang[8];
	struct btrfs_root *root;
	int i;
	int ret;
	int err = 0;
	u32 refs;

	while(1) {
		ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(radix,
				     (unsigned long)root->root_key.objectid,
				     BTRFS_ROOT_TRANS_TAG);
			if (root->commit_root == root->node) {
				/* root never actually moved during this
				 * transaction: just drop the pinned ref */
				WARN_ON(bh_blocknr(root->node) !=
					btrfs_root_blocknr(&root->root_item));
				brelse(root->commit_root);
				root->commit_root = NULL;
				continue;
			}
			dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
			BUG_ON(!dirty);
			dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
			BUG_ON(!dirty->root);

			/* reset snapshot-deletion progress for the root's
			 * new incarnation */
			memset(&root->root_item.drop_progress, 0,
			       sizeof(struct btrfs_disk_key));
			root->root_item.drop_level = 0;

			/* the copy keeps the old (commit) node; the live
			 * root moves on under a new key */
			memcpy(dirty->root, root, sizeof(*root));
			dirty->root->node = root->commit_root;
			root->commit_root = NULL;

			root->root_key.offset = root->fs_info->generation;
			btrfs_set_root_blocknr(&root->root_item,
					       bh_blocknr(root->node));
			err = btrfs_insert_root(trans, root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;

			/* the old snapshot loses one reference */
			refs = btrfs_root_refs(&dirty->root->root_item);
			btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
			err = btrfs_update_root(trans, root->fs_info->tree_root,
						&dirty->root->root_key,
						&dirty->root->root_item);

			BUG_ON(err);
			if (refs == 1) {
				/* last reference gone: queue for deletion */
				list_add(&dirty->list, list);
			} else {
				/* NOTE(review): refs > 1 here looks
				 * unexpected for this code path */
				WARN_ON(1);
				kfree(dirty->root);
				kfree(dirty);
			}
		}
	}
	return err;
}
319
/*
 * Defrag one root's leaves, one transaction-sized chunk at a time.
 * btrfs_defrag_leaves() returns -EAGAIN when it wants to be rescheduled;
 * between chunks the fs_mutex is dropped so dirty btree pages can be
 * balanced and other work can run.  Clears the root's DEFRAG tag when
 * done.  Returns 0; re-entry while defrag_running is a no-op.
 * Caller is expected to hold fs_mutex (it is unlocked/relocked here).
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	int ret;
	struct btrfs_trans_handle *trans;

	if (root->defrag_running)
		return 0;

	trans = btrfs_start_transaction(root, 1);
	while (1) {
		root->defrag_running = 1;
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		btrfs_end_transaction(trans, root);
		mutex_unlock(&info->fs_mutex);

		/* let writeback catch up before the next chunk */
		btrfs_btree_balance_dirty(root);
		cond_resched();

		mutex_lock(&info->fs_mutex);
		trans = btrfs_start_transaction(root, 1);
		if (ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	radix_tree_tag_clear(&info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_DEFRAG_TAG);
	btrfs_end_transaction(trans, root);
	return 0;
}
351
352int btrfs_defrag_dirty_roots(struct btrfs_fs_info *info)
353{
354 struct btrfs_root *gang[1];
355 struct btrfs_root *root;
356 int i;
357 int ret;
358 int err = 0;
359 u64 last = 0;
360
361 while(1) {
362 ret = radix_tree_gang_lookup_tag(&info->fs_roots_radix,
363 (void **)gang, last,
364 ARRAY_SIZE(gang),
365 BTRFS_ROOT_DEFRAG_TAG);
366 if (ret == 0)
367 break;
368 for (i = 0; i < ret; i++) {
369 root = gang[i];
370 last = root->root_key.objectid + 1;
371 btrfs_defrag_root(root, 1);
372 }
373 }
374 btrfs_defrag_root(info->extent_root, 1);
375 return err;
376}
377
/*
 * Reap every dead snapshot queued on *list: repeatedly call
 * btrfs_drop_snapshot() (which returns -EAGAIN while more work remains,
 * persisting drop progress in the root item between rounds), then
 * delete the root item and free the dirty_root.  fs_mutex is dropped
 * between rounds so btree writeback can be balanced.  Returns 0 or the
 * first btrfs_del_root() error.
 */
static int drop_dirty_roots(struct btrfs_root *tree_root,
			    struct list_head *list)
{
	struct dirty_root *dirty;
	struct btrfs_trans_handle *trans;
	int ret = 0;
	int err;

	while(!list_empty(list)) {
		mutex_lock(&tree_root->fs_info->fs_mutex);
		dirty = list_entry(list->next, struct dirty_root, list);
		list_del_init(&dirty->list);

		while(1) {
			trans = btrfs_start_transaction(tree_root, 1);
			ret = btrfs_drop_snapshot(trans, dirty->root);
			if (ret != -EAGAIN) {
				/* done (or failed); fall through with the
				 * trans handle still open */
				break;
			}
			/* record partial drop progress so a crash can
			 * resume from here */
			err = btrfs_update_root(trans,
						tree_root,
						&dirty->root->root_key,
						&dirty->root->root_item);
			if (err)
				ret = err;
			ret = btrfs_end_transaction(trans, tree_root);
			BUG_ON(ret);
			mutex_unlock(&tree_root->fs_info->fs_mutex);

			btrfs_btree_balance_dirty(tree_root);
			schedule();

			mutex_lock(&tree_root->fs_info->fs_mutex);
		}
		BUG_ON(ret);
		/* snapshot fully dropped: remove its root item */
		ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
		if (ret)
			break;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		kfree(dirty->root);
		kfree(dirty);
		mutex_unlock(&tree_root->fs_info->fs_mutex);
		btrfs_btree_balance_dirty(tree_root);
		schedule();
	}
	return ret;
}
427
/*
 * Commit the current transaction: wait out any overlapping commits and
 * remaining writers, record dirtied subvolume roots, commit the tree
 * roots, write the dirty btree pages plus the super block, and finally
 * wake waiters and retire the transaction.  Consumes the trans handle.
 * Caller holds fs_mutex; both fs_mutex and trans_mutex are dropped and
 * retaken at several points below, so their order matters throughout.
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	int ret = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	struct list_head dirty_fs_roots;
	struct radix_tree_root pinned_copy;
	DEFINE_WAIT(wait);

	init_bit_radix(&pinned_copy);
	INIT_LIST_HEAD(&dirty_fs_roots);

	mutex_lock(&root->fs_info->trans_mutex);
	/* someone else is already committing this transaction:
	 * just wait for their commit to finish */
	if (trans->transaction->in_commit) {
		cur_trans = trans->transaction;
		/* extra ref keeps cur_trans alive across the wait */
		trans->transaction->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		mutex_unlock(&root->fs_info->fs_mutex);
		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);
		put_transaction(cur_trans);
		mutex_lock(&root->fs_info->fs_mutex);
		return 0;
	}
	trans->transaction->in_commit = 1;
	cur_trans = trans->transaction;
	/* make sure the previous transaction on the list has fully
	 * committed before we finish ours */
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->fs_mutex);
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);
			put_transaction(prev_trans);

			mutex_lock(&root->fs_info->fs_mutex);
			mutex_lock(&root->fs_info->trans_mutex);
		}
	}
	/* wait for all other writers to leave; our own handle is the
	 * one remaining writer */
	while (trans->transaction->num_writers > 1) {
		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&trans->transaction->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (trans->transaction->num_writers <= 1)
			break;
		mutex_unlock(&root->fs_info->fs_mutex);
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->fs_mutex);
		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&trans->transaction->writer_wait, &wait);
	}
	finish_wait(&trans->transaction->writer_wait, &wait);
	WARN_ON(cur_trans != trans->transaction);
	ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
			      &dirty_fs_roots);
	BUG_ON(ret);

	ret = btrfs_commit_tree_roots(trans, root);
	BUG_ON(ret);

	/* no new writers may join once running_transaction is NULL */
	cur_trans = root->fs_info->running_transaction;
	root->fs_info->running_transaction = NULL;
	btrfs_set_super_generation(&root->fs_info->super_copy,
				   cur_trans->transid);
	btrfs_set_super_root(&root->fs_info->super_copy,
			     bh_blocknr(root->fs_info->tree_root->node));
	memcpy(root->fs_info->disk_super, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	/* snapshot the pinned-extent set; it is released after the
	 * super block is safely on disk */
	btrfs_copy_pinned(root, &pinned_copy);

	mutex_unlock(&root->fs_info->trans_mutex);
	mutex_unlock(&root->fs_info->fs_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root);
	mutex_lock(&root->fs_info->fs_mutex);
	btrfs_finish_extent_commit(trans, root, &pinned_copy);
	mutex_lock(&root->fs_info->trans_mutex);
	cur_trans->commit_done = 1;
	wake_up(&cur_trans->commit_wait);
	/* drop both the handle's ref and join_transaction's initial ref */
	put_transaction(cur_trans);
	put_transaction(cur_trans);
	if (root->fs_info->closing)
		/* unmounting: reap everything below, including roots
		 * queued by earlier commits */
		list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
	else
		/* defer reaping to the cleaner */
		list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (root->fs_info->closing) {
		mutex_unlock(&root->fs_info->fs_mutex);
		drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
		mutex_lock(&root->fs_info->fs_mutex);
	}
	return ret;
}
531
532int btrfs_clean_old_snapshots(struct btrfs_root *root)
533{
534 struct list_head dirty_roots;
535 INIT_LIST_HEAD(&dirty_roots);
536
537 mutex_lock(&root->fs_info->trans_mutex);
538 list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
539 mutex_unlock(&root->fs_info->trans_mutex);
540
541 if (!list_empty(&dirty_roots)) {
542 drop_dirty_roots(root, &dirty_roots);
543 }
544 return 0;
545}
/*
 * Delayed-work handler: if the running transaction is at least 30
 * seconds old, defrag the dirty roots and commit it, then clean dead
 * snapshots and requeue ourselves.  Reschedules sooner (5s) when the
 * transaction is still young.
 */
void btrfs_transaction_cleaner(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info = container_of(work,
						     struct btrfs_fs_info,
						     trans_work.work);

	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_transaction *cur;
	struct btrfs_trans_handle *trans;
	unsigned long now;
	unsigned long delay = HZ * 30;
	int ret;

	mutex_lock(&root->fs_info->fs_mutex);
	mutex_lock(&root->fs_info->trans_mutex);
	cur = root->fs_info->running_transaction;
	if (!cur) {
		/* nothing running: nothing to commit */
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	now = get_seconds();
	/* transaction younger than 30s (or clock went backwards):
	 * check again in 5 seconds */
	if (now < cur->start_time || now - cur->start_time < 30) {
		mutex_unlock(&root->fs_info->trans_mutex);
		delay = HZ * 5;
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	btrfs_defrag_dirty_roots(root->fs_info);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
out:
	mutex_unlock(&root->fs_info->fs_mutex);
	btrfs_clean_old_snapshots(root);
	btrfs_transaction_queue_work(root, delay);
}
581
582void btrfs_transaction_queue_work(struct btrfs_root *root, int delay)
583{
584 queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay);
585}
586
587void btrfs_transaction_flush_work(struct btrfs_root *root)
588{
589 cancel_rearming_delayed_workqueue(trans_wq, &root->fs_info->trans_work);
590 flush_workqueue(trans_wq);
591}
592
/* module init: create the workqueue used by the transaction cleaner */
void __init btrfs_init_transaction_sys(void)
{
	trans_wq = create_workqueue("btrfs");
}
597
/* module exit: tear down the transaction cleaner workqueue */
void __exit btrfs_exit_transaction_sys(void)
{
	destroy_workqueue(trans_wq);
}
602