/*
 * linux/fs/jbd/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
static void __journal_temp_unlink_buffer(struct journal_head *jh);

/*
 * get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction	and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 *
 * Called under j_state_lock
 */

static transaction_t *
get_transaction(journal_t *journal, transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires =
				round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;

	return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

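/*
 * Usage sketch (illustrative only, not part of the original file): the
 * typical life cycle of a handle as driven by a filesystem such as
 * ext3.  The helper name below is hypothetical; the jbd calls are the
 * real API defined in this file and <linux/jbd.h>.
 */
#if 0	/* example only */
static int example_handle_lifecycle(journal_t *journal,
				    struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	/* Reserve credit for one modified metadata buffer. */
	handle = journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Declare intent to modify, change the contents, then log it. */
	err = journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data here ... */
		err = journal_dirty_metadata(handle, bh);
	}

	/* Always balance journal_start(), even on error. */
	journal_stop(handle);
	return err;
}
#endif
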
/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle)
{
	transaction_t *transaction;
	int needed;
	int nblocks = handle->h_buffer_credits;
	transaction_t *new_transaction = NULL;
	int ret = 0;

	if (nblocks > journal->j_max_transaction_buffers) {
		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
		       current->comm, nblocks,
		       journal->j_max_transaction_buffers);
		ret = -ENOSPC;
		goto out;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		new_transaction = kzalloc(sizeof(*new_transaction),
					  GFP_NOFS|__GFP_NOFAIL);
		if (!new_transaction) {
			ret = -ENOMEM;
			goto out;
		}
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

repeat:

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
	spin_lock(&journal->j_state_lock);
repeat_locked:
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
		spin_unlock(&journal->j_state_lock);
		ret = -EROFS;
		goto out;
	}

	/* Wait on the journal's transaction barrier if necessary */
	if (journal->j_barrier_count) {
		spin_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		if (!new_transaction) {
			spin_unlock(&journal->j_state_lock);
			goto alloc_transaction;
		}
		get_transaction(journal, new_transaction);
		new_transaction = NULL;
	}

	transaction = journal->j_running_transaction;

	/*
	 * If the current transaction is locked down for commit, wait for the
	 * lock to be released.
	 */
	if (transaction->t_state == T_LOCKED) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_transaction_locked,
					&wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * If there is not enough space left in the log to write all potential
	 * buffers requested by this operation, we need to stall pending a log
	 * checkpoint to free some more log space.
	 */
	spin_lock(&transaction->t_handle_lock);
	needed = transaction->t_outstanding_credits + nblocks;

	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large, then start
		 * to commit it: we can then go back and attach this handle to
		 * a new transaction.
		 */
		DEFINE_WAIT(wait);

		jbd_debug(2, "Handle %p starting new commit...\n", handle);
		spin_unlock(&transaction->t_handle_lock);
		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
				TASK_UNINTERRUPTIBLE);
		__log_start_commit(journal, transaction->t_tid);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 *
	 * The worst part is, any transaction currently committing can
	 * reduce the free space arbitrarily.  Be careful to account for
	 * those buffers when checkpointing.
	 */

	/*
	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
	 * a _lot_ of headroom: 1/4 of the journal plus the size of
	 * the committing transaction.  Really, we only need to give it
	 * committing_transaction->t_outstanding_credits plus "enough" for
	 * the log control blocks.
	 * Also, this test is inconsistent with the matching one in
	 * journal_extend().
	 */
	if (__log_space_left(journal) < jbd_space_needed(journal)) {
		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		spin_unlock(&transaction->t_handle_lock);
		__log_wait_for_space(journal);
		goto repeat_locked;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction. */

	handle->h_transaction = transaction;
	transaction->t_outstanding_credits += nblocks;
	transaction->t_updates++;
	transaction->t_handle_count++;
	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
		  handle, nblocks, transaction->t_outstanding_credits,
		  __log_space_left(journal));
	spin_unlock(&transaction->t_handle_lock);
	spin_unlock(&journal->j_state_lock);

	lock_map_acquire(&handle->h_lockdep_map);
out:
	if (unlikely(new_transaction))		/* It's usually NULL */
		kfree(new_transaction);
	return ret;
}

static struct lock_class_key jbd_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	memset(handle, 0, sizeof(*handle));
	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;

	lockdep_init_map(&handle->h_lockdep_map, "jbd_handle", &jbd_handle_key, 0);

	return handle;
}

/**
 * handle_t *journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *journal_start(journal_t *journal, int nblocks)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	current->journal_info = handle;

	err = start_this_handle(journal, handle);
	if (err < 0) {
		jbd_free_handle(handle);
		current->journal_info = NULL;
		handle = ERR_PTR(err);
	}
	return handle;
}
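
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): journal_start() nests.  If the current task already holds a
 * handle on this journal, the same handle is returned with h_ref
 * bumped, so every journal_start() must be paired with journal_stop().
 */
#if 0	/* example only */
static int example_nested_start(journal_t *journal)
{
	handle_t *outer = journal_start(journal, 2);
	handle_t *inner;

	if (IS_ERR(outer))
		return PTR_ERR(outer);

	inner = journal_start(journal, 1);	/* same handle, h_ref now 2 */
	J_ASSERT(inner == outer);

	journal_stop(inner);			/* just drops h_ref to 1 */
	return journal_stop(outer);		/* really completes the handle */
}
#endif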

/**
 * int journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation; this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int journal_extend(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int result;
	int wanted;

	result = -EIO;
	if (is_handle_aborted(handle))
		goto out;

	result = 1;

	spin_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (handle->h_transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	spin_lock(&transaction->t_handle_lock);
	wanted = transaction->t_outstanding_credits + nblocks;

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		goto unlock;
	}

	if (wanted > __log_space_left(journal)) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "insufficient log space\n", handle, nblocks);
		goto unlock;
	}

	handle->h_buffer_credits += nblocks;
	transaction->t_outstanding_credits += nblocks;
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
	spin_unlock(&transaction->t_handle_lock);
error_out:
	spin_unlock(&journal->j_state_lock);
out:
	return result;
}


/**
 * int journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */

int journal_restart(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	J_ASSERT(transaction->t_updates > 0);
	J_ASSERT(journal_current_handle() == handle);

	spin_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;

	if (!transaction->t_updates)
		wake_up(&journal->j_wait_updates);
	spin_unlock(&transaction->t_handle_lock);

	jbd_debug(2, "restarting handle %p\n", handle);
	__log_start_commit(journal, transaction->t_tid);
	spin_unlock(&journal->j_state_lock);

	lock_map_release(&handle->h_lockdep_map);
	handle->h_buffer_credits = nblocks;
	ret = start_this_handle(journal, handle);
	return ret;
}
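
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the usual way callers combine the two functions above --- try
 * journal_extend() first and fall back to journal_restart() when the
 * current transaction cannot grant more credits.
 */
#if 0	/* example only */
static int example_more_credits(handle_t *handle, int nblocks)
{
	int err = journal_extend(handle, nblocks);

	if (err > 0)	/* transaction full: commit it and reattach */
		err = journal_restart(handle, nblocks);
	return err;	/* 0 on success, < 0 on error */
}
#endif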


/**
 * void journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void journal_lock_updates(journal_t *journal)
{
	DEFINE_WAIT(wait);

	spin_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no running updates */
	while (1) {
		transaction_t *transaction = journal->j_running_transaction;

		if (!transaction)
			break;

		spin_lock(&transaction->t_handle_lock);
		if (!transaction->t_updates) {
			spin_unlock(&transaction->t_handle_lock);
			break;
		}
		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		spin_unlock(&transaction->t_handle_lock);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		spin_lock(&journal->j_state_lock);
	}
	spin_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}

/**
 * void journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	spin_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	spin_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}
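
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): quiescing the journal around an operation that must not race
 * with any update.  The barrier pair must always be balanced.
 */
#if 0	/* example only */
static void example_quiesce(journal_t *journal)
{
	journal_lock_updates(journal);	/* blocks until no updates run */
	/* ... perform the special journal-locked operation here ... */
	journal_unlock_updates(journal);
}
#endif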

static void warn_dirty_buffer(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_WARNING
	       "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
			int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	int need_copy = 0;

	if (is_handle_aborted(handle))
		return -EROFS;

	transaction = handle->h_transaction;
	journal = transaction->t_journal;

	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */

	lock_buffer(bh);
	jbd_lock_bh_state(bh);

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
			warn_dirty_buffer(bh);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		set_buffer_jbddirty(bh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		jbd_unlock_bh_state(bh);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		jh->b_next_transaction = transaction;
		goto done;
	}

	/* Is there data here we need to preserve? */

	if (jh->b_transaction && jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "owned by older transaction");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);

		/* There is one case we have to be very careful about.
		 * If the committing transaction is currently writing
		 * this buffer out to disk and has NOT made a copy-out,
		 * then we cannot modify the buffer contents at all
		 * right now.  The essence of copy-out is that it is the
		 * extra copy, not the primary copy, which gets
		 * journaled.  If the primary copy is already going to
		 * disk then we cannot do copy-out here. */

		if (jh->b_jlist == BJ_Shadow) {
			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
			wait_queue_head_t *wqh;

			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

			JBUFFER_TRACE(jh, "on shadow: sleep");
			jbd_unlock_bh_state(bh);
			/* commit wakes up all shadow buffers after IO */
			for ( ; ; ) {
				prepare_to_wait(wqh, &wait.wait,
						TASK_UNINTERRUPTIBLE);
				if (jh->b_jlist != BJ_Shadow)
					break;
				schedule();
			}
			finish_wait(wqh, &wait.wait);
			goto repeat;
		}

		/* Only do the copy if the currently-owning transaction
		 * still needs it.  If it is on the Forget list, the
		 * committing transaction is past that stage.  The
		 * buffer had better remain locked during the kmalloc,
		 * but that should be true --- we hold the journal lock
		 * still and the buffer is already on the BUF_JOURNAL
		 * list so won't be flushed.
		 *
		 * Subtle point, though: if this is a get_undo_access,
		 * then we will be relying on the frozen_data to contain
		 * the new value of the committed_data record after the
		 * transaction, so we HAVE to force the frozen_data copy
		 * in that case. */

		if (jh->b_jlist != BJ_Forget || force_copy) {
			JBUFFER_TRACE(jh, "generate frozen data");
			if (!frozen_buffer) {
				JBUFFER_TRACE(jh, "allocate memory for buffer");
				jbd_unlock_bh_state(bh);
				frozen_buffer =
					jbd_alloc(jh2bh(jh)->b_size,
							 GFP_NOFS);
				if (!frozen_buffer) {
					printk(KERN_EMERG
					       "%s: OOM for frozen_buffer\n",
					       __func__);
					JBUFFER_TRACE(jh, "oom!");
					error = -ENOMEM;
					jbd_lock_bh_state(bh);
					goto done;
				}
				goto repeat;
			}
			jh->b_frozen_data = frozen_buffer;
			frozen_buffer = NULL;
			need_copy = 1;
		}
		jh->b_next_transaction = transaction;
	}


	/*
	 * Finally, if the buffer is not journaled right now, we need to make
	 * sure it doesn't get written to disk before the caller actually
	 * commits the new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		jh->b_transaction = transaction;
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	}

done:
	if (need_copy) {
		struct page *page;
		int offset;
		char *source;

		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
			    "Possible IO failure.\n");
		page = jh2bh(jh)->b_page;
		offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
		source = kmap_atomic(page, KM_USER0);
		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
		kunmap_atomic(source, KM_USER0);
	}
	jbd_unlock_bh_state(bh);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * no longer valid
	 */
	journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}

/**
 * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */

int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh = journal_add_journal_head(bh);
	int rc;

	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	journal_put_journal_head(jh);
	return rc;
}
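
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): each buffer modified under a handle consumes one of the
 * credits reserved by journal_start(); journal_dirty_metadata() charges
 * the credit on the buffer's first modification in this transaction.
 */
#if 0	/* example only */
static int example_modify_many(journal_t *journal,
			       struct buffer_head **bhs, int count)
{
	handle_t *handle = journal_start(journal, count);
	int i, err = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	for (i = 0; i < count && !err; i++) {
		err = journal_get_write_access(handle, bhs[i]);
		if (!err) {
			/* ... modify bhs[i]->b_data here ... */
			err = journal_dirty_metadata(handle, bhs[i]);
		}
	}
	journal_stop(handle);
	return err;
}
#endif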


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));
		jh->b_transaction = transaction;

		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		__journal_file_buffer(jh, transaction, BJ_Reserved);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		jh->b_next_transaction = transaction;
	}
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and then reallocating it as data - this would cause a second
	 * revoke, which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	journal_cancel_revoke(handle, jh);
	journal_put_journal_head(jh);
out:
	return err;
}
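
/*
 * Illustrative sketch (hypothetical helper, loosely modelled on what
 * ext3's block-allocation path does; not part of the original file):
 * journalling a freshly allocated block that will be filled in memory
 * rather than read from disk.  Note the buffer stays locked across
 * journal_get_create_access() while it is being populated.
 */
#if 0	/* example only */
static struct buffer_head *example_new_metadata_block(handle_t *handle,
						      struct super_block *sb,
						      sector_t blocknr)
{
	struct buffer_head *bh = sb_getblk(sb, blocknr);

	if (!bh)
		return NULL;
	lock_buffer(bh);
	if (journal_get_create_access(handle, bh)) {
		unlock_buffer(bh);
		brelse(bh);
		return NULL;
	}
	memset(bh->b_data, 0, bh->b_size);	/* populate the new block */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	journal_dirty_metadata(handle, bh);
	return bh;
}
#endif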

/**
 * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space; we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh = journal_add_journal_head(bh);
	char *committed_data = NULL;

	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data) {
		committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
		if (!committed_data) {
			printk(KERN_EMERG "%s: No memory for committed data\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}
	}

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			jbd_unlock_bh_state(bh);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	jbd_unlock_bh_state(bh);
out:
	journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd_free(committed_data, bh->b_size);
	return err;
}
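
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): freeing a block in an allocation bitmap is a non-rewindable
 * operation, so the bitmap buffer is taken with undo access before the
 * bit is cleared, much as ext3's block-freeing code does.
 */
#if 0	/* example only */
static int example_free_in_bitmap(handle_t *handle,
				  struct buffer_head *bitmap_bh)
{
	int err = journal_get_undo_access(handle, bitmap_bh);

	if (err)
		return err;
	/* ... clear the block's bit in bitmap_bh->b_data here ... */
	return journal_dirty_metadata(handle, bitmap_bh);
}
#endif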

/**
 * int journal_dirty_data() - mark a buffer as containing dirty data to be flushed
 * @handle: transaction
 * @bh: bufferhead to mark
 *
 * Description:
 * Mark a buffer as containing dirty data which needs to be flushed before
 * we can commit the current transaction.
 *
 * The buffer is placed on the transaction's data list and is marked as
 * belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * journal_dirty_data() can be called via page_launder->ext3_writepage
 * by kswapd.
 */
int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	journal_t *journal = handle->h_transaction->t_journal;
	int need_brelse = 0;
	struct journal_head *jh;
	int ret = 0;

	if (is_handle_aborted(handle))
		return ret;

	jh = journal_add_journal_head(bh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * The buffer could *already* be dirty.  Writeout can start
	 * at any time.
	 */
	jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);

	/*
	 * What if the buffer is already part of a running transaction?
	 *
	 * There are two cases:
	 * 1) It is part of the current running transaction.  Refile it,
	 *    just in case we have allocated it as metadata, deallocated
	 *    it, then reallocated it as data.
	 * 2) It is part of the previous, still-committing transaction.
	 *    If all we want to do is to guarantee that the buffer will be
	 *    written to disk before this new transaction commits, then
	 *    being sure that the *previous* transaction has this same
	 *    property is sufficient for us!  Just leave it on its old
	 *    transaction.
	 *
	 * In case (2), the buffer must not already exist as metadata
	 * --- that would violate write ordering (a transaction is free
	 * to write its data at any point, even before the previous
	 * committing transaction has committed).  The caller must
	 * never, ever allow this to happen: there's nothing we can do
	 * about it in this layer.
	 */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	/* Now that we have bh_state locked, are we really still mapped? */
	if (!buffer_mapped(bh)) {
		JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
		goto no_journal;
	}

	if (jh->b_transaction) {
		JBUFFER_TRACE(jh, "has transaction");
		if (jh->b_transaction != handle->h_transaction) {
			JBUFFER_TRACE(jh, "belongs to older transaction");
			J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);

			/* @@@ IS THIS TRUE  ? */
			/*
			 * Not any more.  Scenario: someone does a write()
			 * in data=journal mode.  The buffer's transaction has
			 * moved into commit.  Then someone does another
			 * write() to the file.  We do the frozen data copyout
			 * and set b_next_transaction to point to j_running_t.
			 * And while we're in that state, someone does a
			 * writepage() in an attempt to pageout the same area
			 * of the file via a shared mapping.  At present that
			 * calls journal_dirty_data(), and we get right here.
			 * It may be too late to journal the data.  Simply
			 * falling through to the next test will suffice: the
			 * data will be dirty and will be checkpointed.  The
			 * ordering comments in the next comment block still
			 * apply.
			 */
			//J_ASSERT_JH(jh, jh->b_next_transaction == NULL);

			/*
			 * If we're journalling data, and this buffer was
			 * subject to a write(), it could be metadata, forget
			 * or shadow against the committing transaction.  Now,
			 * someone has dirtied the same darn page via a mapping
			 * and it is being writepage()'d.
			 * We *could* just steal the page from commit, with some
			 * fancy locking there.  Instead, we just skip it -
			 * don't tie the page's buffers to the new transaction
			 * at all.
			 * Implication: if we crash before the writepage() data
			 * is written into the filesystem, recovery will replay
			 * the write() data.
			 */
			if (jh->b_jlist != BJ_None &&
					jh->b_jlist != BJ_SyncData &&
					jh->b_jlist != BJ_Locked) {
				JBUFFER_TRACE(jh, "Not stealing");
				goto no_journal;
			}

			/*
			 * This buffer may be undergoing writeout in commit.  We
			 * can't return from here and let the caller dirty it
			 * again because that can cause the write-out loop in
			 * commit to never terminate.
			 */
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				need_brelse = 1;
				sync_dirty_buffer(bh);
				jbd_lock_bh_state(bh);
				spin_lock(&journal->j_list_lock);
				/* Since we dropped the lock... */
				if (!buffer_mapped(bh)) {
					JBUFFER_TRACE(jh, "buffer got unmapped");
					goto no_journal;
				}
				/* The buffer may become locked again at any
				   time if it is redirtied */
			}

			/*
			 * We cannot remove the buffer with io error from the
			 * committing transaction, because otherwise it would
			 * miss the error and the commit would not abort.
			 */
			if (unlikely(!buffer_uptodate(bh))) {
				ret = -EIO;
				goto no_journal;
			}

			if (jh->b_transaction != NULL) {
				JBUFFER_TRACE(jh, "unfile from commit");
				__journal_temp_unlink_buffer(jh);
				/* It still points to the committing
				 * transaction; move it to this one so
				 * that the refile assert checks are
				 * happy. */
				jh->b_transaction = handle->h_transaction;
			}
			/* The buffer will be refiled below */

		}
		/*
		 * Special case --- the buffer might actually have been
		 * allocated and then immediately deallocated in the previous,
		 * committing transaction, so might still be left on that
		 * transaction's metadata lists.
		 */
		if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
			JBUFFER_TRACE(jh, "not on correct data list: unfile");
			J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
			__journal_temp_unlink_buffer(jh);
			jh->b_transaction = handle->h_transaction;
			JBUFFER_TRACE(jh, "file as data");
			__journal_file_buffer(jh, handle->h_transaction,
						BJ_SyncData);
		}
	} else {
		JBUFFER_TRACE(jh, "not on a transaction");
		__journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
	}
no_journal:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	if (need_brelse) {
		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
	}
	JBUFFER_TRACE(jh, "exit");
	journal_put_journal_head(jh);
	return ret;
}
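
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): in data=ordered mode a filesystem ties a page's data buffers
 * to the running transaction so they are flushed to disk before the
 * transaction is allowed to commit.
 */
#if 0	/* example only */
static int example_order_page_data(handle_t *handle, struct page *page)
{
	struct buffer_head *bh, *head;
	int err = 0;

	bh = head = page_buffers(page);
	do {
		err = journal_dirty_data(handle, bh);
		if (err)
			break;
		bh = bh->b_this_page;
	} while (bh != head);
	return err;
}
#endif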

/**
 * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = bh2jh(bh);

	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");
	if (is_handle_aborted(handle))
		goto out;

	jbd_lock_bh_state(bh);

	if (jh->b_modified == 0) {
		/*
		 * This buffer has been modified and is becoming part
		 * of the transaction.  This needs to be done
		 * once per transaction -bzzz
		 */
		jh->b_modified = 1;
		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
		handle->h_buffer_credits--;
	}

	/*
	 * fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_running_transaction);
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);
		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	jbd_unlock_bh_state(bh);
out:
	JBUFFER_TRACE(jh, "exit");
	return 0;
}

/*
 * journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the caller decided in the end that it didn't need the
 * access.
 *
 */
void
journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
	BUFFER_TRACE(bh, "entry");
}
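
/*
 * Illustrative sketch (hypothetical helper and decision variable, not
 * part of the original file): backing out when write access was taken
 * but the modification turned out to be unnecessary.
 */
#if 0	/* example only */
static void example_back_out(handle_t *handle, struct buffer_head *bh)
{
	int still_needed = 0;	/* hypothetical decision by the caller */

	if (journal_get_write_access(handle, bh))
		return;
	if (!still_needed) {
		/* access was obtained but nothing will be modified */
		journal_release_buffer(handle, bh);
		return;
	}
	/* ... otherwise modify and call journal_dirty_metadata() ... */
}
#endif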

/**
 * void journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int journal_forget (handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	BUFFER_TRACE(bh, "entry");

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	if (!buffer_jbd(bh))
		goto not_jbd;
	jh = bh2jh(bh);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto not_jbd;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer is going from the transaction, so we must drop
	 * all references -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == handle->h_transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */

		if (jh->b_cp_transaction) {
			__journal_temp_unlink_buffer(jh);
			__journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__journal_unfile_buffer(jh);
			journal_remove_journal_head(bh);
			__brelse(bh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			jh->b_next_transaction = NULL;

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	}

not_jbd:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}
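
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): a filesystem deleting a metadata block calls journal_forget()
 * rather than bforget(), so the journal can unfile the buffer or keep
 * it on the BJ_Forget list until its checkpoint is satisfied.
 */
#if 0	/* example only */
static int example_delete_metadata(handle_t *handle,
				   struct buffer_head *bh)
{
	/* journal_forget() drops bh->b_count; the caller's reference
	 * is consumed here, so no brelse() afterwards. */
	return journal_forget(handle, bh);
}
#endif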

/**
 * int journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a journal_abort has been executed since the
 * transaction began.
 */
int journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int err;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(transaction->t_updates > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this transaction.
	 * Keep doing that while new threads continue to arrive.
	 * It doesn't cost much - we're about to run a commit and sleep
	 * on IO anyway.  Speeds up many-threaded, many-dir operations
	 * by 30x or more...
	 *
	 * We try to optimize the sleep time against what the underlying disk
	 * can do, instead of having a static sleep time.  This is useful for
	 * the case where our storage is so fast that it is more optimal to go
	 * ahead and force a flush and wait for the transaction to be committed
	 * than it is to wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how long it takes
	 * to commit a transaction, and compare it with how long this
	 * transaction has been running, and if run time < commit time then we
	 * sleep for the delta and commit.  This greatly helps super fast disks
	 * that would see slowdowns as more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one to
	 * perform a synchronous write.  We do this to detect the case where a
	 * single process is doing a stream of sync writes.  No point in waiting
	 * for joiners in that case.
	 */
fe1dcbc4 AM |
1410 | pid = current->pid; |
1411 | if (handle->h_sync && journal->j_last_sync_writer != pid) { | |
f420d4dc JB |
1412 | u64 commit_time, trans_time; |
1413 | ||
fe1dcbc4 | 1414 | journal->j_last_sync_writer = pid; |
f420d4dc JB |
1415 | |
1416 | spin_lock(&journal->j_state_lock); | |
1417 | commit_time = journal->j_average_commit_time; | |
1418 | spin_unlock(&journal->j_state_lock); | |
1419 | ||
1420 | trans_time = ktime_to_ns(ktime_sub(ktime_get(), | |
1421 | transaction->t_start_time)); | |
1422 | ||
1423 | commit_time = min_t(u64, commit_time, | |
1424 | 1000*jiffies_to_usecs(1)); | |
1425 | ||
1426 | if (trans_time < commit_time) { | |
1427 | ktime_t expires = ktime_add_ns(ktime_get(), | |
1428 | commit_time); | |
1429 | set_current_state(TASK_UNINTERRUPTIBLE); | |
1430 | schedule_hrtimeout(&expires, HRTIMER_MODE_ABS); | |
1431 | } | |
1da177e4 LT |
1432 | } |
1433 | ||
512a0043 TT |
1434 | if (handle->h_sync) |
1435 | transaction->t_synchronous_commit = 1; | |
1da177e4 LT |
1436 | current->journal_info = NULL; |
1437 | spin_lock(&journal->j_state_lock); | |
1438 | spin_lock(&transaction->t_handle_lock); | |
1439 | transaction->t_outstanding_credits -= handle->h_buffer_credits; | |
1440 | transaction->t_updates--; | |
1441 | if (!transaction->t_updates) { | |
1442 | wake_up(&journal->j_wait_updates); | |
1443 | if (journal->j_barrier_count) | |
1444 | wake_up(&journal->j_wait_transaction_locked); | |
1445 | } | |
1446 | ||
1447 | /* | |
1448 | * If the handle is marked SYNC, we need to set another commit | |
1449 | * going! We also want to force a commit if the current | |
1450 | * transaction is occupying too much of the log, or if the | |
1451 | * transaction is too old now. | |
1452 | */ | |
1453 | if (handle->h_sync || | |
1454 | transaction->t_outstanding_credits > | |
1455 | journal->j_max_transaction_buffers || | |
e9ad5620 | 1456 | time_after_eq(jiffies, transaction->t_expires)) { |
1da177e4 LT |
1457 | /* Do this even for aborted journals: an abort still |
1458 | * completes the commit thread, it just doesn't write | |
1459 | * anything to disk. */ | |
1460 | tid_t tid = transaction->t_tid; | |
1461 | ||
1462 | spin_unlock(&transaction->t_handle_lock); | |
1463 | jbd_debug(2, "transaction too old, requesting commit for " | |
1464 | "handle %p\n", handle); | |
1465 | /* This is non-blocking */ | |
1466 | __log_start_commit(journal, transaction->t_tid); | |
1467 | spin_unlock(&journal->j_state_lock); | |
1468 | ||
1469 | /* | |
1470 | * Special case: JFS_SYNC synchronous updates require us | |
ae6ddcc5 | 1471 | * to wait for the commit to complete. |
1da177e4 LT |
1472 | */ |
1473 | if (handle->h_sync && !(current->flags & PF_MEMALLOC)) | |
1474 | err = log_wait_commit(journal, tid); | |
1475 | } else { | |
1476 | spin_unlock(&transaction->t_handle_lock); | |
1477 | spin_unlock(&journal->j_state_lock); | |
1478 | } | |
1479 | ||
3295f0ef | 1480 | lock_map_release(&handle->h_lockdep_map); |
34a3d1e8 | 1481 | |
1da177e4 LT |
1482 | jbd_free_handle(handle); |
1483 | return err; | |
1484 | } | |
1485 | ||
0cf01f66 RD |
1486 | /** |
1487 | * int journal_force_commit() - force any uncommitted transactions | |
1da177e4 LT |
1488 | * @journal: journal to force |
1489 | * | |
1490 | * For synchronous operations: force any uncommitted transactions | |
1491 | * to disk. May seem kludgy, but it reuses all the handle batching | |
1492 | * code in a very simple manner. | |
1493 | */ | |
1494 | int journal_force_commit(journal_t *journal) | |
1495 | { | |
1496 | handle_t *handle; | |
1497 | int ret; | |
1498 | ||
1499 | handle = journal_start(journal, 1); | |
1500 | if (IS_ERR(handle)) { | |
1501 | ret = PTR_ERR(handle); | |
1502 | } else { | |
1503 | handle->h_sync = 1; | |
1504 | ret = journal_stop(handle); | |
1505 | } | |
1506 | return ret; | |
1507 | } | |
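
A hedged usage sketch: a filesystem's sync-style hook can flush all journaled
state with a single call. The helper name my_fs_sync is hypothetical, not part
of this file.

	/* Hypothetical sync hook built on journal_force_commit(). */
	static int my_fs_sync(journal_t *journal)
	{
		/*
		 * journal_force_commit() starts a one-credit handle, marks
		 * it h_sync and stops it, so journal_stop() kicks off a
		 * commit and waits for it to complete.
		 */
		return journal_force_commit(journal);
	}
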
1508 | ||
1509 | /* | |
1510 | * | |
1511 | * List management code snippets: various functions for manipulating the | |
1512 | * transaction buffer lists. | |
1513 | * | |
1514 | */ | |
1515 | ||
1516 | /* | |
1517 | * Append a buffer to a transaction list, given the transaction's list head | |
1518 | * pointer. | |
1519 | * | |
1520 | * j_list_lock is held. | |
1521 | * | |
1522 | * jbd_lock_bh_state(jh2bh(jh)) is held. | |
1523 | */ | |
1524 | ||
ae6ddcc5 | 1525 | static inline void |
1da177e4 LT |
1526 | __blist_add_buffer(struct journal_head **list, struct journal_head *jh) |
1527 | { | |
1528 | if (!*list) { | |
1529 | jh->b_tnext = jh->b_tprev = jh; | |
1530 | *list = jh; | |
1531 | } else { | |
1532 | /* Insert at the tail of the list to preserve order */ | |
1533 | struct journal_head *first = *list, *last = first->b_tprev; | |
1534 | jh->b_tprev = last; | |
1535 | jh->b_tnext = first; | |
1536 | last->b_tnext = first->b_tprev = jh; | |
1537 | } | |
1538 | } | |
1539 | ||
ae6ddcc5 | 1540 | /* |
1da177e4 LT |
1541 | * Remove a buffer from a transaction list, given the transaction's list |
1542 | * head pointer. | |
1543 | * | |
1544 | * Called with j_list_lock held, and the journal may not be locked. | |
1545 | * | |
1546 | * jbd_lock_bh_state(jh2bh(jh)) is held. | |
1547 | */ | |
1548 | ||
1549 | static inline void | |
1550 | __blist_del_buffer(struct journal_head **list, struct journal_head *jh) | |
1551 | { | |
1552 | if (*list == jh) { | |
1553 | *list = jh->b_tnext; | |
1554 | if (*list == jh) | |
1555 | *list = NULL; | |
1556 | } | |
1557 | jh->b_tprev->b_tnext = jh->b_tnext; | |
1558 | jh->b_tnext->b_tprev = jh->b_tprev; | |
1559 | } | |
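
The two helpers above keep each transaction list as a circular, doubly-linked
ring whose head points at the oldest element. A self-contained user-space toy
(struct node and ring_add_tail are hypothetical stand-ins for journal_head's
b_tnext/b_tprev) demonstrates the tail-insert invariant:

	#include <assert.h>
	#include <stddef.h>

	struct node { struct node *next, *prev; };

	static void ring_add_tail(struct node **list, struct node *n)
	{
		if (!*list) {
			n->next = n->prev = n;	/* singleton ring */
			*list = n;
		} else {
			struct node *first = *list, *last = first->prev;
			n->prev = last;
			n->next = first;
			last->next = first->prev = n;
		}
	}

	int main(void)
	{
		struct node a, b, *list = NULL;

		ring_add_tail(&list, &a);
		ring_add_tail(&list, &b);
		/* Head stays at the first insert; tail is head->prev. */
		assert(list == &a && list->prev == &b && b.next == &a);
		return 0;
	}
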
1560 | ||
ae6ddcc5 | 1561 | /* |
1da177e4 LT |
1562 | * Remove a buffer from the appropriate transaction list. |
1563 | * | |
1564 | * Note that this function can *change* the value of | |
1565 | * bh->b_transaction->t_sync_datalist, t_buffers, t_forget, | |
1566 | * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list. If the caller | |
1567 | * is holding onto a copy of one of these pointers, it could go bad. | |
1568 | * Generally the caller needs to re-read the pointer from the transaction_t. | |
1569 | * | |
1570 | * Called under j_list_lock. The journal may not be locked. | |
1571 | */ | |
d394e122 | 1572 | static void __journal_temp_unlink_buffer(struct journal_head *jh) |
1da177e4 LT |
1573 | { |
1574 | struct journal_head **list = NULL; | |
1575 | transaction_t *transaction; | |
1576 | struct buffer_head *bh = jh2bh(jh); | |
1577 | ||
1578 | J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); | |
1579 | transaction = jh->b_transaction; | |
1580 | if (transaction) | |
1581 | assert_spin_locked(&transaction->t_journal->j_list_lock); | |
1582 | ||
1583 | J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); | |
1584 | if (jh->b_jlist != BJ_None) | |
c80544dc | 1585 | J_ASSERT_JH(jh, transaction != NULL); |
1da177e4 LT |
1586 | |
1587 | switch (jh->b_jlist) { | |
1588 | case BJ_None: | |
1589 | return; | |
1590 | case BJ_SyncData: | |
1591 | list = &transaction->t_sync_datalist; | |
1592 | break; | |
1593 | case BJ_Metadata: | |
1594 | transaction->t_nr_buffers--; | |
1595 | J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0); | |
1596 | list = &transaction->t_buffers; | |
1597 | break; | |
1598 | case BJ_Forget: | |
1599 | list = &transaction->t_forget; | |
1600 | break; | |
1601 | case BJ_IO: | |
1602 | list = &transaction->t_iobuf_list; | |
1603 | break; | |
1604 | case BJ_Shadow: | |
1605 | list = &transaction->t_shadow_list; | |
1606 | break; | |
1607 | case BJ_LogCtl: | |
1608 | list = &transaction->t_log_list; | |
1609 | break; | |
1610 | case BJ_Reserved: | |
1611 | list = &transaction->t_reserved_list; | |
1612 | break; | |
1613 | case BJ_Locked: | |
1614 | list = &transaction->t_locked_list; | |
1615 | break; | |
1616 | } | |
1617 | ||
1618 | __blist_del_buffer(list, jh); | |
1619 | jh->b_jlist = BJ_None; | |
1620 | if (test_clear_buffer_jbddirty(bh)) | |
1621 | mark_buffer_dirty(bh); /* Expose it to the VM */ | |
1622 | } | |
1623 | ||
1624 | void __journal_unfile_buffer(struct journal_head *jh) | |
1625 | { | |
1626 | __journal_temp_unlink_buffer(jh); | |
1627 | jh->b_transaction = NULL; | |
1628 | } | |
1629 | ||
1630 | void journal_unfile_buffer(journal_t *journal, struct journal_head *jh) | |
1631 | { | |
1632 | jbd_lock_bh_state(jh2bh(jh)); | |
1633 | spin_lock(&journal->j_list_lock); | |
1634 | __journal_unfile_buffer(jh); | |
1635 | spin_unlock(&journal->j_list_lock); | |
1636 | jbd_unlock_bh_state(jh2bh(jh)); | |
1637 | } | |
1638 | ||
1639 | /* | |
1640 | * Called from journal_try_to_free_buffers(). | |
1641 | * | |
1642 | * Called under jbd_lock_bh_state(bh) | |
1643 | */ | |
1644 | static void | |
1645 | __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) | |
1646 | { | |
1647 | struct journal_head *jh; | |
1648 | ||
1649 | jh = bh2jh(bh); | |
1650 | ||
1651 | if (buffer_locked(bh) || buffer_dirty(bh)) | |
1652 | goto out; | |
1653 | ||
c80544dc | 1654 | if (jh->b_next_transaction != NULL) |
1da177e4 LT |
1655 | goto out; |
1656 | ||
1657 | spin_lock(&journal->j_list_lock); | |
c80544dc | 1658 | if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) { |
1da177e4 LT |
1659 | if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) { |
1660 | /* A written-back ordered data buffer */ | |
1661 | JBUFFER_TRACE(jh, "release data"); | |
1662 | __journal_unfile_buffer(jh); | |
1663 | journal_remove_journal_head(bh); | |
1664 | __brelse(bh); | |
1665 | } | |
c80544dc | 1666 | } else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) { |
1da177e4 LT |
1667 | /* written-back checkpointed metadata buffer */ |
1668 | if (jh->b_jlist == BJ_None) { | |
1669 | JBUFFER_TRACE(jh, "remove from checkpoint list"); | |
1670 | __journal_remove_checkpoint(jh); | |
1671 | journal_remove_journal_head(bh); | |
1672 | __brelse(bh); | |
1673 | } | |
1674 | } | |
1675 | spin_unlock(&journal->j_list_lock); | |
1676 | out: | |
1677 | return; | |
1678 | } | |
1679 | ||
ae6ddcc5 | 1680 | /** |
1da177e4 LT |
1681 | * int journal_try_to_free_buffers() - try to free page buffers. |
1682 | * @journal: journal for operation | |
1683 | * @page: to try and free | |
3f31fddf MC |
1684 | * @gfp_mask: we use the mask to detect how hard we should try to release | |
1685 | * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for commit code to | |
1686 | * release the buffers. | |
1da177e4 | 1687 | * |
ae6ddcc5 | 1688 | * |
1da177e4 LT |
1689 | * For all the buffers on this page, |
1690 | * if they are fully written out ordered data, move them onto BUF_CLEAN | |
1691 | * so try_to_free_buffers() can reap them. | |
ae6ddcc5 | 1692 | * |
1da177e4 LT |
1693 | * This function returns non-zero if we wish try_to_free_buffers() |
1694 | * to be called. We do this if the page is releasable by try_to_free_buffers(). | |
1695 | * We also do it if the page has locked or dirty buffers and the caller wants | |
1696 | * us to perform sync or async writeout. | |
1697 | * | |
1698 | * This complicates JBD locking somewhat. We aren't protected by the | |
1699 | * BKL here. We wish to remove the buffer from its committing or | |
1700 | * running transaction's ->t_sync_datalist via __journal_unfile_buffer. | |
1701 | * | |
1702 | * This may *change* the value of transaction_t->t_sync_datalist, so | |
1703 | * anyone who looks at t_sync_datalist needs to lock against this function. | |
1704 | * | |
1705 | * Even worse, someone may be doing a journal_dirty_data on this | |
1706 | * buffer. So we need to lock against that. journal_dirty_data() | |
1707 | * will come out of the lock with the buffer dirty, which makes it | |
1708 | * ineligible for release here. | |
1709 | * | |
1710 | * Who else is affected by this? hmm... Really the only contender | |
1711 | * is do_get_write_access() - it could be looking at the buffer while | |
1712 | * journal_try_to_free_buffer() is changing its state. But that | |
1713 | * cannot happen because we never reallocate freed data as metadata | |
1714 | * while the data is part of a transaction. Yes? | |
3f31fddf MC |
1715 | * |
1716 | * Return 0 on failure, 1 on success | |
1da177e4 | 1717 | */ |
ae6ddcc5 | 1718 | int journal_try_to_free_buffers(journal_t *journal, |
3f31fddf | 1719 | struct page *page, gfp_t gfp_mask) |
1da177e4 LT |
1720 | { |
1721 | struct buffer_head *head; | |
1722 | struct buffer_head *bh; | |
1723 | int ret = 0; | |
1724 | ||
1725 | J_ASSERT(PageLocked(page)); | |
1726 | ||
1727 | head = page_buffers(page); | |
1728 | bh = head; | |
1729 | do { | |
1730 | struct journal_head *jh; | |
1731 | ||
1732 | /* | |
1733 | * We take our own ref against the journal_head here to avoid | |
1734 | * having to add tons of locking around each instance of | |
1735 | * journal_remove_journal_head() and journal_put_journal_head(). | |
1736 | */ | |
1737 | jh = journal_grab_journal_head(bh); | |
1738 | if (!jh) | |
1739 | continue; | |
1740 | ||
1741 | jbd_lock_bh_state(bh); | |
1742 | __journal_try_to_free_buffer(journal, bh); | |
1743 | journal_put_journal_head(jh); | |
1744 | jbd_unlock_bh_state(bh); | |
1745 | if (buffer_jbd(bh)) | |
1746 | goto busy; | |
1747 | } while ((bh = bh->b_this_page) != head); | |
3f31fddf | 1748 | |
1da177e4 | 1749 | ret = try_to_free_buffers(page); |
3f31fddf | 1750 | |
1da177e4 LT |
1751 | busy: |
1752 | return ret; | |
1753 | } | |
1754 | ||
1755 | /* | |
1756 | * This buffer is no longer needed. If it is on an older transaction's | |
1757 | * checkpoint list we need to record it on this transaction's forget list | |
1758 | * to pin this buffer (and hence its checkpointing transaction) down until | |
1759 | * this transaction commits. If the buffer isn't on a checkpoint list, we | |
1760 | * release it. | |
1761 | * Returns non-zero if JBD no longer has an interest in the buffer. | |
1762 | * | |
1763 | * Called under j_list_lock. | |
1764 | * | |
1765 | * Called under jbd_lock_bh_state(bh). | |
1766 | */ | |
1767 | static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) | |
1768 | { | |
1769 | int may_free = 1; | |
1770 | struct buffer_head *bh = jh2bh(jh); | |
1771 | ||
1772 | __journal_unfile_buffer(jh); | |
1773 | ||
1774 | if (jh->b_cp_transaction) { | |
1775 | JBUFFER_TRACE(jh, "on running+cp transaction"); | |
1e9fd53b JK |
1776 | /* |
1777 | * We don't want to write the buffer anymore, clear the | |
1778 | * bit so that we don't confuse checks in | |
1779 | * __journal_file_buffer | |
1780 | */ | |
1781 | clear_buffer_dirty(bh); | |
1da177e4 | 1782 | __journal_file_buffer(jh, transaction, BJ_Forget); |
1da177e4 LT |
1783 | may_free = 0; |
1784 | } else { | |
1785 | JBUFFER_TRACE(jh, "on running transaction"); | |
1786 | journal_remove_journal_head(bh); | |
1787 | __brelse(bh); | |
1788 | } | |
1789 | return may_free; | |
1790 | } | |
1791 | ||
1792 | /* | |
ae6ddcc5 | 1793 | * journal_invalidatepage |
1da177e4 LT |
1794 | * |
1795 | * This code is tricky. It has a number of cases to deal with. | |
1796 | * | |
1797 | * There are two invariants which this code relies on: | |
1798 | * | |
1799 | * i_size must be updated on disk before we start calling invalidatepage on the | |
1800 | * data. | |
ae6ddcc5 | 1801 | * |
1da177e4 LT |
1802 | * This is done in ext3 by defining an ext3_setattr method which |
1803 | * updates i_size before truncate gets going. By maintaining this | |
1804 | * invariant, we can be sure that it is safe to throw away any buffers | |
1805 | * attached to the current transaction: once the transaction commits, | |
1806 | * we know that the data will not be needed. | |
ae6ddcc5 | 1807 | * |
1da177e4 | 1808 | * Note however that we can *not* throw away data belonging to the |
ae6ddcc5 | 1809 | * previous, committing transaction! |
1da177e4 LT |
1810 | * |
1811 | * Any disk blocks which *are* part of the previous, committing | |
1812 | * transaction (and which therefore cannot be discarded immediately) are | |
1813 | * not going to be reused in the new running transaction | |
1814 | * | |
1815 | * The bitmap committed_data images guarantee this: any block which is | |
1816 | * allocated in one transaction and removed in the next will be marked | |
1817 | * as in-use in the committed_data bitmap, so cannot be reused until | |
1818 | * the next transaction to delete the block commits. This means that | |
1819 | * leaving committing buffers dirty is quite safe: the disk blocks | |
1820 | * cannot be reallocated to a different file and so buffer aliasing is | |
1821 | * not possible. | |
1822 | * | |
1823 | * | |
1824 | * The above applies mainly to ordered data mode. In writeback mode we | |
1825 | * don't make guarantees about the order in which data hits disk --- in | |
1826 | * particular we don't guarantee that new dirty data is flushed before | |
1827 | * transaction commit --- so it is always safe just to discard data | |
ae6ddcc5 | 1828 | * immediately in that mode. --sct |
1da177e4 LT |
1829 | */ |
1830 | ||
1831 | /* | |
1832 | * The journal_unmap_buffer helper function returns zero if the buffer | |
1833 | * concerned remains pinned as an anonymous buffer belonging to an older | |
1834 | * transaction. | |
1835 | * | |
1836 | * We're outside-transaction here. Either or both of j_running_transaction | |
1837 | * and j_committing_transaction may be NULL. | |
1838 | */ | |
1839 | static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) | |
1840 | { | |
1841 | transaction_t *transaction; | |
1842 | struct journal_head *jh; | |
1843 | int may_free = 1; | |
1844 | int ret; | |
1845 | ||
1846 | BUFFER_TRACE(bh, "entry"); | |
1847 | ||
1848 | /* | |
1849 | * It is safe to proceed here without the j_list_lock because the | |
1850 | * buffers cannot be stolen by try_to_free_buffers as long as we are | |
1851 | * holding the page lock. --sct | |
1852 | */ | |
1853 | ||
1854 | if (!buffer_jbd(bh)) | |
1855 | goto zap_buffer_unlocked; | |
1856 | ||
1857 | spin_lock(&journal->j_state_lock); | |
1858 | jbd_lock_bh_state(bh); | |
1859 | spin_lock(&journal->j_list_lock); | |
1860 | ||
1861 | jh = journal_grab_journal_head(bh); | |
1862 | if (!jh) | |
1863 | goto zap_buffer_no_jh; | |
1864 | ||
86963918 JK |
1865 | /* |
1866 | * We cannot remove the buffer from checkpoint lists until the | |
1867 | * transaction adding inode to orphan list (let's call it T) | |
1868 | * is committed. Otherwise if the transaction changing the | |
1869 | * buffer would be cleaned from the journal before T is | |
1870 | * committed, a crash will cause that the correct contents of | |
1871 | * the buffer will be lost. On the other hand we have to | |
1872 | * clear the buffer dirty bit at latest at the moment when the | |
1873 | * transaction marking the buffer as freed in the filesystem | |
1874 | * structures is committed because from that moment on the | |
1875 | * buffer can be reallocated and used by a different page. | |
1876 | * Since the block hasn't been freed yet but the inode has | |
1877 | * already been added to orphan list, it is safe for us to add | |
1878 | * the buffer to BJ_Forget list of the newest transaction. | |
1879 | */ | |
1da177e4 LT |
1880 | transaction = jh->b_transaction; |
1881 | if (transaction == NULL) { | |
1882 | /* First case: not on any transaction. If it | |
1883 | * has no checkpoint link, then we can zap it: | |
1884 | * it's a writeback-mode buffer so we don't care | |
1885 | * if it hits disk safely. */ | |
1886 | if (!jh->b_cp_transaction) { | |
1887 | JBUFFER_TRACE(jh, "not on any transaction: zap"); | |
1888 | goto zap_buffer; | |
1889 | } | |
1890 | ||
1891 | if (!buffer_dirty(bh)) { | |
1892 | /* bdflush has written it. We can drop it now */ | |
1893 | goto zap_buffer; | |
1894 | } | |
1895 | ||
1896 | /* OK, it must be in the journal but still not | |
1897 | * written fully to disk: it's metadata or | |
1898 | * journaled data... */ | |
1899 | ||
1900 | if (journal->j_running_transaction) { | |
1901 | /* ... and once the current transaction has | |
1902 | * committed, the buffer won't be needed any | |
1903 | * longer. */ | |
1904 | JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget"); | |
1905 | ret = __dispose_buffer(jh, | |
1906 | journal->j_running_transaction); | |
1907 | journal_put_journal_head(jh); | |
1908 | spin_unlock(&journal->j_list_lock); | |
1909 | jbd_unlock_bh_state(bh); | |
1910 | spin_unlock(&journal->j_state_lock); | |
1911 | return ret; | |
1912 | } else { | |
1913 | /* There is no currently-running transaction. So the | |
1914 | * orphan record which we wrote for this file must have | |
1915 | * passed into commit. We must attach this buffer to | |
1916 | * the committing transaction, if it exists. */ | |
1917 | if (journal->j_committing_transaction) { | |
1918 | JBUFFER_TRACE(jh, "give to committing trans"); | |
1919 | ret = __dispose_buffer(jh, | |
1920 | journal->j_committing_transaction); | |
1921 | journal_put_journal_head(jh); | |
1922 | spin_unlock(&journal->j_list_lock); | |
1923 | jbd_unlock_bh_state(bh); | |
1924 | spin_unlock(&journal->j_state_lock); | |
1925 | return ret; | |
1926 | } else { | |
1927 | /* The orphan record's transaction has | |
1928 | * committed. We can cleanse this buffer */ | |
1929 | clear_buffer_jbddirty(bh); | |
1930 | goto zap_buffer; | |
1931 | } | |
1932 | } | |
1933 | } else if (transaction == journal->j_committing_transaction) { | |
f58a74dc | 1934 | JBUFFER_TRACE(jh, "on committing transaction"); |
d13df84f | 1935 | if (jh->b_jlist == BJ_Locked) { |
1936 | /* | |
1937 | * The buffer is on the committing transaction's locked | |
1938 | * list. We have the buffer locked, so I/O has | |
1939 | * completed. So we can nail the buffer now. | |
1940 | */ | |
1941 | may_free = __dispose_buffer(jh, transaction); | |
1942 | goto zap_buffer; | |
1943 | } | |
1944 | /* | |
86963918 JK |
1945 | * The buffer is committing; we simply cannot touch | |
1946 | * it. So we just set b_next_transaction to the | |
1947 | * running transaction (if there is one) and mark | |
1948 | * buffer as freed so that commit code knows it should | |
1949 | * clear dirty bits when it is done with the buffer. | |
1950 | */ | |
1da177e4 | 1951 | set_buffer_freed(bh); |
86963918 JK |
1952 | if (journal->j_running_transaction && buffer_jbddirty(bh)) |
1953 | jh->b_next_transaction = journal->j_running_transaction; | |
1da177e4 LT |
1954 | journal_put_journal_head(jh); |
1955 | spin_unlock(&journal->j_list_lock); | |
1956 | jbd_unlock_bh_state(bh); | |
1957 | spin_unlock(&journal->j_state_lock); | |
1958 | return 0; | |
1959 | } else { | |
1960 | /* Good, the buffer belongs to the running transaction. | |
1961 | * We are writing our own transaction's data, not any | |
1962 | * previous one's, so it is safe to throw it away | |
1963 | * (remember that we expect the filesystem to have set | |
1964 | * i_size already for this truncate so recovery will not | |
1965 | * expose the disk blocks we are discarding here.) */ | |
1966 | J_ASSERT_JH(jh, transaction == journal->j_running_transaction); | |
f58a74dc | 1967 | JBUFFER_TRACE(jh, "on running transaction"); |
1da177e4 LT |
1968 | may_free = __dispose_buffer(jh, transaction); |
1969 | } | |
1970 | ||
1971 | zap_buffer: | |
1972 | journal_put_journal_head(jh); | |
1973 | zap_buffer_no_jh: | |
1974 | spin_unlock(&journal->j_list_lock); | |
1975 | jbd_unlock_bh_state(bh); | |
1976 | spin_unlock(&journal->j_state_lock); | |
1977 | zap_buffer_unlocked: | |
1978 | clear_buffer_dirty(bh); | |
1979 | J_ASSERT_BH(bh, !buffer_jbddirty(bh)); | |
1980 | clear_buffer_mapped(bh); | |
1981 | clear_buffer_req(bh); | |
1982 | clear_buffer_new(bh); | |
1983 | bh->b_bdev = NULL; | |
1984 | return may_free; | |
1985 | } | |
1986 | ||
ae6ddcc5 | 1987 | /** |
a6b91919 RD |
1988 | * void journal_invalidatepage() - invalidate a journal page |
1989 | * @journal: journal to use for flush | |
1da177e4 LT |
1990 | * @page: page to flush |
1991 | * @offset: offset within the page from which to invalidate. | |
1992 | * | |
1993 | * Reap page buffers containing data after offset in page. | |
1da177e4 | 1994 | */ |
2ff28e22 | 1995 | void journal_invalidatepage(journal_t *journal, |
ae6ddcc5 | 1996 | struct page *page, |
1da177e4 LT |
1997 | unsigned long offset) |
1998 | { | |
1999 | struct buffer_head *head, *bh, *next; | |
2000 | unsigned int curr_off = 0; | |
2001 | int may_free = 1; | |
2002 | ||
2003 | if (!PageLocked(page)) | |
2004 | BUG(); | |
2005 | if (!page_has_buffers(page)) | |
2ff28e22 | 2006 | return; |
1da177e4 LT |
2007 | |
2008 | /* We will potentially be playing with lists other than just the | |
2009 | * data lists (especially for journaled data mode), so be | |
2010 | * cautious in our locking. */ | |
2011 | ||
2012 | head = bh = page_buffers(page); | |
2013 | do { | |
2014 | unsigned int next_off = curr_off + bh->b_size; | |
2015 | next = bh->b_this_page; | |
2016 | ||
1da177e4 | 2017 | if (offset <= curr_off) { |
e9ad5620 | 2018 | /* This block is wholly outside the truncation point */ |
1da177e4 LT |
2019 | lock_buffer(bh); |
2020 | may_free &= journal_unmap_buffer(journal, bh); | |
2021 | unlock_buffer(bh); | |
2022 | } | |
2023 | curr_off = next_off; | |
2024 | bh = next; | |
2025 | ||
2026 | } while (bh != head); | |
2027 | ||
2028 | if (!offset) { | |
2ff28e22 N |
2029 | if (may_free && try_to_free_buffers(page)) |
2030 | J_ASSERT(!page_has_buffers(page)); | |
1da177e4 | 2031 | } |
1da177e4 LT |
2032 | } |
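
Similarly, a hedged sketch of a truncate-path hook delegating here, loosely
modeled on ext3 (EXT3_JOURNAL() and the hook name are assumptions):

	static void my_invalidatepage(struct page *page, unsigned long offset)
	{
		journal_t *journal = EXT3_JOURNAL(page->mapping->host);

		/* offset == 0 means the whole page goes; drop per-page state. */
		if (offset == 0)
			ClearPageChecked(page);

		journal_invalidatepage(journal, page, offset);
	}
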
2033 | ||
ae6ddcc5 MC |
2034 | /* |
2035 | * File a buffer on the given transaction list. | |
1da177e4 LT |
2036 | */ |
2037 | void __journal_file_buffer(struct journal_head *jh, | |
2038 | transaction_t *transaction, int jlist) | |
2039 | { | |
2040 | struct journal_head **list = NULL; | |
2041 | int was_dirty = 0; | |
2042 | struct buffer_head *bh = jh2bh(jh); | |
2043 | ||
2044 | J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); | |
2045 | assert_spin_locked(&transaction->t_journal->j_list_lock); | |
2046 | ||
2047 | J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); | |
2048 | J_ASSERT_JH(jh, jh->b_transaction == transaction || | |
c80544dc | 2049 | jh->b_transaction == NULL); |
1da177e4 LT |
2050 | |
2051 | if (jh->b_transaction && jh->b_jlist == jlist) | |
2052 | return; | |
2053 | ||
ae6ddcc5 | 2054 | if (jlist == BJ_Metadata || jlist == BJ_Reserved || |
1da177e4 | 2055 | jlist == BJ_Shadow || jlist == BJ_Forget) { |
1e9fd53b JK |
2056 | /* |
2057 | * For metadata buffers, we track dirty bit in buffer_jbddirty | |
2058 | * instead of buffer_dirty. We should not see a dirty bit set | |
2059 | * here because we clear it in do_get_write_access but e.g. | |
2060 | * tune2fs can modify the sb and set the dirty bit at any time | |
2061 | * so we try to gracefully handle that. | |
2062 | */ | |
2063 | if (buffer_dirty(bh)) | |
2064 | warn_dirty_buffer(bh); | |
1da177e4 LT |
2065 | if (test_clear_buffer_dirty(bh) || |
2066 | test_clear_buffer_jbddirty(bh)) | |
2067 | was_dirty = 1; | |
2068 | } | |
2069 | ||
2070 | if (jh->b_transaction) | |
2071 | __journal_temp_unlink_buffer(jh); | |
2072 | jh->b_transaction = transaction; | |
2073 | ||
2074 | switch (jlist) { | |
2075 | case BJ_None: | |
2076 | J_ASSERT_JH(jh, !jh->b_committed_data); | |
2077 | J_ASSERT_JH(jh, !jh->b_frozen_data); | |
2078 | return; | |
2079 | case BJ_SyncData: | |
2080 | list = &transaction->t_sync_datalist; | |
2081 | break; | |
2082 | case BJ_Metadata: | |
2083 | transaction->t_nr_buffers++; | |
2084 | list = &transaction->t_buffers; | |
2085 | break; | |
2086 | case BJ_Forget: | |
2087 | list = &transaction->t_forget; | |
2088 | break; | |
2089 | case BJ_IO: | |
2090 | list = &transaction->t_iobuf_list; | |
2091 | break; | |
2092 | case BJ_Shadow: | |
2093 | list = &transaction->t_shadow_list; | |
2094 | break; | |
2095 | case BJ_LogCtl: | |
2096 | list = &transaction->t_log_list; | |
2097 | break; | |
2098 | case BJ_Reserved: | |
2099 | list = &transaction->t_reserved_list; | |
2100 | break; | |
2101 | case BJ_Locked: | |
2102 | list = &transaction->t_locked_list; | |
2103 | break; | |
2104 | } | |
2105 | ||
2106 | __blist_add_buffer(list, jh); | |
2107 | jh->b_jlist = jlist; | |
2108 | ||
2109 | if (was_dirty) | |
2110 | set_buffer_jbddirty(bh); | |
2111 | } | |
2112 | ||
2113 | void journal_file_buffer(struct journal_head *jh, | |
2114 | transaction_t *transaction, int jlist) | |
2115 | { | |
2116 | jbd_lock_bh_state(jh2bh(jh)); | |
2117 | spin_lock(&transaction->t_journal->j_list_lock); | |
2118 | __journal_file_buffer(jh, transaction, jlist); | |
2119 | spin_unlock(&transaction->t_journal->j_list_lock); | |
2120 | jbd_unlock_bh_state(jh2bh(jh)); | |
2121 | } | |
2122 | ||
ae6ddcc5 | 2123 | /* |
1da177e4 LT |
2124 | * Remove a buffer from its current buffer list in preparation for |
2125 | * dropping it from its current transaction entirely. If the buffer has | |
2126 | * already started to be used by a subsequent transaction, refile the | |
2127 | * buffer on that transaction's metadata list. | |
2128 | * | |
2129 | * Called under journal->j_list_lock | |
2130 | * | |
2131 | * Called under jbd_lock_bh_state(jh2bh(jh)) | |
2132 | */ | |
2133 | void __journal_refile_buffer(struct journal_head *jh) | |
2134 | { | |
86963918 | 2135 | int was_dirty, jlist; |
1da177e4 LT |
2136 | struct buffer_head *bh = jh2bh(jh); |
2137 | ||
2138 | J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); | |
2139 | if (jh->b_transaction) | |
2140 | assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock); | |
2141 | ||
2142 | /* If the buffer is now unused, just drop it. */ | |
2143 | if (jh->b_next_transaction == NULL) { | |
2144 | __journal_unfile_buffer(jh); | |
2145 | return; | |
2146 | } | |
2147 | ||
2148 | /* | |
2149 | * It has been modified by a later transaction: add it to the new | |
2150 | * transaction's metadata list. | |
2151 | */ | |
2152 | ||
2153 | was_dirty = test_clear_buffer_jbddirty(bh); | |
2154 | __journal_temp_unlink_buffer(jh); | |
2155 | jh->b_transaction = jh->b_next_transaction; | |
2156 | jh->b_next_transaction = NULL; | |
86963918 JK |
2157 | if (buffer_freed(bh)) |
2158 | jlist = BJ_Forget; | |
2159 | else if (jh->b_modified) | |
2160 | jlist = BJ_Metadata; | |
2161 | else | |
2162 | jlist = BJ_Reserved; | |
2163 | __journal_file_buffer(jh, jh->b_transaction, jlist); | |
1da177e4 LT |
2164 | J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING); |
2165 | ||
2166 | if (was_dirty) | |
2167 | set_buffer_jbddirty(bh); | |
2168 | } | |
2169 | ||
2170 | /* | |
2171 | * For the unlocked version of this call, also make sure that any | |
2172 | * hanging journal_head is cleaned up if necessary. | |
2173 | * | |
2174 | * __journal_refile_buffer is usually called as part of a single locked | |
2175 | * operation on a buffer_head, in which the caller is probably going to | |
2176 | * be hooking the journal_head onto other lists. In that case it is up | |
2177 | * to the caller to remove the journal_head if necessary. For the | |
2178 | * unlocked journal_refile_buffer call, the caller isn't going to be | |
2179 | * doing anything else to the buffer so we need to do the cleanup | |
ae6ddcc5 | 2180 | * ourselves to avoid a jh leak. |
1da177e4 LT |
2181 | * |
2182 | * *** The journal_head may be freed by this call! *** | |
2183 | */ | |
2184 | void journal_refile_buffer(journal_t *journal, struct journal_head *jh) | |
2185 | { | |
2186 | struct buffer_head *bh = jh2bh(jh); | |
2187 | ||
2188 | jbd_lock_bh_state(bh); | |
2189 | spin_lock(&journal->j_list_lock); | |
2190 | ||
2191 | __journal_refile_buffer(jh); | |
2192 | jbd_unlock_bh_state(bh); | |
2193 | journal_remove_journal_head(bh); | |
2194 | ||
2195 | spin_unlock(&journal->j_list_lock); | |
2196 | __brelse(bh); | |
2197 | } |
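
A hedged caller sketch for the warning above (my_refile is hypothetical): hold
your own buffer_head reference across the call, and never touch the
journal_head afterwards.

	static void my_refile(journal_t *journal, struct journal_head *jh)
	{
		struct buffer_head *bh = jh2bh(jh);

		get_bh(bh);		/* our own ref keeps bh alive */
		journal_refile_buffer(journal, jh);
		/* jh may already be freed here; only bh is still valid. */
		brelse(bh);
	}
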