// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        struct buffer_head *orig_bh = bh->b_private;

        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
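        /*
         * If this was the shadow copy of a journalled metadata buffer,
         * clear BH_Shadow on the original buffer head and wake anyone
         * sleeping on that bit (do_get_write_access() waits there before
         * modifying a buffer that is still being written to the log).
         */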
        if (orig_bh) {
                clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
                smp_mb__after_atomic();
                wake_up_bit(&orig_bh->b_state, BH_Shadow);
        }
        unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (!trylock_page(page))
                goto nope;

        get_page(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        put_page(page);
        return;

nope:
        __brelse(bh);
}

static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
        struct commit_header *h;
        __u32 csum;

        if (!jbd2_journal_has_csum_v2or3(j))
                return;

        h = (struct commit_header *)(bh->b_data);
        h->h_chksum_type = 0;
        h->h_chksum_size = 0;
        h->h_chksum[0] = 0;
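        /*
         * The checksum is computed over the whole block with the checksum
         * fields zeroed first, so that verification can recompute the same
         * value after zeroing out the stored checksum.
         */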
        csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
        h->h_chksum[0] = cpu_to_be32(csum);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction,
                                        struct buffer_head **cbh,
                                        __u32 crc32_sum)
{
        struct commit_header *tmp;
        struct buffer_head *bh;
        int ret;
        struct timespec64 now;

        *cbh = NULL;

        if (is_journal_aborted(journal))
                return 0;

        bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
                                                JBD2_COMMIT_BLOCK);
        if (!bh)
                return 1;

        tmp = (struct commit_header *)bh->b_data;
        ktime_get_coarse_real_ts64(&now);
        tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
        tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

        if (jbd2_has_feature_checksum(journal)) {
                tmp->h_chksum_type      = JBD2_CRC32_CHKSUM;
                tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
                tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
        }
        jbd2_commit_block_csum_set(journal, bh);

        BUFFER_TRACE(bh, "submit commit block");
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        bh->b_end_io = journal_end_buffer_io_sync;

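        /*
         * With barriers enabled and no async commit, the commit block must
         * not be reordered against the preceding journal writes, so it is
         * submitted behind a cache flush (REQ_PREFLUSH) and is itself forced
         * to stable media (REQ_FUA).  With async commit, the checksummed
         * commit record can instead be validated at recovery time, so a
         * plain synchronous write is sufficient here.
         */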
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_has_feature_async_commit(journal))
                ret = submit_bh(REQ_OP_WRITE,
                        REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
        else
                ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);

        *cbh = bh;
        return ret;
}

/*
 * This function, along with journal_submit_commit_record(), allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
                                         struct buffer_head *bh)
{
        int ret = 0;

        clear_buffer_dirty(bh);
        wait_on_buffer(bh);

        if (unlikely(!buffer_uptodate(bh)))
                ret = -EIO;
        put_bh(bh);             /* One for getblk() */

        return ret;
}

/*
 * Write the filemap data using the writepage() address_space_operation.
 * We don't do block allocation here, even for delalloc. We don't use
 * writepages() because with delayed allocation we may be doing block
 * allocation in writepages().
 */
int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
        struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
        struct writeback_control wbc = {
                .sync_mode =  WB_SYNC_ALL,
                .nr_to_write = mapping->nrpages * 2,
                .range_start = jinode->i_dirty_start,
                .range_end = jinode->i_dirty_end,
        };

        /*
         * Submit the inode data buffers. We use writepage instead of
         * writepages because writepages can do block allocation with
         * delalloc, and we need to write only already-allocated blocks
         * here.
         */
        return generic_writepages(mapping, &wbc);
}

/* Send all the data buffers related to an inode */
int jbd2_submit_inode_data(struct jbd2_inode *jinode)
{
        if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
                return 0;

        trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
        return jbd2_journal_submit_inode_data_buffers(jinode);
}
EXPORT_SYMBOL(jbd2_submit_inode_data);

int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
{
        if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
                !jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
                return 0;
        return filemap_fdatawait_range_keep_errors(
                jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
                jinode->i_dirty_end);
}
EXPORT_SYMBOL(jbd2_wait_inode_data);

/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction, so no new inode can be added to our
 * inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we are
 * currently operating on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode;
        int err, ret = 0;

        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                if (!(jinode->i_flags & JI_WRITE_DATA))
                        continue;
                jinode->i_flags |= JI_COMMIT_RUNNING;
                spin_unlock(&journal->j_list_lock);
                /* submit the inode data buffers. */
                trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
                if (journal->j_submit_inode_data_buffers) {
                        err = journal->j_submit_inode_data_buffers(jinode);
                        if (!ret)
                                ret = err;
                }
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
                jinode->i_flags &= ~JI_COMMIT_RUNNING;
                smp_mb();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
        spin_unlock(&journal->j_list_lock);
        return ret;
}

int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
        struct address_space *mapping = jinode->i_vfs_inode->i_mapping;

        return filemap_fdatawait_range_keep_errors(mapping,
                                                   jinode->i_dirty_start,
                                                   jinode->i_dirty_end);
}

/*
 * Wait for data submitted for writeout and refile inodes to the proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode, *next_i;
        int err, ret = 0;

        /* For locking, see the comment in journal_submit_data_buffers() */
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                if (!(jinode->i_flags & JI_WAIT_DATA))
                        continue;
                jinode->i_flags |= JI_COMMIT_RUNNING;
                spin_unlock(&journal->j_list_lock);
                /* wait for the inode data buffers writeout. */
                if (journal->j_finish_inode_data_buffers) {
                        err = journal->j_finish_inode_data_buffers(jinode);
                        if (!ret)
                                ret = err;
                }
                spin_lock(&journal->j_list_lock);
                jinode->i_flags &= ~JI_COMMIT_RUNNING;
                smp_mb();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }

        /* Now refile inodes to the proper lists */
        list_for_each_entry_safe(jinode, next_i,
                                 &commit_transaction->t_inode_list, i_list) {
                list_del(&jinode->i_list);
                if (jinode->i_next_transaction) {
                        jinode->i_transaction = jinode->i_next_transaction;
                        jinode->i_next_transaction = NULL;
                        list_add(&jinode->i_list,
                                &jinode->i_transaction->t_inode_list);
                } else {
                        jinode->i_transaction = NULL;
                        jinode->i_dirty_start = 0;
                        jinode->i_dirty_end = 0;
                }
        }
        spin_unlock(&journal->j_list_lock);

        return ret;
}

static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        char *addr;
        __u32 checksum;

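        /* The buffer's page may live in highmem; map it before checksumming. */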
        addr = kmap_atomic(page);
        checksum = crc32_be(crc32_sum,
                (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
        kunmap_atomic(addr);

        return checksum;
}

static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
                                   unsigned long long block)
{
        tag->t_blocknr = cpu_to_be32(block & (u32)~0);
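        /*
         * With the 64-bit feature, the high half of the block number goes
         * into t_blocknr_high.  The shift is split into two steps, most
         * likely to avoid an undefined shift by 32 if the block number
         * type is ever only 32 bits wide.
         */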
        if (jbd2_has_feature_64bit(j))
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
                                    struct buffer_head *bh, __u32 sequence)
{
        journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
        struct page *page = bh->b_page;
        __u8 *addr;
        __u32 csum32;
        __be32 seq;

        if (!jbd2_journal_has_csum_v2or3(j))
                return;

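        /*
         * Fold the commit sequence number into the checksum, so that a
         * stale block left in the log by an earlier transaction cannot
         * pass verification during replay of this one.
         */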
        seq = cpu_to_be32(sequence);
        addr = kmap_atomic(page);
        csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
        csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
                             bh->b_size);
        kunmap_atomic(addr);

        if (jbd2_has_feature_csum3(j))
                tag3->t_checksum = cpu_to_be32(csum32);
        else
                tag->t_checksum = cpu_to_be16(csum32);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
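 *
 * The commit proceeds in the phases traced by the jbd_debug() calls
 * below: lock down the running transaction and wait for its updates to
 * drain (phase 1), write the data, revoke and metadata blocks to the
 * log (phases 2a/2b), wait for the metadata and control buffer IO
 * (phases 3 and 4), write and wait on the commit record (phase 5),
 * process the forget list and checkpoint bookkeeping (phase 6), and
 * finally file statistics and mark the transaction T_FINISHED (phase 7).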
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
        struct transaction_stats_s stats;
        transaction_t *commit_transaction;
        struct journal_head *jh;
        struct buffer_head *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
        int err;
        unsigned long long blocknr;
        ktime_t start_time;
        u64 commit_time;
        char *tagp = NULL;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i;
        int tag_bytes = journal_tag_bytes(journal);
        struct buffer_head *cbh = NULL; /* For transactional checksums */
        __u32 crc32_sum = ~0;
        struct blk_plug plug;
        /* Tail of the journal */
        unsigned long first_block;
        tid_t first_tid;
        int update_tail;
        int csum_size = 0;
        LIST_HEAD(io_bufs);
        LIST_HEAD(log_bufs);

        if (jbd2_journal_has_csum_v2or3(journal))
                csum_size = sizeof(struct jbd2_journal_block_tail);

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

        /* Do we need to erase the effects of a prior jbd2_journal_flush? */
        if (journal->j_flags & JBD2_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                mutex_lock_io(&journal->j_checkpoint_mutex);
                /*
                 * We hold j_checkpoint_mutex so the tail cannot change under
                 * us. We don't need any special data guarantees for writing
                 * the sb since the journal is empty and it is ok for the
                 * write to be flushed only with the transaction commit.
                 */
                jbd2_journal_update_sb_log_tail(journal,
                                                journal->j_tail_sequence,
                                                journal->j_tail,
                                                REQ_SYNC);
                mutex_unlock(&journal->j_checkpoint_mutex);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        write_lock(&journal->j_state_lock);
        journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
        while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_fc_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                write_unlock(&journal->j_state_lock);
                schedule();
                write_lock(&journal->j_state_lock);
                finish_wait(&journal->j_fc_wait, &wait);
                /*
                 * TODO: by blocking fast commits here, we are increasing
                 * fsync() latency slightly. Strictly speaking, we don't need
                 * to block fast commits until the transaction enters T_FLUSH
                 * state. So an optimization is possible where we block new
                 * fast commits here and wait for existing ones to complete
                 * just before we enter T_FLUSH. That way, the existing fast
                 * commits and this full commit can proceed in parallel.
                 */
        }
        write_unlock(&journal->j_state_lock);

        commit_transaction = journal->j_running_transaction;

        trace_jbd2_start_commit(journal, commit_transaction);
        jbd_debug(1, "JBD2: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        write_lock(&journal->j_state_lock);
        journal->j_fc_off = 0;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);
        commit_transaction->t_state = T_LOCKED;

        trace_jbd2_commit_locking(journal, commit_transaction);
        stats.run.rs_wait = commit_transaction->t_max_wait;
        stats.run.rs_request_delay = 0;
        stats.run.rs_locked = jiffies;
        if (commit_transaction->t_requested)
                stats.run.rs_request_delay =
                        jbd2_time_diff(commit_transaction->t_requested,
                                       stats.run.rs_locked);
        stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
                                              stats.run.rs_locked);

        /* Wait for any outstanding t_updates to finish. */
        jbd2_journal_wait_updates(journal);

        commit_transaction->t_state = T_SWITCH;

        J_ASSERT(atomic_read(&commit_transaction->t_outstanding_credits) <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a jbd2_journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple jbd2_journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         * We use journal->j_state_lock here to serialize processing of
         * t_reserved_list with eviction of buffers from journal_unmap_buffer().
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        spin_lock(&jh->b_state_lock);
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        spin_unlock(&jh->b_state_lock);
                }
                jbd2_journal_refile_buffer(journal, jh);
        }

        write_unlock(&journal->j_state_lock);
        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it
         * potentially frees some memory.
         */
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_clean_checkpoint_list(journal, false);
        spin_unlock(&journal->j_list_lock);

        jbd_debug(3, "JBD2: commit phase 1\n");

        /*
         * Clear the revoked flag to reflect that there are no revoked
         * buffers in the next transaction which is going to be started.
         */
        jbd2_clear_buffer_revoked_flags(journal);

        /*
         * Switch to a new revoke table.
         */
        jbd2_journal_switch_revoke_table(journal);

        /*
         * Reserved credits cannot be claimed anymore; free them.
         */
        atomic_sub(atomic_read(&journal->j_reserved_credits),
                   &commit_transaction->t_outstanding_credits);

        write_lock(&journal->j_state_lock);
        trace_jbd2_commit_flushing(journal, commit_transaction);
        stats.run.rs_flushing = jiffies;
        stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
                                             stats.run.rs_flushing);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        start_time = ktime_get();
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        write_unlock(&journal->j_state_lock);

        jbd_debug(3, "JBD2: commit phase 2a\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        err = journal_submit_data_buffers(journal, commit_transaction);
        if (err)
                jbd2_journal_abort(journal, err);

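        /*
         * Plug the block device queue so that the revoke, descriptor and
         * metadata block writes submitted below can be batched up and
         * merged; the plug is released once everything has been queued.
         */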
        blk_start_plug(&plug);
        jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

        jbd_debug(3, "JBD2: commit phase 2b\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        write_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        write_unlock(&journal->j_state_lock);

        trace_jbd2_commit_logging(journal, commit_transaction);
        stats.run.rs_logging = jiffies;
        stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
                                               stats.run.rs_logging);
        stats.run.rs_blocks = commit_transaction->t_nr_buffers;
        stats.run.rs_blocks_logged = 0;

        J_ASSERT(commit_transaction->t_nr_buffers <=
                 atomic_read(&commit_transaction->t_outstanding_credits));

        err = 0;
        bufs = 0;
        descriptor = NULL;
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it. */

                if (is_journal_aborted(journal)) {
                        clear_buffer_jbddirty(jh2bh(jh));
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        jbd2_buffer_abort_trigger(jh,
                                                  jh->b_frozen_data ?
                                                  jh->b_frozen_triggers :
                                                  jh->b_triggers);
                        jbd2_journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        J_ASSERT(bufs == 0);

                        jbd_debug(4, "JBD2: get descriptor\n");

                        descriptor = jbd2_journal_get_descriptor_buffer(
                                                        commit_transaction,
                                                        JBD2_DESCRIPTOR_BLOCK);
                        if (!descriptor) {
                                jbd2_journal_abort(journal, -EIO);
                                continue;
                        }

                        jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
                                (unsigned long long)descriptor->b_blocknr,
                                descriptor->b_data);
                        tagp = &descriptor->b_data[sizeof(journal_header_t)];
                        space_left = descriptor->b_size -
                                                sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(descriptor);
                        set_buffer_dirty(descriptor);
                        wbuf[bufs++] = descriptor;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(descriptor, "ph3: file as descriptor");
                        jbd2_file_log_bh(&log_bufs, descriptor);
                }

                /* Where is the buffer to be written? */

                err = jbd2_journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        jbd2_journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log.
                 */
                atomic_dec(&commit_transaction->t_outstanding_credits);

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);

                /*
                 * Make a temporary IO buffer with which to write it out
                 * (this will requeue the metadata buffer to BJ_Shadow).
                 */
                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = jbd2_journal_write_metadata_buffer(commit_transaction,
                                                jh, &wbuf[bufs], blocknr);
                if (flags < 0) {
                        jbd2_journal_abort(journal, flags);
                        continue;
                }
                jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

                /* Record the new block's tag in the current descriptor
                   buffer */

                tag_flag = 0;
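                /*
                 * The low bit of "flags" means the data block had to be
                 * escaped: its first four bytes matched the jbd2 magic
                 * number, so they were zeroed in the log copy, and
                 * JBD2_FLAG_ESCAPE tells recovery to restore them on replay.
                 */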
                if (flags & 1)
                        tag_flag |= JBD2_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JBD2_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be16(tag_flag);
                jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
                                        commit_transaction->t_tid);
                tagp += tag_bytes;
                space_left -= tag_bytes;
                bufs++;

                if (first_tag) {
                        memcpy(tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }

                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

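                /*
                 * The space check reserves room in the descriptor for one
                 * more tag, a 16-byte UUID (in case the next tag is a first
                 * tag), and the descriptor block checksum tail.
                 */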
                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < tag_bytes + 16 + csum_size) {

                        jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
                        if (descriptor)
                                jbd2_descriptor_block_csum_set(journal,
                                                        descriptor);

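                        /*
                         * With the old-style JBD2_FEATURE_COMPAT_CHECKSUM, a
                         * running crc32 over every block written to the log
                         * is accumulated in crc32_sum and later stored in
                         * the commit record, so that an async commit can be
                         * validated during recovery.
                         */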
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                /*
                                 * Compute checksum.
                                 */
                                if (jbd2_has_feature_checksum(journal)) {
                                        crc32_sum =
                                            jbd2_checksum_data(crc32_sum, bh);
                                }

                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
                        }
                        cond_resched();

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }

        err = journal_finish_inode_data_buffers(journal, commit_transaction);
        if (err) {
                printk(KERN_WARNING
                        "JBD2: Detected IO errors while flushing file data "
                       "on %s\n", journal->j_devname);
                if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
                        jbd2_journal_abort(journal, err);
                err = 0;
        }

        /*
         * Get the current oldest transaction in the log before we issue a
         * flush to the filesystem device. After the flush we can be sure
         * that blocks of all older transactions are checkpointed to
         * persistent storage and we will be safe to update the journal
         * start in the superblock with the numbers we get here.
         */
        update_tail =
                jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

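        /*
         * "freed" measures how far the tail would advance. If the new tail
         * wrapped around past the end of the log, add back the log size
         * (j_last - j_first) to get the real amount of space freed.
         */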
        write_lock(&journal->j_state_lock);
        if (update_tail) {
                long freed = first_block - journal->j_tail;

                if (first_block < journal->j_tail)
                        freed += journal->j_last - journal->j_first;
                /* Update tail only if we free significant amount of space */
                if (freed < jbd2_journal_get_max_txn_bufs(journal))
                        update_tail = 0;
        }
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
        commit_transaction->t_state = T_COMMIT_DFLUSH;
        write_unlock(&journal->j_state_lock);

        /*
         * If the journal is not located on the file system device,
         * then we must flush the file system device before we issue
         * the commit record
         */
        if (commit_transaction->t_need_data_flush &&
            (journal->j_fs_dev != journal->j_dev) &&
            (journal->j_flags & JBD2_BARRIER))
                blkdev_issue_flush(journal->j_fs_dev);

        /* Done it all: now write the commit record asynchronously. */
        if (jbd2_has_feature_async_commit(journal)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                 &cbh, crc32_sum);
                if (err)
                        jbd2_journal_abort(journal, err);
        }

        blk_finish_plug(&plug);

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the io_bufs list.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */

        jbd_debug(3, "JBD2: commit phase 3\n");

        while (!list_empty(&io_bufs)) {
                struct buffer_head *bh = list_entry(io_bufs.prev,
                                                    struct buffer_head,
                                                    b_assoc_buffers);

                wait_on_buffer(bh);
                cond_resched();

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;
                jbd2_unfile_log_bh(bh);
                stats.run.rs_blocks_logged++;

                /*
                 * The list contains temporary buffer heads created by
                 * jbd2_journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to refile the corresponding shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_buffer_jwrite(bh);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));
                J_ASSERT_BH(bh, !buffer_shadow(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT(commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD2: commit phase 4\n");

        /* Here we wait for the revoke record and descriptor record buffers */
        while (!list_empty(&log_bufs)) {
                struct buffer_head *bh;

                bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
                wait_on_buffer(bh);
                cond_resched();

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                jbd2_unfile_log_bh(bh);
                stats.run.rs_blocks_logged++;
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        if (err)
                jbd2_journal_abort(journal, err);

        jbd_debug(3, "JBD2: commit phase 5\n");
        write_lock(&journal->j_state_lock);
        J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
        commit_transaction->t_state = T_COMMIT_JFLUSH;
        write_unlock(&journal->j_state_lock);

        if (!jbd2_has_feature_async_commit(journal)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                &cbh, crc32_sum);
                if (err)
                        jbd2_journal_abort(journal, err);
        }
        if (cbh)
                err = journal_wait_on_commit_record(journal, cbh);
        stats.run.rs_blocks_logged++;
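        /*
         * An async commit record was written without PREFLUSH/FUA, so flush
         * the journal device here to make the commit durable before the
         * transaction is declared committed.
         */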
        if (jbd2_has_feature_async_commit(journal) &&
            journal->j_flags & JBD2_BARRIER) {
                blkdev_issue_flush(journal->j_dev);
        }

        if (err)
                jbd2_journal_abort(journal, err);

        WARN_ON_ONCE(
                atomic_read(&commit_transaction->t_outstanding_credits) < 0);

        /*
         * Now the disk caches for the filesystem device are flushed, so we
         * are safe to erase checkpointed transactions from the log by
         * updating the journal superblock.
         */
        if (update_tail)
                jbd2_update_log_tail(journal, first_tid, first_block);

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

        jbd_debug(3, "JBD2: commit phase 6\n");

        J_ASSERT(list_empty(&commit_transaction->t_inode_list));
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;
                int try_to_free = 0;
                bool drop_ref;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                /*
                 * Get a reference so that bh cannot be freed before we are
                 * done with it.
                 */
                get_bh(bh);
                spin_lock(&jh->b_state_lock);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 *
                 * We also know that the frozen data has already fired
                 * its triggers if they exist, so we can clear that too.
                 */
                if (jh->b_committed_data) {
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                                jh->b_frozen_triggers = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd2_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                        jh->b_frozen_triggers = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        cp_transaction->t_chp_stats.cs_dropped++;
                        __jbd2_journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by jbd2_journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /*
                 * A buffer which has been freed while still being journaled
                 * by a previous transaction is refiled to BJ_Forget of the
                 * running transaction. If the just committed transaction
                 * contains an "add to orphan" operation, we can completely
                 * invalidate the buffer now. We are rather thorough in that
                 * since the buffer may still be accessible when blocksize <
                 * pagesize and it is attached to the last partial page.
                 */
                if (buffer_freed(bh) && !jh->b_next_transaction) {
                        struct address_space *mapping;

                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);

                        /*
                         * Block device buffers need to stay mapped all the
                         * time, so it is enough to clear buffer_jbddirty and
                         * buffer_freed bits. For the file mapping buffers (i.e.
                         * journalled data) we need to unmap buffer and clear
                         * more bits. We also need to be careful about the check
                         * because the data page mapping can get cleared under
                         * our hands. Note that if mapping == NULL, we don't
                         * need to make buffer unmapped because the page is
                         * already detached from the mapping and buffers cannot
                         * get reused.
                         */
                        mapping = READ_ONCE(bh->b_page->mapping);
                        if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
                                clear_buffer_mapped(bh);
                                clear_buffer_new(bh);
                                clear_buffer_req(bh);
                                bh->b_bdev = NULL;
                        }
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __jbd2_journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /*
                         * The buffer on BJ_Forget list and not jbddirty means
                         * it has been freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on BJ_Forget
                         * list.
                         */
                        if (!jh->b_next_transaction)
                                try_to_free = 1;
                }
                JBUFFER_TRACE(jh, "refile or unfile buffer");
                drop_ref = __jbd2_journal_refile_buffer(jh);
                spin_unlock(&jh->b_state_lock);
                if (drop_ref)
                        jbd2_journal_put_journal_head(jh);
                if (try_to_free)
                        release_buffer_page(bh);        /* Drops bh reference */
                else
                        __brelse(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect transition
         * of a transaction into T_FINISHED state and calling
         * __jbd2_journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        write_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                write_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Add the transaction to the checkpoint list
         * __journal_remove_checkpoint() can not destroy transaction
         * under us because it is not marked as T_FINISHED yet */
        if (journal->j_checkpoint_transactions == NULL) {
                journal->j_checkpoint_transactions = commit_transaction;
                commit_transaction->t_cpnext = commit_transaction;
                commit_transaction->t_cpprev = commit_transaction;
        } else {
                commit_transaction->t_cpnext =
                        journal->j_checkpoint_transactions;
                commit_transaction->t_cpprev =
                        commit_transaction->t_cpnext->t_cpprev;
                commit_transaction->t_cpnext->t_cpprev =
                        commit_transaction;
                commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
        }
        spin_unlock(&journal->j_list_lock);

        /* Done with this transaction! */

        jbd_debug(3, "JBD2: commit phase 7\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

        commit_transaction->t_start = jiffies;
        stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
                                              commit_transaction->t_start);

        /*
         * File the transaction statistics
         */
        stats.ts_tid = commit_transaction->t_tid;
        stats.run.rs_handle_count =
                atomic_read(&commit_transaction->t_handle_count);
        trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
                             commit_transaction->t_tid, &stats.run);
        stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

        commit_transaction->t_state = T_COMMIT_CALLBACK;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

        /*
         * weight the commit time higher than the average time so we don't
         * react too strongly to vast changes in the commit time
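         * (effectively new_avg = (commit_time + 3 * old_avg) / 4).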
         */
        if (likely(journal->j_average_commit_time))
                journal->j_average_commit_time = (commit_time +
                                journal->j_average_commit_time*3) / 4;
        else
                journal->j_average_commit_time = commit_time;

        write_unlock(&journal->j_state_lock);

        if (journal->j_commit_callback)
                journal->j_commit_callback(journal, commit_transaction);
        if (journal->j_fc_cleanup_callback)
                journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);

        trace_jbd2_end_commit(journal, commit_transaction);
        jbd_debug(1, "JBD2: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);

        write_lock(&journal->j_state_lock);
        journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
        journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
        spin_lock(&journal->j_list_lock);
        commit_transaction->t_state = T_FINISHED;
        /* Check if the transaction can be dropped now that we are finished */
        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __jbd2_journal_drop_transaction(journal, commit_transaction);
                jbd2_journal_free_transaction(commit_transaction);
        }
        spin_unlock(&journal->j_list_lock);
        write_unlock(&journal->j_state_lock);
        wake_up(&journal->j_wait_done_commit);
        wake_up(&journal->j_fc_wait);

        /*
         * Calculate overall stats
         */
        spin_lock(&journal->j_history_lock);
        journal->j_stats.ts_tid++;
        journal->j_stats.ts_requested += stats.ts_requested;
        journal->j_stats.run.rs_wait += stats.run.rs_wait;
        journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
        journal->j_stats.run.rs_running += stats.run.rs_running;
        journal->j_stats.run.rs_locked += stats.run.rs_locked;
        journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
        journal->j_stats.run.rs_logging += stats.run.rs_logging;
        journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
        journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
        journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
        spin_unlock(&journal->j_history_lock);
}