// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /*
			      * Upper limit of the number of segments
			      * appended in collection retry loop
			      */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files, leads to segment writes without
			 * creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file. This also creates segments
			 * without a checkpoint.
			 */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
 * wrapper functions for the stage count (nilfs_sc_info->sc_stage.scnt).
 * Users of the variable must go through them because every stage count
 * transition must emit a trace event
 * (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't required for that purpose because it doesn't
 * produce tracepoint events; it is provided just to make the intention
 * clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

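/*
 * nilfs_cnt32_ge() compares two 32-bit sequence numbers using modular
 * arithmetic, the same trick as the kernel's time_after_eq(): it evaluates
 * to true when @a is at or after @b and stays correct across counter
 * wraparound as long as the two values are less than 2^31 apart.
 */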
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)(a) - (__s32)(b) >= 0))

static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If the journal_info field is occupied by another FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		nilfs_warn(sb, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive. The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested. To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it. It is initialized and hooked onto the current task in
 * the outermost call. If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if capacity is low.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
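/*
 * A minimal usage sketch (illustrative only; do_some_update() is a
 * hypothetical helper standing in for the caller's actual modification):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (unlikely(err))
 *		return err;
 *	err = do_some_update(...);	// hypothetical helper
 *	if (unlikely(err)) {
 *		nilfs_transaction_abort(sb);
 *		return err;
 *	}
 *	return nilfs_transaction_commit(sb);
 */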
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				trace_ti->ti_count, trace_ti->ti_flags,
				TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in the outermost call of this function. If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor. If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

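/*
 * nilfs_segctor_map_segsum_entry() reserves @bytes of space in the segment
 * summary area and returns a pointer to the reserved region, stepping to
 * the next summary buffer when the entry does not fit in the current block.
 */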
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;

	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
	if (ssp->offset < ssp->bh->b_size)
		memset(ssp->bh->b_data + ssp->offset, 0,
		       ssp->bh->b_size - ssp->offset);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	nilfs_segctor_zeropad_segsum(sci);
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* The sizes of finfo and binfo are small compared with the block size */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

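/*
 * nilfs_segctor_add_file_block() makes room for a binfo entry of
 * @binfo_size in the segment summary and queues @bh into the current
 * segment buffer, closing the current finfo and feeding a fresh segment
 * buffer when the block no longer fits.
 */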
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		nilfs_segctor_zeropad_segsum(sci);
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

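/*
 * nilfs_lookup_dirty_data_buffers() scans the page cache of @inode for
 * dirty folios, attaches buffer heads to folios that have none, and
 * collects up to @nlimit dirty buffers onto @listp, returning the number
 * collected.  Callers pass one more than the space actually available so
 * that overflow can be detected from the return value.
 */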
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for syncing data pages. The
		 * range is rounded to page boundaries; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	folio_batch_init(&fbatch);
 repeat:
	if (unlikely(index > last) ||
	    !filemap_get_folios_tag(mapping, &index, last,
			PAGECACHE_TAG_DIRTY, &fbatch))
		return ndirties;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct buffer_head *bh, *head;
		struct folio *folio = fbatch.folios[i];

		folio_lock(folio);
		if (unlikely(folio->mapping != mapping)) {
			/* Exclude folios removed from the address space */
			folio_unlock(folio);
			continue;
		}
		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio,
					i_blocksize(inode), 0);
		folio_unlock(folio);

		bh = head;
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				folio_batch_release(&fbatch);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;
	struct folio_batch fbatch;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	if (!btnc_inode)
		return;
	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
				(pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			bh = head = folio_buffers(fbatch.folios[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code is duplicated with cpfile. But, it is
		 * needed to collect the checkpoint even if it was not newly
		 * created.
		 */
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else if (err == -EINVAL || err == -ENOENT) {
		nilfs_error(sci->sc_super,
			    "checkpoint creation failed due to metadata corruption.");
		err = -EIO;
	}
	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		if (err == -EINVAL || err == -ENOENT) {
			nilfs_error(sci->sc_super,
				    "checkpoint finalization failed due to metadata corruption.");
			err = -EIO;
		}
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)

{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;

	lock_buffer(bh_sr);
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
	set_buffer_uptodate(bh_sr);
	unlock_buffer(bh_sr);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

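/*
 * nilfs_segctor_collect_blocks() drives the dirty block collection stage
 * machine: the switch below resumes at the stage recorded in sci->sc_stage
 * and deliberately falls through to later stages, so a construction that
 * was interrupted (e.g. by a filled-up segment) can continue where it
 * left off.
 */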
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		fallthrough;
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		fallthrough;
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty. The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}


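/*
 * nilfs_segctor_collect() runs the collection stages, retrying with
 * progressively more segment buffers: each time collection fails with
 * -E2BIG, the number of segments to append is doubled (capped at
 * SC_MAX_SEGDELTA) and the stage is rolled back to prev_stage so that
 * collection restarts from where the retry loop was entered.
 */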
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* does not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

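/*
 * nilfs_segctor_update_payload_blocknr() walks the payload buffers of
 * @segbuf, has the bmap layer assign each buffer its final disk block
 * number, and writes the resulting binfo entries into the segment summary
 * right after their finfo headers.
 */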
static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	const struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_folio->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_folio_io(struct folio *folio)
{
	if (!folio || folio_test_writeback(folio))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice. We ignore the 2nd or later calls by this check.
		 */
		return;

	folio_lock(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
}

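/*
 * nilfs_segctor_prepare_write() puts every folio touched by the segment
 * under writeback: bd_folio tracks block-device folios holding segment
 * summary blocks and the super root, while fs_folio tracks file folios
 * holding payload blocks, whose buffers are additionally flagged for
 * async write.
 */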
static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_folio != bd_folio) {
				if (bd_folio) {
					folio_lock(bd_folio);
					folio_clear_dirty_for_io(bd_folio);
					folio_start_writeback(bd_folio);
					folio_unlock(bd_folio);
				}
				bd_folio = bh->b_folio;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_folio != bd_folio) {
					folio_lock(bd_folio);
					folio_clear_dirty_for_io(bd_folio);
					folio_start_writeback(bd_folio);
					folio_unlock(bd_folio);
					bd_folio = bh->b_folio;
				}
				break;
			}
			set_buffer_async_write(bh);
			if (bh->b_folio != fs_folio) {
				nilfs_begin_folio_io(fs_folio);
				fs_folio = bh->b_folio;
			}
		}
	}
	if (bd_folio) {
		folio_lock(bd_folio);
		folio_clear_dirty_for_io(bd_folio);
		folio_start_writeback(bd_folio);
		folio_unlock(bd_folio);
	}
	nilfs_begin_folio_io(fs_folio);
}

1732static int nilfs_segctor_write(struct nilfs_sc_info *sci,
9c965bac 1733 struct the_nilfs *nilfs)
9ff05123 1734{
d1c6b72a 1735 int ret;
9ff05123 1736
d1c6b72a 1737 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
a694291a
RK
1738 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1739 return ret;
9ff05123
RK
1740}

static void nilfs_end_folio_io(struct folio *folio, int err)
{
	if (!folio)
		return;

	if (buffer_nilfs_node(folio_buffers(folio)) &&
	    !folio_test_writeback(folio)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (folio_test_dirty(folio)) {
			/*
			 * For folios holding split b-tree node buffers, the
			 * dirty flag on the buffers may be cleared discretely.
			 * In that case, the folio is redirtied once for the
			 * remaining buffers, and that dirty state must be
			 * cancelled if all the buffers get cleaned later.
			 */
			folio_lock(folio);
			if (nilfs_folio_buffers_clean(folio))
				__nilfs_clear_folio_dirty(folio);
			folio_unlock(folio);
		}
		return;
	}

	if (!err) {
		if (!nilfs_folio_buffers_clean(folio))
			filemap_dirty_folio(folio->mapping, folio);
		folio_clear_error(folio);
	} else {
		filemap_dirty_folio(folio->mapping, folio);
		folio_set_error(folio);
	}

	folio_end_writeback(folio);
}
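/*
 * nilfs_folio_buffers_clean() above decides whether the redirty can be
 * cancelled.  A minimal sketch of what such a predicate looks like,
 * assuming it simply walks the folio's buffer ring -- the actual
 * implementation lives in page.c and may differ in detail:
 */
#if 0	/* illustrative sketch, see nilfs_folio_buffers_clean() in page.c */
static bool toy_folio_buffers_clean(struct folio *folio)
{
	struct buffer_head *bh, *head;

	bh = head = folio_buffers(folio);
	do {
		if (buffer_dirty(bh))
			return false;	/* at least one buffer still dirty */
		bh = bh->b_this_page;
	} while (bh != head);
	return true;			/* all buffers clean */
}
#endif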

static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			clear_buffer_uptodate(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				clear_buffer_uptodate(bh);
				if (bh->b_folio != bd_folio) {
					folio_end_writeback(bd_folio);
					bd_folio = bh->b_folio;
				}
				break;
			}
			clear_buffer_async_write(bh);
			if (bh->b_folio != fs_folio) {
				nilfs_end_folio_io(fs_folio, err);
				fs_folio = bh->b_folio;
			}
		}
	}
	if (bd_folio)
		folio_end_writeback(bd_folio);

	nilfs_end_folio_io(fs_folio, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* should never happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}
		/*
		 * We assume that the buffers which belong to the same folio
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of folios are
		 * identifiable by the discontinuity of bh->b_folio
		 * (folio != fs_folio).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node folios needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			const unsigned long set_bits = BIT(BH_Uptodate);
			const unsigned long clear_bits =
				(BIT(BH_Dirty) | BIT(BH_Async_Write) |
				 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
				 BIT(BH_NILFS_Redirected));

			if (bh == segbuf->sb_super_root) {
				set_buffer_uptodate(bh);
				clear_buffer_dirty(bh);
				if (bh->b_folio != bd_folio) {
					folio_end_writeback(bd_folio);
					bd_folio = bh->b_folio;
				}
				update_sr = true;
				break;
			}
			set_mask_bits(&bh->b_state, clear_bits, set_bits);
			if (bh->b_folio != fs_folio) {
				nilfs_end_folio_io(fs_folio, 0);
				fs_folio = bh->b_folio;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since folios may continue over multiple segment buffers,
	 * the end of the last folio must be checked outside of the loop.
	 */
	if (bd_folio)
		folio_end_writeback(bd_folio);

	nilfs_end_folio_io(fs_folio, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs->ns_flushed_device = 0;
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}
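/*
 * set_mask_bits() above retires several buffer-state flags and sets
 * BH_Uptodate in one atomic step.  A self-contained illustration of the
 * idiom (generic code, not part of the driver; buffer-state bits come
 * from the headers this file already includes):
 */
#if 0	/* illustration of the set_mask_bits() idiom */
static void toy_retire_buffer_state(unsigned long *state)
{
	const unsigned long set_bits = BIT(BH_Uptodate);
	const unsigned long clear_bits = BIT(BH_Dirty) | BIT(BH_Async_Write);

	/*
	 * Equivalent to a cmpxchg loop computing
	 *   new = (old & ~clear_bits) | set_bits
	 * so no concurrent bit update is lost between the clear and
	 * the set.
	 */
	set_mask_bits(state, clear_bits, set_bits);
}
#endif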

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warn(sci->sc_super,
					   "log writer: error %d getting inode block (ino=%lu)",
					   err, ii->vfs_inode.i_ino);
				return err;
			}
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		/* Always redirty the buffer to avoid a race condition */
		mark_buffer_dirty(ii->i_bh);
		nilfs_mdt_mark_dirty(ifile);

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}

static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
	int defer_iput = false;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del_init(&ii->i_dirty);
		if (!ii->vfs_inode.i_nlink || during_mount) {
			/*
			 * Defer calling iput() to avoid deadlocks if
			 * i_nlink == 0 or mount is not yet finished.
			 */
			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
			defer_iput = true;
		} else {
			spin_unlock(&nilfs->ns_inode_lock);
			iput(&ii->vfs_inode);
			spin_lock(&nilfs->ns_inode_lock);
		}
	}
	spin_unlock(&nilfs->ns_inode_lock);

	if (defer_iput)
		schedule_work(&sci->sc_iput_work);
}
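/*
 * sc_iput_queue is drained by nilfs_iput_work_func(), which is wired up
 * in nilfs_segctor_new() below.  A sketch of what such a work handler
 * presumably does, under the assumption that it just splices the queue
 * and iputs each inode with no locks held -- illustrative, not the
 * verbatim implementation:
 */
#if 0	/* illustrative sketch of the deferred-iput pattern */
static void toy_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_inode_info *ii, *n;
	LIST_HEAD(queue);

	spin_lock(&nilfs->ns_inode_lock);
	list_splice_init(&sci->sc_iput_queue, &queue);
	spin_unlock(&nilfs->ns_inode_lock);

	list_for_each_entry_safe(ii, n, &queue, i_dirty) {
		list_del_init(&ii->i_dirty);
		iput(&ii->vfs_inode);	/* safe here: no locks held */
	}
}
#endif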

/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	if (sb_rdonly(sci->sc_super))
		return -EROFS;

	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = ktime_get_real_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_segctor_prepare_write(sci);

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);

 out:
	nilfs_segctor_drop_written_files(sci, nilfs);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & BIT(bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= BIT(bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}
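/*
 * The flush-request word is a bitmap: metadata file inodes map to
 * BIT(ino), while every regular data file shares bit 0 (see the
 * FLUSH_FILE_BIT/FLUSH_DAT_BIT definitions below).  A small
 * self-contained illustration of that encoding -- toy code, not part
 * of the driver:
 */
#if 0	/* illustration of the flush-request bit encoding */
static unsigned long toy_flush_bit(struct super_block *sb, ino_t ino)
{
	/* metadata inodes (DAT, cpfile, ...) get their own bit ... */
	if (NILFS_MDT_INODE(sb, ino))
		return BIT(ino);
	/* ... while every ordinary data file collapses onto bit 0 */
	return BIT(0);
}
#endif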

struct nilfs_segctor_wait_request {
	wait_queue_entry_t wq;
	__u32 seq;
	int err;
	atomic_t done;
};

static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}
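/*
 * nilfs_cnt32_ge() compares the 32-bit request/done sequence counters so
 * that the comparison stays correct across wraparound.  A minimal sketch
 * of the usual idiom, assuming nilfs_cnt32_ge() follows the same
 * signed-difference trick as the time_after() family:
 */
#if 0	/* illustrative sketch of wraparound-safe sequence comparison */
static inline int toy_cnt32_ge(__u32 a, __u32 b)
{
	/*
	 * The subtraction wraps modulo 2^32; reinterpreting the result
	 * as signed makes "a is at or ahead of b" come out true as long
	 * as the two counters are less than 2^31 apart.
	 */
	return (__s32)(a - b) >= 0;
}
#endif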

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;

	if (sb_rdonly(sb) || unlikely(!sci))
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	return nilfs_segctor_sync(sci);
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (sb_rdonly(sb) || unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
	if (!err)
		nilfs->ns_flushed_device = 0;

	nilfs_transaction_unlock(sb);
	return err;
}
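/*
 * This is the entry point the fsync/fdatasync path uses to flush a byte
 * range of one file without forcing a full checkpoint.  A hedged sketch
 * of such a caller (the real one is nilfs_sync_file() in file.c; the
 * details here are simplified assumptions, not that function verbatim):
 */
#if 0	/* illustrative caller, see nilfs_sync_file() for the real thing */
static int toy_fsync(struct file *file, loff_t start, loff_t end,
		     int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err = 0;

	if (nilfs_inode_dirty(inode))	/* only if something is queued */
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    start, end);
	return err;
}
#endif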

#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	BIT(NILFS_DAT_INO) /* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

static void nilfs_construction_timeout(struct timer_list *t)
{
	struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);

	wake_up_process(sci->sc_timer_task);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
		iput(&ii->vfs_inode);
	}
}

int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warn(sb, "error %d cleaning segments", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);

		if (ret) {
			nilfs_warn(sb,
				   "error %d on discard request, turning discards off for the device",
				   ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}
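/*
 * nilfs_clean_segments() is the kernel half of garbage collection;
 * userspace (nilfs_cleanerd) drives it through the clean-segments
 * ioctl.  A heavily hedged sketch of such a caller: the argv
 * marshalling is simplified, argv[0..3] (victim block descriptors and
 * related vectors) are elided, and only argv[4] -- which carries the
 * segment numbers to free, matching kbufs[4] above -- is shown.
 */
#if 0	/* illustrative userspace caller (simplified; see nilfs_cleanerd) */
#include <sys/ioctl.h>
#include <linux/nilfs2_api.h>

static int toy_clean(int devfd, __u64 *segnums, size_t nsegs)
{
	struct nilfs_argv argv[5] = { };

	argv[4].v_base = (unsigned long)segnums;
	argv[4].v_nmembs = nsegs;
	argv[4].v_size = sizeof(__u64);

	return ioctl(devfd, NILFS_IOCTL_CLEAN_SEGMENTS, argv);
}
#endif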

static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction, which
	 * closes the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}

static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int timeout = 0;

	sci->sc_timer_task = current;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	nilfs_info(sci->sc_super,
		   "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
		   sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	set_freezable();
	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (sci->sc_flush_request)
			mode = nilfs_segctor_flush_mode(sci);
		else
			break;

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		try_to_freeze();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
						   sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	spin_unlock(&sci->sc_state_lock);
	return 0;
}

static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		nilfs_err(sci->sc_super, "error %d creating segctord thread",
			  err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed.
	 * But some tasks remain.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && ret != -EROFS && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed dirty file(s) when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed inode(s) in iput queue when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	timer_shutdown_sync(&sci->sc_timer);
	kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem is made read-only by
		 * __nilfs_error or nilfs_remount and then remounted
		 * read/write.  In these cases, reuse the existing writer.
		 */
		return 0;
	}

	nilfs->ns_writer = nilfs_segctor_new(sb, root);
	if (!nilfs->ns_writer)
		return -ENOMEM;

	inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);

	err = nilfs_segctor_start_thread(nilfs->ns_writer);
	if (unlikely(err))
		nilfs_detach_log_writer(sb);

	return err;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * destroys the list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	set_nilfs_purging(nilfs);

	/* Force to free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_warn(sb,
			   "disposed unprocessed dirty file(s) when detaching log writer");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
	clear_nilfs_purging(nilfs);
}