Merge tag 'phy-for-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy
[linux-block.git] / fs / nilfs2 / segment.c
CommitLineData
ae98043f 1// SPDX-License-Identifier: GPL-2.0+
9ff05123 2/*
94ee1d91 3 * NILFS segment constructor.
9ff05123
RK
4 *
5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6 *
4b420ab4 7 * Written by Ryusuke Konishi.
9ff05123
RK
8 *
9 */
10
11#include <linux/pagemap.h>
12#include <linux/buffer_head.h>
13#include <linux/writeback.h>
ead8ecff 14#include <linux/bitops.h>
9ff05123
RK
15#include <linux/bio.h>
16#include <linux/completion.h>
17#include <linux/blkdev.h>
18#include <linux/backing-dev.h>
19#include <linux/freezer.h>
20#include <linux/kthread.h>
21#include <linux/crc32.h>
22#include <linux/pagevec.h>
5a0e3ad6 23#include <linux/slab.h>
174cd4b1
IM
24#include <linux/sched/signal.h>
25
9ff05123
RK
26#include "nilfs.h"
27#include "btnode.h"
28#include "page.h"
29#include "segment.h"
30#include "sufile.h"
31#include "cpfile.h"
32#include "ifile.h"
9ff05123
RK
33#include "segbuf.h"
34
35
36/*
37 * Segment constructor
38 */
39#define SC_N_INODEVEC 16 /* Size of locally allocated inode vector */
40
076a378b
RK
41#define SC_MAX_SEGDELTA 64 /*
42 * Upper limit of the number of segments
43 * appended in collection retry loop
44 */
9ff05123
RK
45
46/* Construction mode */
47enum {
48 SC_LSEG_SR = 1, /* Make a logical segment having a super root */
076a378b
RK
49 SC_LSEG_DSYNC, /*
50 * Flush data blocks of a given file and make
51 * a logical segment without a super root.
52 */
53 SC_FLUSH_FILE, /*
54 * Flush data files, leads to segment writes without
55 * creating a checkpoint.
56 */
57 SC_FLUSH_DAT, /*
58 * Flush DAT file. This also creates segments
59 * without a checkpoint.
60 */
9ff05123
RK
61};
62
63/* Stage numbers of dirty block collection */
64enum {
65 NILFS_ST_INIT = 0,
66 NILFS_ST_GC, /* Collecting dirty blocks for GC */
67 NILFS_ST_FILE,
9ff05123
RK
68 NILFS_ST_IFILE,
69 NILFS_ST_CPFILE,
70 NILFS_ST_SUFILE,
71 NILFS_ST_DAT,
72 NILFS_ST_SR, /* Super root */
73 NILFS_ST_DSYNC, /* Data sync blocks */
74 NILFS_ST_DONE,
75};
76
58497703
HM
77#define CREATE_TRACE_POINTS
78#include <trace/events/nilfs2.h>
79
80/*
81 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
82 * wrapper functions of stage count (nilfs_sc_info->sc_stage.scnt). Users of
83 * the variable must use them because transition of stage count must involve
84 * trace events (trace_nilfs2_collection_stage_transition).
85 *
86 * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't
87 * produce tracepoint events. It is provided just for making the intention
88 * clear.
89 */
90static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
91{
92 sci->sc_stage.scnt++;
93 trace_nilfs2_collection_stage_transition(sci);
94}
95
96static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
97{
98 sci->sc_stage.scnt = next_scnt;
99 trace_nilfs2_collection_stage_transition(sci);
100}
101
102static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
103{
104 return sci->sc_stage.scnt;
105}
106
9ff05123
RK
107/* State flags of collection */
108#define NILFS_CF_NODE 0x0001 /* Collecting node blocks */
109#define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */
071cb4b8
RK
110#define NILFS_CF_SUFREED 0x0004 /* segment usages has been freed */
111#define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
9ff05123
RK
112
113/* Operations depending on the construction mode and file type */
114struct nilfs_sc_operations {
115 int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
116 struct inode *);
117 int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
118 struct inode *);
119 int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
120 struct inode *);
121 void (*write_data_binfo)(struct nilfs_sc_info *,
122 struct nilfs_segsum_pointer *,
123 union nilfs_binfo *);
124 void (*write_node_binfo)(struct nilfs_sc_info *,
125 struct nilfs_segsum_pointer *,
126 union nilfs_binfo *);
127};
128
129/*
130 * Other definitions
131 */
132static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
133static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
134static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
693dd321 135static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
9ff05123 136
9ff05123
RK
137#define nilfs_cnt32_ge(a, b) \
138 (typecheck(__u32, a) && typecheck(__u32, b) && \
139 ((__s32)(a) - (__s32)(b) >= 0))
9ff05123 140
feee880f
RK
141static int nilfs_prepare_segment_lock(struct super_block *sb,
142 struct nilfs_transaction_info *ti)
9ff05123
RK
143{
144 struct nilfs_transaction_info *cur_ti = current->journal_info;
145 void *save = NULL;
146
147 if (cur_ti) {
148 if (cur_ti->ti_magic == NILFS_TI_MAGIC)
149 return ++cur_ti->ti_count;
7f00184e
RK
150
151 /*
152 * If journal_info field is occupied by other FS,
153 * it is saved and will be restored on
154 * nilfs_transaction_commit().
155 */
a1d0747a 156 nilfs_warn(sb, "journal info from a different FS");
7f00184e 157 save = current->journal_info;
9ff05123
RK
158 }
159 if (!ti) {
160 ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
161 if (!ti)
162 return -ENOMEM;
163 ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
164 } else {
165 ti->ti_flags = 0;
166 }
167 ti->ti_count = 0;
168 ti->ti_save = save;
169 ti->ti_magic = NILFS_TI_MAGIC;
170 current->journal_info = ti;
171 return 0;
172}
173
174/**
175 * nilfs_transaction_begin - start indivisible file operations.
176 * @sb: super block
177 * @ti: nilfs_transaction_info
178 * @vacancy_check: flags for vacancy rate checks
179 *
180 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
181 * the segment semaphore, to make a segment construction and write tasks
47420c79 182 * exclusive. The function is used with nilfs_transaction_commit() in pairs.
9ff05123
RK
183 * The region enclosed by these two functions can be nested. To avoid a
184 * deadlock, the semaphore is only acquired or released in the outermost call.
185 *
186 * This function allocates a nilfs_transaction_info struct to keep context
187 * information on it. It is initialized and hooked onto the current task in
188 * the outermost call. If a pre-allocated struct is given to @ti, it is used
7a65004b 189 * instead; otherwise a new struct is assigned from a slab.
9ff05123
RK
190 *
191 * When @vacancy_check flag is set, this function will check the amount of
192 * free space, and will wait for the GC to reclaim disk space if low capacity.
193 *
194 * Return Value: On success, 0 is returned. On error, one of the following
195 * negative error code is returned.
196 *
197 * %-ENOMEM - Insufficient memory available.
198 *
9ff05123
RK
199 * %-ENOSPC - No space left on device
200 */
201int nilfs_transaction_begin(struct super_block *sb,
202 struct nilfs_transaction_info *ti,
203 int vacancy_check)
204{
9ff05123 205 struct the_nilfs *nilfs;
feee880f 206 int ret = nilfs_prepare_segment_lock(sb, ti);
44fda114 207 struct nilfs_transaction_info *trace_ti;
9ff05123
RK
208
209 if (unlikely(ret < 0))
210 return ret;
44fda114
HM
211 if (ret > 0) {
212 trace_ti = current->journal_info;
213
214 trace_nilfs2_transaction_transition(sb, trace_ti,
215 trace_ti->ti_count, trace_ti->ti_flags,
216 TRACE_NILFS2_TRANSACTION_BEGIN);
9ff05123 217 return 0;
44fda114 218 }
9ff05123 219
2c22b337 220 sb_start_intwrite(sb);
5beb6e0b 221
e3154e97 222 nilfs = sb->s_fs_info;
9ff05123
RK
223 down_read(&nilfs->ns_segctor_sem);
224 if (vacancy_check && nilfs_near_disk_full(nilfs)) {
225 up_read(&nilfs->ns_segctor_sem);
226 ret = -ENOSPC;
227 goto failed;
228 }
44fda114
HM
229
230 trace_ti = current->journal_info;
231 trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
232 trace_ti->ti_flags,
233 TRACE_NILFS2_TRANSACTION_BEGIN);
9ff05123
RK
234 return 0;
235
236 failed:
237 ti = current->journal_info;
238 current->journal_info = ti->ti_save;
239 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
240 kmem_cache_free(nilfs_transaction_cachep, ti);
2c22b337 241 sb_end_intwrite(sb);
9ff05123
RK
242 return ret;
243}
244
245/**
47420c79 246 * nilfs_transaction_commit - commit indivisible file operations.
9ff05123 247 * @sb: super block
9ff05123 248 *
47420c79
RK
249 * nilfs_transaction_commit() releases the read semaphore which is
250 * acquired by nilfs_transaction_begin(). This is only performed
251 * in outermost call of this function. If a commit flag is set,
252 * nilfs_transaction_commit() sets a timer to start the segment
253 * constructor. If a sync flag is set, it starts construction
254 * directly.
9ff05123 255 */
47420c79 256int nilfs_transaction_commit(struct super_block *sb)
9ff05123
RK
257{
258 struct nilfs_transaction_info *ti = current->journal_info;
e3154e97 259 struct the_nilfs *nilfs = sb->s_fs_info;
9ff05123
RK
260 int err = 0;
261
262 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
47420c79 263 ti->ti_flags |= NILFS_TI_COMMIT;
9ff05123
RK
264 if (ti->ti_count > 0) {
265 ti->ti_count--;
44fda114
HM
266 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
267 ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
9ff05123
RK
268 return 0;
269 }
3fd3fe5a
RK
270 if (nilfs->ns_writer) {
271 struct nilfs_sc_info *sci = nilfs->ns_writer;
272
9ff05123
RK
273 if (ti->ti_flags & NILFS_TI_COMMIT)
274 nilfs_segctor_start_timer(sci);
3fd3fe5a 275 if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
9ff05123
RK
276 nilfs_segctor_do_flush(sci, 0);
277 }
3fd3fe5a 278 up_read(&nilfs->ns_segctor_sem);
44fda114
HM
279 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
280 ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
281
9ff05123
RK
282 current->journal_info = ti->ti_save;
283
284 if (ti->ti_flags & NILFS_TI_SYNC)
285 err = nilfs_construct_segment(sb);
286 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
287 kmem_cache_free(nilfs_transaction_cachep, ti);
2c22b337 288 sb_end_intwrite(sb);
9ff05123
RK
289 return err;
290}
291
47420c79
RK
292void nilfs_transaction_abort(struct super_block *sb)
293{
294 struct nilfs_transaction_info *ti = current->journal_info;
e3154e97 295 struct the_nilfs *nilfs = sb->s_fs_info;
47420c79
RK
296
297 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
298 if (ti->ti_count > 0) {
299 ti->ti_count--;
44fda114
HM
300 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
301 ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
47420c79
RK
302 return;
303 }
e3154e97 304 up_read(&nilfs->ns_segctor_sem);
47420c79 305
44fda114
HM
306 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
307 ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
308
47420c79
RK
309 current->journal_info = ti->ti_save;
310 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
311 kmem_cache_free(nilfs_transaction_cachep, ti);
2c22b337 312 sb_end_intwrite(sb);
47420c79
RK
313}
314
9ff05123
RK
315void nilfs_relax_pressure_in_lock(struct super_block *sb)
316{
e3154e97 317 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 318 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123 319
8cccf05f 320 if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
9ff05123
RK
321 return;
322
323 set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
324 up_read(&nilfs->ns_segctor_sem);
325
326 down_write(&nilfs->ns_segctor_sem);
327 if (sci->sc_flush_request &&
328 test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
329 struct nilfs_transaction_info *ti = current->journal_info;
330
331 ti->ti_flags |= NILFS_TI_WRITER;
332 nilfs_segctor_do_immediate_flush(sci);
333 ti->ti_flags &= ~NILFS_TI_WRITER;
334 }
335 downgrade_write(&nilfs->ns_segctor_sem);
336}
337
f7545144 338static void nilfs_transaction_lock(struct super_block *sb,
9ff05123
RK
339 struct nilfs_transaction_info *ti,
340 int gcflag)
341{
342 struct nilfs_transaction_info *cur_ti = current->journal_info;
e3154e97 343 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 344 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123 345
1f5abe7e 346 WARN_ON(cur_ti);
9ff05123
RK
347 ti->ti_flags = NILFS_TI_WRITER;
348 ti->ti_count = 0;
349 ti->ti_save = cur_ti;
350 ti->ti_magic = NILFS_TI_MAGIC;
9ff05123
RK
351 current->journal_info = ti;
352
353 for (;;) {
44fda114
HM
354 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
355 ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
356
3fd3fe5a
RK
357 down_write(&nilfs->ns_segctor_sem);
358 if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
9ff05123
RK
359 break;
360
3fd3fe5a 361 nilfs_segctor_do_immediate_flush(sci);
9ff05123 362
f7545144 363 up_write(&nilfs->ns_segctor_sem);
aceb4170 364 cond_resched();
9ff05123
RK
365 }
366 if (gcflag)
367 ti->ti_flags |= NILFS_TI_GC;
44fda114
HM
368
369 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
370 ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
9ff05123
RK
371}
372
f7545144 373static void nilfs_transaction_unlock(struct super_block *sb)
9ff05123
RK
374{
375 struct nilfs_transaction_info *ti = current->journal_info;
e3154e97 376 struct the_nilfs *nilfs = sb->s_fs_info;
9ff05123
RK
377
378 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
379 BUG_ON(ti->ti_count > 0);
380
693dd321 381 up_write(&nilfs->ns_segctor_sem);
9ff05123 382 current->journal_info = ti->ti_save;
44fda114
HM
383
384 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
385 ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
9ff05123
RK
386}
387
388static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
389 struct nilfs_segsum_pointer *ssp,
0c6c44cb 390 unsigned int bytes)
9ff05123
RK
391{
392 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
0c6c44cb 393 unsigned int blocksize = sci->sc_super->s_blocksize;
9ff05123
RK
394 void *p;
395
396 if (unlikely(ssp->offset + bytes > blocksize)) {
397 ssp->offset = 0;
398 BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
399 &segbuf->sb_segsum_buffers));
400 ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
401 }
402 p = ssp->bh->b_data + ssp->offset;
403 ssp->offset += bytes;
404 return p;
405}
406
407/**
408 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
409 * @sci: nilfs_sc_info
410 */
411static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
412{
413 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
414 struct buffer_head *sumbh;
0c6c44cb
RK
415 unsigned int sumbytes;
416 unsigned int flags = 0;
9ff05123
RK
417 int err;
418
419 if (nilfs_doing_gc())
420 flags = NILFS_SS_GC;
6c43f410 421 err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
9ff05123
RK
422 if (unlikely(err))
423 return err;
424
425 sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
426 sumbytes = segbuf->sb_sum.sumbytes;
427 sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes;
428 sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes;
429 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
430 return 0;
431}
432
433static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
434{
435 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
436 if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
076a378b
RK
437 return -E2BIG; /*
438 * The current segment is filled up
439 * (internal code)
440 */
9ff05123
RK
441 sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
442 return nilfs_segctor_reset_segment_buffer(sci);
443}
444
445static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
446{
447 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
448 int err;
449
450 if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
451 err = nilfs_segctor_feed_segment(sci);
452 if (err)
453 return err;
454 segbuf = sci->sc_curseg;
455 }
1e2b68bf 456 err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
9ff05123
RK
457 if (likely(!err))
458 segbuf->sb_sum.flags |= NILFS_SS_SR;
459 return err;
460}
461
462/*
463 * Functions for making segment summary and payloads
464 */
465static int nilfs_segctor_segsum_block_required(
466 struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
0c6c44cb 467 unsigned int binfo_size)
9ff05123 468{
0c6c44cb 469 unsigned int blocksize = sci->sc_super->s_blocksize;
9ff05123
RK
470 /* Size of finfo and binfo is enough small against blocksize */
471
472 return ssp->offset + binfo_size +
473 (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
474 blocksize;
475}
476
477static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
478 struct inode *inode)
479{
480 sci->sc_curseg->sb_sum.nfinfo++;
481 sci->sc_binfo_ptr = sci->sc_finfo_ptr;
482 nilfs_segctor_map_segsum_entry(
483 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
c96fa464 484
72746ac6
RK
485 if (NILFS_I(inode)->i_root &&
486 !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
c96fa464 487 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
9ff05123
RK
488 /* skip finfo */
489}
490
491static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
492 struct inode *inode)
493{
494 struct nilfs_finfo *finfo;
495 struct nilfs_inode_info *ii;
496 struct nilfs_segment_buffer *segbuf;
6c43f410 497 __u64 cno;
9ff05123
RK
498
499 if (sci->sc_blk_cnt == 0)
500 return;
501
502 ii = NILFS_I(inode);
6c43f410
RK
503
504 if (test_bit(NILFS_I_GCINODE, &ii->i_state))
505 cno = ii->i_cno;
506 else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
507 cno = 0;
508 else
509 cno = sci->sc_cno;
510
9ff05123
RK
511 finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
512 sizeof(*finfo));
513 finfo->fi_ino = cpu_to_le64(inode->i_ino);
514 finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
515 finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
6c43f410 516 finfo->fi_cno = cpu_to_le64(cno);
9ff05123
RK
517
518 segbuf = sci->sc_curseg;
519 segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
520 sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
521 sci->sc_finfo_ptr = sci->sc_binfo_ptr;
522 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
523}
524
525static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
526 struct buffer_head *bh,
527 struct inode *inode,
0c6c44cb 528 unsigned int binfo_size)
9ff05123
RK
529{
530 struct nilfs_segment_buffer *segbuf;
531 int required, err = 0;
532
533 retry:
534 segbuf = sci->sc_curseg;
535 required = nilfs_segctor_segsum_block_required(
536 sci, &sci->sc_binfo_ptr, binfo_size);
537 if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
538 nilfs_segctor_end_finfo(sci, inode);
539 err = nilfs_segctor_feed_segment(sci);
540 if (err)
541 return err;
542 goto retry;
543 }
544 if (unlikely(required)) {
545 err = nilfs_segbuf_extend_segsum(segbuf);
546 if (unlikely(err))
547 goto failed;
548 }
549 if (sci->sc_blk_cnt == 0)
550 nilfs_segctor_begin_finfo(sci, inode);
551
552 nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
553 /* Substitution to vblocknr is delayed until update_blocknr() */
554 nilfs_segbuf_add_file_buffer(segbuf, bh);
555 sci->sc_blk_cnt++;
556 failed:
557 return err;
558}
559
9ff05123
RK
560/*
561 * Callback functions that enumerate, mark, and collect dirty blocks
562 */
563static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
564 struct buffer_head *bh, struct inode *inode)
565{
566 int err;
567
9ff05123 568 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
e828949e
RK
569 if (err < 0)
570 return err;
9ff05123
RK
571
572 err = nilfs_segctor_add_file_block(sci, bh, inode,
573 sizeof(struct nilfs_binfo_v));
574 if (!err)
575 sci->sc_datablk_cnt++;
576 return err;
577}
578
579static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
580 struct buffer_head *bh,
581 struct inode *inode)
582{
e828949e 583 return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
9ff05123
RK
584}
585
586static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
587 struct buffer_head *bh,
588 struct inode *inode)
589{
1f5abe7e 590 WARN_ON(!buffer_dirty(bh));
9ff05123
RK
591 return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
592}
593
594static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
595 struct nilfs_segsum_pointer *ssp,
596 union nilfs_binfo *binfo)
597{
598 struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
599 sci, ssp, sizeof(*binfo_v));
600 *binfo_v = binfo->bi_v;
601}
602
603static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
604 struct nilfs_segsum_pointer *ssp,
605 union nilfs_binfo *binfo)
606{
607 __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
608 sci, ssp, sizeof(*vblocknr));
609 *vblocknr = binfo->bi_v.bi_vblocknr;
610}
611
1c613cb9 612static const struct nilfs_sc_operations nilfs_sc_file_ops = {
9ff05123
RK
613 .collect_data = nilfs_collect_file_data,
614 .collect_node = nilfs_collect_file_node,
615 .collect_bmap = nilfs_collect_file_bmap,
616 .write_data_binfo = nilfs_write_file_data_binfo,
617 .write_node_binfo = nilfs_write_file_node_binfo,
618};
619
620static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
621 struct buffer_head *bh, struct inode *inode)
622{
623 int err;
624
625 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
e828949e
RK
626 if (err < 0)
627 return err;
9ff05123
RK
628
629 err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
630 if (!err)
631 sci->sc_datablk_cnt++;
632 return err;
633}
634
635static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
636 struct buffer_head *bh, struct inode *inode)
637{
1f5abe7e 638 WARN_ON(!buffer_dirty(bh));
9ff05123
RK
639 return nilfs_segctor_add_file_block(sci, bh, inode,
640 sizeof(struct nilfs_binfo_dat));
641}
642
643static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
644 struct nilfs_segsum_pointer *ssp,
645 union nilfs_binfo *binfo)
646{
647 __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
648 sizeof(*blkoff));
649 *blkoff = binfo->bi_dat.bi_blkoff;
650}
651
652static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
653 struct nilfs_segsum_pointer *ssp,
654 union nilfs_binfo *binfo)
655{
656 struct nilfs_binfo_dat *binfo_dat =
657 nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
658 *binfo_dat = binfo->bi_dat;
659}
660
1c613cb9 661static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
9ff05123
RK
662 .collect_data = nilfs_collect_dat_data,
663 .collect_node = nilfs_collect_file_node,
664 .collect_bmap = nilfs_collect_dat_bmap,
665 .write_data_binfo = nilfs_write_dat_data_binfo,
666 .write_node_binfo = nilfs_write_dat_node_binfo,
667};
668
1c613cb9 669static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
9ff05123
RK
670 .collect_data = nilfs_collect_file_data,
671 .collect_node = NULL,
672 .collect_bmap = NULL,
673 .write_data_binfo = nilfs_write_file_data_binfo,
674 .write_node_binfo = NULL,
675};
676
f30bf3e4
RK
677static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
678 struct list_head *listp,
679 size_t nlimit,
680 loff_t start, loff_t end)
9ff05123 681{
9ff05123 682 struct address_space *mapping = inode->i_mapping;
5ee4b25c 683 struct folio_batch fbatch;
f30bf3e4
RK
684 pgoff_t index = 0, last = ULONG_MAX;
685 size_t ndirties = 0;
686 int i;
9ff05123 687
f30bf3e4
RK
688 if (unlikely(start != 0 || end != LLONG_MAX)) {
689 /*
690 * A valid range is given for sync-ing data pages. The
691 * range is rounded to per-page; extra dirty buffers
692 * may be included if blocksize < pagesize.
693 */
694 index = start >> PAGE_SHIFT;
695 last = end >> PAGE_SHIFT;
696 }
5ee4b25c 697 folio_batch_init(&fbatch);
9ff05123 698 repeat:
f30bf3e4 699 if (unlikely(index > last) ||
5ee4b25c
VMO
700 !filemap_get_folios_tag(mapping, &index, last,
701 PAGECACHE_TAG_DIRTY, &fbatch))
f30bf3e4 702 return ndirties;
9ff05123 703
5ee4b25c 704 for (i = 0; i < folio_batch_count(&fbatch); i++) {
9ff05123 705 struct buffer_head *bh, *head;
5ee4b25c 706 struct folio *folio = fbatch.folios[i];
9ff05123 707
5ee4b25c
VMO
708 folio_lock(folio);
709 head = folio_buffers(folio);
710 if (!head) {
711 create_empty_buffers(&folio->page, i_blocksize(inode), 0);
712 head = folio_buffers(folio);
713 }
714 folio_unlock(folio);
9ff05123 715
5ee4b25c 716 bh = head;
9ff05123 717 do {
7f42ec39 718 if (!buffer_dirty(bh) || buffer_async_write(bh))
f30bf3e4
RK
719 continue;
720 get_bh(bh);
721 list_add_tail(&bh->b_assoc_buffers, listp);
722 ndirties++;
723 if (unlikely(ndirties >= nlimit)) {
5ee4b25c 724 folio_batch_release(&fbatch);
f30bf3e4
RK
725 cond_resched();
726 return ndirties;
9ff05123 727 }
f30bf3e4 728 } while (bh = bh->b_this_page, bh != head);
9ff05123 729 }
5ee4b25c 730 folio_batch_release(&fbatch);
9ff05123 731 cond_resched();
f30bf3e4 732 goto repeat;
9ff05123
RK
733}
734
735static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
736 struct list_head *listp)
737{
738 struct nilfs_inode_info *ii = NILFS_I(inode);
e897be17 739 struct inode *btnc_inode = ii->i_assoc_inode;
a2458658 740 struct folio_batch fbatch;
9ff05123
RK
741 struct buffer_head *bh, *head;
742 unsigned int i;
743 pgoff_t index = 0;
744
e897be17
RK
745 if (!btnc_inode)
746 return;
a2458658 747 folio_batch_init(&fbatch);
e897be17 748
a2458658
VMO
749 while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
750 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
751 for (i = 0; i < folio_batch_count(&fbatch); i++) {
752 bh = head = folio_buffers(fbatch.folios[i]);
9ff05123 753 do {
7f42ec39
VD
754 if (buffer_dirty(bh) &&
755 !buffer_async_write(bh)) {
9ff05123
RK
756 get_bh(bh);
757 list_add_tail(&bh->b_assoc_buffers,
758 listp);
759 }
760 bh = bh->b_this_page;
761 } while (bh != head);
762 }
a2458658 763 folio_batch_release(&fbatch);
9ff05123
RK
764 cond_resched();
765 }
766}
767
693dd321 768static void nilfs_dispose_list(struct the_nilfs *nilfs,
9ff05123
RK
769 struct list_head *head, int force)
770{
771 struct nilfs_inode_info *ii, *n;
772 struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
0c6c44cb 773 unsigned int nv = 0;
9ff05123
RK
774
775 while (!list_empty(head)) {
693dd321 776 spin_lock(&nilfs->ns_inode_lock);
9ff05123
RK
777 list_for_each_entry_safe(ii, n, head, i_dirty) {
778 list_del_init(&ii->i_dirty);
779 if (force) {
780 if (unlikely(ii->i_bh)) {
781 brelse(ii->i_bh);
782 ii->i_bh = NULL;
783 }
784 } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
785 set_bit(NILFS_I_QUEUED, &ii->i_state);
786 list_add_tail(&ii->i_dirty,
693dd321 787 &nilfs->ns_dirty_files);
9ff05123
RK
788 continue;
789 }
790 ivec[nv++] = ii;
791 if (nv == SC_N_INODEVEC)
792 break;
793 }
693dd321 794 spin_unlock(&nilfs->ns_inode_lock);
9ff05123
RK
795
796 for (pii = ivec; nv > 0; pii++, nv--)
797 iput(&(*pii)->vfs_inode);
798 }
799}
800
7ef3ff2f
RK
801static void nilfs_iput_work_func(struct work_struct *work)
802{
803 struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
804 sc_iput_work);
805 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
806
807 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
808}
809
e912a5b6
RK
810static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
811 struct nilfs_root *root)
9ff05123 812{
9ff05123
RK
813 int ret = 0;
814
e912a5b6 815 if (nilfs_mdt_fetch_dirty(root->ifile))
9ff05123
RK
816 ret++;
817 if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
818 ret++;
819 if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
820 ret++;
365e215c
RK
821 if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
822 ret++;
9ff05123
RK
823 return ret;
824}
825
826static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
827{
828 return list_empty(&sci->sc_dirty_files) &&
829 !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
071cb4b8 830 sci->sc_nfreesegs == 0 &&
9ff05123
RK
831 (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
832}
833
834static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
835{
e3154e97 836 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
837 int ret = 0;
838
693dd321 839 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
9ff05123
RK
840 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
841
693dd321
RK
842 spin_lock(&nilfs->ns_inode_lock);
843 if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
9ff05123
RK
844 ret++;
845
693dd321 846 spin_unlock(&nilfs->ns_inode_lock);
9ff05123
RK
847 return ret;
848}
849
850static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
851{
e3154e97 852 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123 853
e912a5b6 854 nilfs_mdt_clear_dirty(sci->sc_root->ifile);
9ff05123
RK
855 nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
856 nilfs_mdt_clear_dirty(nilfs->ns_sufile);
365e215c 857 nilfs_mdt_clear_dirty(nilfs->ns_dat);
9ff05123
RK
858}
859
860static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
861{
e3154e97 862 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
863 struct buffer_head *bh_cp;
864 struct nilfs_checkpoint *raw_cp;
865 int err;
866
867 /* XXX: this interface will be changed */
868 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
869 &raw_cp, &bh_cp);
870 if (likely(!err)) {
076a378b
RK
871 /*
872 * The following code is duplicated with cpfile. But, it is
873 * needed to collect the checkpoint even if it was not newly
874 * created.
875 */
5fc7b141 876 mark_buffer_dirty(bh_cp);
9ff05123
RK
877 nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
878 nilfs_cpfile_put_checkpoint(
879 nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
723ac751
RK
880 } else if (err == -EINVAL || err == -ENOENT) {
881 nilfs_error(sci->sc_super,
882 "checkpoint creation failed due to metadata corruption.");
883 err = -EIO;
884 }
9ff05123
RK
885 return err;
886}
887
888static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
889{
e3154e97 890 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
891 struct buffer_head *bh_cp;
892 struct nilfs_checkpoint *raw_cp;
893 int err;
894
895 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
896 &raw_cp, &bh_cp);
897 if (unlikely(err)) {
723ac751
RK
898 if (err == -EINVAL || err == -ENOENT) {
899 nilfs_error(sci->sc_super,
900 "checkpoint finalization failed due to metadata corruption.");
901 err = -EIO;
902 }
9ff05123
RK
903 goto failed_ibh;
904 }
905 raw_cp->cp_snapshot_list.ssl_next = 0;
906 raw_cp->cp_snapshot_list.ssl_prev = 0;
907 raw_cp->cp_inodes_count =
e5f7f848 908 cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
9ff05123 909 raw_cp->cp_blocks_count =
e5f7f848 910 cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
9ff05123
RK
911 raw_cp->cp_nblk_inc =
912 cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
913 raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
914 raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
458c5b08 915
c96fa464
RK
916 if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
917 nilfs_checkpoint_clear_minor(raw_cp);
918 else
919 nilfs_checkpoint_set_minor(raw_cp);
920
e912a5b6
RK
921 nilfs_write_inode_common(sci->sc_root->ifile,
922 &raw_cp->cp_ifile_inode, 1);
9ff05123
RK
923 nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
924 return 0;
925
926 failed_ibh:
927 return err;
928}
929
930static void nilfs_fill_in_file_bmap(struct inode *ifile,
931 struct nilfs_inode_info *ii)
932
933{
934 struct buffer_head *ibh;
935 struct nilfs_inode *raw_inode;
936
937 if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
938 ibh = ii->i_bh;
939 BUG_ON(!ibh);
940 raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
941 ibh);
942 nilfs_bmap_write(ii->i_bmap, raw_inode);
943 nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
944 }
945}
946
e912a5b6 947static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
9ff05123
RK
948{
949 struct nilfs_inode_info *ii;
950
951 list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
e912a5b6 952 nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
9ff05123
RK
953 set_bit(NILFS_I_COLLECTED, &ii->i_state);
954 }
9ff05123
RK
955}
956
9ff05123
RK
957static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
958 struct the_nilfs *nilfs)
959{
1e2b68bf
RK
960 struct buffer_head *bh_sr;
961 struct nilfs_super_root *raw_sr;
0c6c44cb 962 unsigned int isz, srsz;
9ff05123 963
1e2b68bf
RK
964 bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
965 raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
56eb5538
RK
966 isz = nilfs->ns_inode_size;
967 srsz = NILFS_SR_BYTES(isz);
1e2b68bf 968
56eb5538 969 raw_sr->sr_bytes = cpu_to_le16(srsz);
9ff05123
RK
970 raw_sr->sr_nongc_ctime
971 = cpu_to_le64(nilfs_doing_gc() ?
972 nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
973 raw_sr->sr_flags = 0;
974
365e215c 975 nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
3961f0e2
RK
976 NILFS_SR_DAT_OFFSET(isz), 1);
977 nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
978 NILFS_SR_CPFILE_OFFSET(isz), 1);
979 nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
980 NILFS_SR_SUFILE_OFFSET(isz), 1);
56eb5538 981 memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
9ff05123
RK
982}
983
984static void nilfs_redirty_inodes(struct list_head *head)
985{
986 struct nilfs_inode_info *ii;
987
988 list_for_each_entry(ii, head, i_dirty) {
989 if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
990 clear_bit(NILFS_I_COLLECTED, &ii->i_state);
991 }
992}
993
994static void nilfs_drop_collected_inodes(struct list_head *head)
995{
996 struct nilfs_inode_info *ii;
997
998 list_for_each_entry(ii, head, i_dirty) {
999 if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
1000 continue;
1001
b9f66140 1002 clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
9ff05123
RK
1003 set_bit(NILFS_I_UPDATED, &ii->i_state);
1004 }
1005}
1006
9ff05123
RK
1007static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
1008 struct inode *inode,
1009 struct list_head *listp,
1010 int (*collect)(struct nilfs_sc_info *,
1011 struct buffer_head *,
1012 struct inode *))
1013{
1014 struct buffer_head *bh, *n;
1015 int err = 0;
1016
1017 if (collect) {
1018 list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
1019 list_del_init(&bh->b_assoc_buffers);
1020 err = collect(sci, bh, inode);
1021 brelse(bh);
1022 if (unlikely(err))
1023 goto dispose_buffers;
1024 }
1025 return 0;
1026 }
1027
1028 dispose_buffers:
1029 while (!list_empty(listp)) {
0cc12838
RK
1030 bh = list_first_entry(listp, struct buffer_head,
1031 b_assoc_buffers);
9ff05123
RK
1032 list_del_init(&bh->b_assoc_buffers);
1033 brelse(bh);
1034 }
1035 return err;
1036}
1037
f30bf3e4
RK
1038static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1039{
1040 /* Remaining number of blocks within segment buffer */
1041 return sci->sc_segbuf_nblocks -
1042 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1043}
1044
9ff05123
RK
1045static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1046 struct inode *inode,
1c613cb9 1047 const struct nilfs_sc_operations *sc_ops)
9ff05123
RK
1048{
1049 LIST_HEAD(data_buffers);
1050 LIST_HEAD(node_buffers);
f30bf3e4 1051 int err;
9ff05123
RK
1052
1053 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
f30bf3e4
RK
1054 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1055
1056 n = nilfs_lookup_dirty_data_buffers(
1057 inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1058 if (n > rest) {
1059 err = nilfs_segctor_apply_buffers(
9ff05123 1060 sci, inode, &data_buffers,
f30bf3e4
RK
1061 sc_ops->collect_data);
1062 BUG_ON(!err); /* always receive -E2BIG or true error */
9ff05123
RK
1063 goto break_or_fail;
1064 }
1065 }
1066 nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1067
1068 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1069 err = nilfs_segctor_apply_buffers(
1070 sci, inode, &data_buffers, sc_ops->collect_data);
1071 if (unlikely(err)) {
1072 /* dispose node list */
1073 nilfs_segctor_apply_buffers(
1074 sci, inode, &node_buffers, NULL);
1075 goto break_or_fail;
1076 }
1077 sci->sc_stage.flags |= NILFS_CF_NODE;
1078 }
1079 /* Collect node */
1080 err = nilfs_segctor_apply_buffers(
1081 sci, inode, &node_buffers, sc_ops->collect_node);
1082 if (unlikely(err))
1083 goto break_or_fail;
1084
1085 nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1086 err = nilfs_segctor_apply_buffers(
1087 sci, inode, &node_buffers, sc_ops->collect_bmap);
1088 if (unlikely(err))
1089 goto break_or_fail;
1090
1091 nilfs_segctor_end_finfo(sci, inode);
1092 sci->sc_stage.flags &= ~NILFS_CF_NODE;
1093
1094 break_or_fail:
1095 return err;
1096}
1097
1098static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1099 struct inode *inode)
1100{
1101 LIST_HEAD(data_buffers);
f30bf3e4
RK
1102 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1103 int err;
9ff05123 1104
f30bf3e4
RK
1105 n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1106 sci->sc_dsync_start,
1107 sci->sc_dsync_end);
1108
1109 err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1110 nilfs_collect_file_data);
1111 if (!err) {
9ff05123 1112 nilfs_segctor_end_finfo(sci, inode);
f30bf3e4
RK
1113 BUG_ON(n > rest);
1114 /* always receive -E2BIG or true error if n > rest */
1115 }
9ff05123
RK
1116 return err;
1117}
1118
1119static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1120{
e3154e97 1121 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
1122 struct list_head *head;
1123 struct nilfs_inode_info *ii;
071cb4b8 1124 size_t ndone;
9ff05123
RK
1125 int err = 0;
1126
58497703 1127 switch (nilfs_sc_cstage_get(sci)) {
9ff05123
RK
1128 case NILFS_ST_INIT:
1129 /* Pre-processes */
1130 sci->sc_stage.flags = 0;
1131
1132 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1133 sci->sc_nblk_inc = 0;
1134 sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1135 if (mode == SC_LSEG_DSYNC) {
58497703 1136 nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
9ff05123
RK
1137 goto dsync_mode;
1138 }
1139 }
1140
1141 sci->sc_stage.dirty_file_ptr = NULL;
1142 sci->sc_stage.gc_inode_ptr = NULL;
1143 if (mode == SC_FLUSH_DAT) {
58497703 1144 nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
9ff05123
RK
1145 goto dat_stage;
1146 }
df561f66
GS
1147 nilfs_sc_cstage_inc(sci);
1148 fallthrough;
9ff05123
RK
1149 case NILFS_ST_GC:
1150 if (nilfs_doing_gc()) {
1151 head = &sci->sc_gc_inodes;
1152 ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1153 head, i_dirty);
1154 list_for_each_entry_continue(ii, head, i_dirty) {
1155 err = nilfs_segctor_scan_file(
1156 sci, &ii->vfs_inode,
1157 &nilfs_sc_file_ops);
1158 if (unlikely(err)) {
1159 sci->sc_stage.gc_inode_ptr = list_entry(
1160 ii->i_dirty.prev,
1161 struct nilfs_inode_info,
1162 i_dirty);
1163 goto break_or_fail;
1164 }
1165 set_bit(NILFS_I_COLLECTED, &ii->i_state);
1166 }
1167 sci->sc_stage.gc_inode_ptr = NULL;
1168 }
df561f66
GS
1169 nilfs_sc_cstage_inc(sci);
1170 fallthrough;
9ff05123
RK
1171 case NILFS_ST_FILE:
1172 head = &sci->sc_dirty_files;
1173 ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1174 i_dirty);
1175 list_for_each_entry_continue(ii, head, i_dirty) {
1176 clear_bit(NILFS_I_DIRTY, &ii->i_state);
1177
1178 err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1179 &nilfs_sc_file_ops);
1180 if (unlikely(err)) {
1181 sci->sc_stage.dirty_file_ptr =
1182 list_entry(ii->i_dirty.prev,
1183 struct nilfs_inode_info,
1184 i_dirty);
1185 goto break_or_fail;
1186 }
1187 /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1188 /* XXX: required ? */
1189 }
1190 sci->sc_stage.dirty_file_ptr = NULL;
1191 if (mode == SC_FLUSH_FILE) {
58497703 1192 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
9ff05123
RK
1193 return 0;
1194 }
58497703 1195 nilfs_sc_cstage_inc(sci);
9ff05123 1196 sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
df561f66 1197 fallthrough;
9ff05123 1198 case NILFS_ST_IFILE:
e912a5b6 1199 err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
9ff05123
RK
1200 &nilfs_sc_file_ops);
1201 if (unlikely(err))
1202 break;
58497703 1203 nilfs_sc_cstage_inc(sci);
9ff05123
RK
1204 /* Creating a checkpoint */
1205 err = nilfs_segctor_create_checkpoint(sci);
1206 if (unlikely(err))
1207 break;
df561f66 1208 fallthrough;
9ff05123
RK
1209 case NILFS_ST_CPFILE:
1210 err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1211 &nilfs_sc_file_ops);
1212 if (unlikely(err))
1213 break;
df561f66
GS
1214 nilfs_sc_cstage_inc(sci);
1215 fallthrough;
9ff05123 1216 case NILFS_ST_SUFILE:
071cb4b8
RK
1217 err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1218 sci->sc_nfreesegs, &ndone);
1219 if (unlikely(err)) {
1220 nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1221 sci->sc_freesegs, ndone,
1222 NULL);
9ff05123 1223 break;
071cb4b8
RK
1224 }
1225 sci->sc_stage.flags |= NILFS_CF_SUFREED;
1226
9ff05123
RK
1227 err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1228 &nilfs_sc_file_ops);
1229 if (unlikely(err))
1230 break;
df561f66
GS
1231 nilfs_sc_cstage_inc(sci);
1232 fallthrough;
9ff05123
RK
1233 case NILFS_ST_DAT:
1234 dat_stage:
365e215c 1235 err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
9ff05123
RK
1236 &nilfs_sc_dat_ops);
1237 if (unlikely(err))
1238 break;
1239 if (mode == SC_FLUSH_DAT) {
58497703 1240 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
9ff05123
RK
1241 return 0;
1242 }
df561f66
GS
1243 nilfs_sc_cstage_inc(sci);
1244 fallthrough;
9ff05123
RK
1245 case NILFS_ST_SR:
1246 if (mode == SC_LSEG_SR) {
1247 /* Appending a super root */
1248 err = nilfs_segctor_add_super_root(sci);
1249 if (unlikely(err))
1250 break;
1251 }
1252 /* End of a logical segment */
1253 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
58497703 1254 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
9ff05123
RK
1255 return 0;
1256 case NILFS_ST_DSYNC:
1257 dsync_mode:
1258 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
f30bf3e4 1259 ii = sci->sc_dsync_inode;
9ff05123
RK
1260 if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1261 break;
1262
1263 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1264 if (unlikely(err))
1265 break;
9ff05123 1266 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
58497703 1267 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
9ff05123
RK
1268 return 0;
1269 case NILFS_ST_DONE:
1270 return 0;
1271 default:
1272 BUG();
1273 }
1274
1275 break_or_fail:
1276 return err;
1277}
1278
a694291a
RK
1279/**
1280 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
1281 * @sci: nilfs_sc_info
1282 * @nilfs: nilfs object
1283 */
9ff05123
RK
1284static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1285 struct the_nilfs *nilfs)
1286{
a694291a 1287 struct nilfs_segment_buffer *segbuf, *prev;
9ff05123 1288 __u64 nextnum;
a694291a 1289 int err, alloc = 0;
9ff05123 1290
a694291a
RK
1291 segbuf = nilfs_segbuf_new(sci->sc_super);
1292 if (unlikely(!segbuf))
1293 return -ENOMEM;
9ff05123 1294
a694291a
RK
1295 if (list_empty(&sci->sc_write_logs)) {
1296 nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1297 nilfs->ns_pseg_offset, nilfs);
1298 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1299 nilfs_shift_to_next_segment(nilfs);
1300 nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1301 }
9ff05123 1302
a694291a
RK
1303 segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1304 nextnum = nilfs->ns_nextnum;
1305
1306 if (nilfs->ns_segnum == nilfs->ns_nextnum)
1307 /* Start from the head of a new full segment */
1308 alloc++;
1309 } else {
1310 /* Continue logs */
1311 prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1312 nilfs_segbuf_map_cont(segbuf, prev);
1313 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1314 nextnum = prev->sb_nextnum;
1315
1316 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1317 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1318 segbuf->sb_sum.seg_seq++;
1319 alloc++;
1320 }
9ff05123 1321 }
9ff05123 1322
61a189e9 1323 err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
a694291a
RK
1324 if (err)
1325 goto failed;
9ff05123 1326
a694291a 1327 if (alloc) {
cece5520 1328 err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
a694291a
RK
1329 if (err)
1330 goto failed;
1331 }
9ff05123
RK
1332 nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1333
a694291a
RK
1334 BUG_ON(!list_empty(&sci->sc_segbufs));
1335 list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1336 sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
cece5520 1337 return 0;
a694291a
RK
1338
1339 failed:
1340 nilfs_segbuf_free(segbuf);
1341 return err;
9ff05123
RK
1342}
1343
1344static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1345 struct the_nilfs *nilfs, int nadd)
1346{
e29df395 1347 struct nilfs_segment_buffer *segbuf, *prev;
9ff05123
RK
1348 struct inode *sufile = nilfs->ns_sufile;
1349 __u64 nextnextnum;
1350 LIST_HEAD(list);
1351 int err, ret, i;
1352
1353 prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1354 /*
1355 * Since the segment specified with nextnum might be allocated during
1356 * the previous construction, the buffer including its segusage may
1357 * not be dirty. The following call ensures that the buffer is dirty
1358 * and will pin the buffer on memory until the sufile is written.
1359 */
61a189e9 1360 err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
9ff05123
RK
1361 if (unlikely(err))
1362 return err;
1363
1364 for (i = 0; i < nadd; i++) {
1365 /* extend segment info */
1366 err = -ENOMEM;
1367 segbuf = nilfs_segbuf_new(sci->sc_super);
1368 if (unlikely(!segbuf))
1369 goto failed;
1370
1371 /* map this buffer to region of segment on-disk */
cece5520 1372 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
9ff05123
RK
1373 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1374
1375 /* allocate the next next full segment */
1376 err = nilfs_sufile_alloc(sufile, &nextnextnum);
1377 if (unlikely(err))
1378 goto failed_segbuf;
1379
1380 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1381 nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1382
1383 list_add_tail(&segbuf->sb_list, &list);
1384 prev = segbuf;
1385 }
0935db74 1386 list_splice_tail(&list, &sci->sc_segbufs);
9ff05123
RK
1387 return 0;
1388
1389 failed_segbuf:
1390 nilfs_segbuf_free(segbuf);
1391 failed:
e29df395 1392 list_for_each_entry(segbuf, &list, sb_list) {
9ff05123 1393 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1f5abe7e 1394 WARN_ON(ret); /* never fails */
9ff05123 1395 }
e29df395 1396 nilfs_destroy_logs(&list);
9ff05123
RK
1397 return err;
1398}
1399
a694291a
RK
1400static void nilfs_free_incomplete_logs(struct list_head *logs,
1401 struct the_nilfs *nilfs)
9ff05123 1402{
a694291a
RK
1403 struct nilfs_segment_buffer *segbuf, *prev;
1404 struct inode *sufile = nilfs->ns_sufile;
9284ad2a 1405 int ret;
9ff05123 1406
a694291a 1407 segbuf = NILFS_FIRST_SEGBUF(logs);
9ff05123 1408 if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
a694291a 1409 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1f5abe7e 1410 WARN_ON(ret); /* never fails */
9ff05123 1411 }
9284ad2a 1412 if (atomic_read(&segbuf->sb_err)) {
9ff05123
RK
1413 /* Case 1: The first segment failed */
1414 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
076a378b
RK
1415 /*
1416 * Case 1a: Partial segment appended into an existing
1417 * segment
1418 */
9ff05123
RK
1419 nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1420 segbuf->sb_fseg_end);
1421 else /* Case 1b: New full segment */
1422 set_nilfs_discontinued(nilfs);
9ff05123
RK
1423 }
1424
a694291a
RK
1425 prev = segbuf;
1426 list_for_each_entry_continue(segbuf, logs, sb_list) {
1427 if (prev->sb_nextnum != segbuf->sb_nextnum) {
1428 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1429 WARN_ON(ret); /* never fails */
1430 }
9284ad2a
RK
1431 if (atomic_read(&segbuf->sb_err) &&
1432 segbuf->sb_segnum != nilfs->ns_nextnum)
1433 /* Case 2: extended segment (!= next) failed */
a694291a
RK
1434 nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1435 prev = segbuf;
9ff05123 1436 }
9ff05123
RK
1437}
1438
1439static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1440 struct inode *sufile)
1441{
1442 struct nilfs_segment_buffer *segbuf;
9ff05123
RK
1443 unsigned long live_blocks;
1444 int ret;
1445
1446 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
9ff05123
RK
1447 live_blocks = segbuf->sb_sum.nblocks +
1448 (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
071ec54d
RK
1449 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1450 live_blocks,
1451 sci->sc_seg_ctime);
1452 WARN_ON(ret); /* always succeed because the segusage is dirty */
9ff05123
RK
1453 }
1454}
1455
a694291a 1456static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
9ff05123
RK
1457{
1458 struct nilfs_segment_buffer *segbuf;
9ff05123
RK
1459 int ret;
1460
a694291a 1461 segbuf = NILFS_FIRST_SEGBUF(logs);
071ec54d
RK
1462 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1463 segbuf->sb_pseg_start -
1464 segbuf->sb_fseg_start, 0);
1465 WARN_ON(ret); /* always succeed because the segusage is dirty */
9ff05123 1466
a694291a 1467 list_for_each_entry_continue(segbuf, logs, sb_list) {
071ec54d
RK
1468 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1469 0, 0);
1f5abe7e 1470 WARN_ON(ret); /* always succeed */
9ff05123
RK
1471 }
1472}
1473
1474static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1475 struct nilfs_segment_buffer *last,
1476 struct inode *sufile)
1477{
e29df395 1478 struct nilfs_segment_buffer *segbuf = last;
9ff05123
RK
1479 int ret;
1480
e29df395 1481 list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
9ff05123
RK
1482 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1483 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1f5abe7e 1484 WARN_ON(ret);
9ff05123 1485 }
e29df395 1486 nilfs_truncate_logs(&sci->sc_segbufs, last);
9ff05123
RK
1487}
1488
1489
1490static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1491 struct the_nilfs *nilfs, int mode)
1492{
1493 struct nilfs_cstage prev_stage = sci->sc_stage;
1494 int err, nadd = 1;
1495
1496 /* Collection retry loop */
1497 for (;;) {
9ff05123
RK
1498 sci->sc_nblk_this_inc = 0;
1499 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1500
1501 err = nilfs_segctor_reset_segment_buffer(sci);
1502 if (unlikely(err))
1503 goto failed;
1504
1505 err = nilfs_segctor_collect_blocks(sci, mode);
1506 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1507 if (!err)
1508 break;
1509
1510 if (unlikely(err != -E2BIG))
1511 goto failed;
1512
1513 /* The current segment is filled up */
58497703
HM
1514 if (mode != SC_LSEG_SR ||
1515 nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
9ff05123
RK
1516 break;
1517
2d8428ac
RK
1518 nilfs_clear_logs(&sci->sc_segbufs);
1519
071cb4b8
RK
1520 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1521 err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1522 sci->sc_freesegs,
1523 sci->sc_nfreesegs,
1524 NULL);
1525 WARN_ON(err); /* do not happen */
70f2fe3a 1526 sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
071cb4b8 1527 }
70f2fe3a
AR
1528
1529 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1530 if (unlikely(err))
1531 return err;
1532
9ff05123
RK
1533 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1534 sci->sc_stage = prev_stage;
1535 }
1536 nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1537 return 0;
1538
1539 failed:
1540 return err;
1541}
1542
1543static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1544 struct buffer_head *new_bh)
1545{
1546 BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1547
1548 list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1549 /* The caller must release old_bh */
1550}
1551
1552static int
1553nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1554 struct nilfs_segment_buffer *segbuf,
1555 int mode)
1556{
1557 struct inode *inode = NULL;
1558 sector_t blocknr;
1559 unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1560 unsigned long nblocks = 0, ndatablk = 0;
1c613cb9 1561 const struct nilfs_sc_operations *sc_op = NULL;
9ff05123
RK
1562 struct nilfs_segsum_pointer ssp;
1563 struct nilfs_finfo *finfo = NULL;
1564 union nilfs_binfo binfo;
1565 struct buffer_head *bh, *bh_org;
1566 ino_t ino = 0;
1567 int err = 0;
1568
1569 if (!nfinfo)
1570 goto out;
1571
1572 blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1573 ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1574 ssp.offset = sizeof(struct nilfs_segment_summary);
1575
1576 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1e2b68bf 1577 if (bh == segbuf->sb_super_root)
9ff05123
RK
1578 break;
1579 if (!finfo) {
1580 finfo = nilfs_segctor_map_segsum_entry(
1581 sci, &ssp, sizeof(*finfo));
1582 ino = le64_to_cpu(finfo->fi_ino);
1583 nblocks = le32_to_cpu(finfo->fi_nblocks);
1584 ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1585
6ad4cd7f 1586 inode = bh->b_folio->mapping->host;
9ff05123
RK
1587
1588 if (mode == SC_LSEG_DSYNC)
1589 sc_op = &nilfs_sc_dsync_ops;
1590 else if (ino == NILFS_DAT_INO)
1591 sc_op = &nilfs_sc_dat_ops;
1592 else /* file blocks */
1593 sc_op = &nilfs_sc_file_ops;
1594 }
1595 bh_org = bh;
1596 get_bh(bh_org);
1597 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1598 &binfo);
1599 if (bh != bh_org)
1600 nilfs_list_replace_buffer(bh_org, bh);
1601 brelse(bh_org);
1602 if (unlikely(err))
1603 goto failed_bmap;
1604
1605 if (ndatablk > 0)
1606 sc_op->write_data_binfo(sci, &ssp, &binfo);
1607 else
1608 sc_op->write_node_binfo(sci, &ssp, &binfo);
1609
1610 blocknr++;
1611 if (--nblocks == 0) {
1612 finfo = NULL;
1613 if (--nfinfo == 0)
1614 break;
1615 } else if (ndatablk > 0)
1616 ndatablk--;
1617 }
1618 out:
1619 return 0;
1620
1621 failed_bmap:
9ff05123
RK
1622 return err;
1623}
1624
1625static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1626{
1627 struct nilfs_segment_buffer *segbuf;
1628 int err;
1629
1630 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1631 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1632 if (unlikely(err))
1633 return err;
1634 nilfs_segbuf_fill_in_segsum(segbuf);
1635 }
1636 return 0;
1637}
1638
1cb2d38c 1639static void nilfs_begin_page_io(struct page *page)
9ff05123
RK
1640{
1641 if (!page || PageWriteback(page))
076a378b
RK
1642 /*
1643 * For split b-tree node pages, this function may be called
1644 * twice. We ignore the 2nd or later calls by this check.
1645 */
1cb2d38c 1646 return;
9ff05123
RK
1647
1648 lock_page(page);
1649 clear_page_dirty_for_io(page);
1650 set_page_writeback(page);
1651 unlock_page(page);
9ff05123
RK
1652}
1653
1cb2d38c 1654static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
9ff05123
RK
1655{
1656 struct nilfs_segment_buffer *segbuf;
1657 struct page *bd_page = NULL, *fs_page = NULL;
9ff05123 1658
9ff05123
RK
1659 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1660 struct buffer_head *bh;
1661
1662 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1663 b_assoc_buffers) {
1664 if (bh->b_page != bd_page) {
1665 if (bd_page) {
1666 lock_page(bd_page);
1667 clear_page_dirty_for_io(bd_page);
1668 set_page_writeback(bd_page);
1669 unlock_page(bd_page);
1670 }
1671 bd_page = bh->b_page;
1672 }
1673 }
1674
1675 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1676 b_assoc_buffers) {
7f42ec39 1677 set_buffer_async_write(bh);
1e2b68bf 1678 if (bh == segbuf->sb_super_root) {
9ff05123
RK
1679 if (bh->b_page != bd_page) {
1680 lock_page(bd_page);
1681 clear_page_dirty_for_io(bd_page);
1682 set_page_writeback(bd_page);
1683 unlock_page(bd_page);
1684 bd_page = bh->b_page;
1685 }
1686 break;
1687 }
1688 if (bh->b_page != fs_page) {
1cb2d38c 1689 nilfs_begin_page_io(fs_page);
9ff05123
RK
1690 fs_page = bh->b_page;
1691 }
1692 }
1693 }
1694 if (bd_page) {
1695 lock_page(bd_page);
1696 clear_page_dirty_for_io(bd_page);
1697 set_page_writeback(bd_page);
1698 unlock_page(bd_page);
1699 }
1cb2d38c 1700 nilfs_begin_page_io(fs_page);
9ff05123
RK
1701}
1702
1703static int nilfs_segctor_write(struct nilfs_sc_info *sci,
9c965bac 1704 struct the_nilfs *nilfs)
9ff05123 1705{
d1c6b72a 1706 int ret;
9ff05123 1707
d1c6b72a 1708 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
a694291a
RK
1709 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1710 return ret;
9ff05123
RK
1711}
1712
9ff05123
RK
1713static void nilfs_end_page_io(struct page *page, int err)
1714{
1715 if (!page)
1716 return;
1717
a9777845 1718 if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
8227b297
RK
1719 /*
1720 * For b-tree node pages, this function may be called twice
1721 * or more because they might be split in a segment.
1722 */
a9777845
RK
1723 if (PageDirty(page)) {
1724 /*
1725 * For pages holding split b-tree node buffers, dirty
1726 * flag on the buffers may be cleared discretely.
1727 * In that case, the page is once redirtied for
1728 * remaining buffers, and it must be cancelled if
1729 * all the buffers get cleaned later.
1730 */
1731 lock_page(page);
1732 if (nilfs_page_buffers_clean(page))
1733 __nilfs_clear_page_dirty(page);
1734 unlock_page(page);
1735 }
9ff05123 1736 return;
a9777845 1737 }
9ff05123 1738
1cb2d38c
RK
1739 if (!err) {
1740 if (!nilfs_page_buffers_clean(page))
1741 __set_page_dirty_nobuffers(page);
1742 ClearPageError(page);
1743 } else {
1744 __set_page_dirty_nobuffers(page);
1745 SetPageError(page);
9ff05123 1746 }
1cb2d38c
RK
1747
1748 end_page_writeback(page);
9ff05123
RK
1749}
1750
1cb2d38c 1751static void nilfs_abort_logs(struct list_head *logs, int err)
9ff05123
RK
1752{
1753 struct nilfs_segment_buffer *segbuf;
1754 struct page *bd_page = NULL, *fs_page = NULL;
a694291a 1755 struct buffer_head *bh;
9ff05123 1756
a694291a
RK
1757 if (list_empty(logs))
1758 return;
9ff05123 1759
a694291a 1760 list_for_each_entry(segbuf, logs, sb_list) {
9ff05123
RK
1761 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1762 b_assoc_buffers) {
1763 if (bh->b_page != bd_page) {
1764 if (bd_page)
1765 end_page_writeback(bd_page);
1766 bd_page = bh->b_page;
1767 }
1768 }
1769
1770 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1771 b_assoc_buffers) {
7f42ec39 1772 clear_buffer_async_write(bh);
1e2b68bf 1773 if (bh == segbuf->sb_super_root) {
9ff05123
RK
1774 if (bh->b_page != bd_page) {
1775 end_page_writeback(bd_page);
1776 bd_page = bh->b_page;
1777 }
1778 break;
1779 }
1780 if (bh->b_page != fs_page) {
1781 nilfs_end_page_io(fs_page, err);
9ff05123
RK
1782 fs_page = bh->b_page;
1783 }
1784 }
1785 }
1786 if (bd_page)
1787 end_page_writeback(bd_page);
1788
1789 nilfs_end_page_io(fs_page, err);
a694291a
RK
1790}
1791
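/*
 * nilfs_segctor_abort_construction - roll back a failed log write.
 * Logs already submitted (sc_write_logs) are waited for and aborted
 * together with the logs not yet submitted (sc_segbufs); segment usage
 * updates are cancelled, partially written segments are released, and
 * segments freed on behalf of the cleaner are restored before the
 * segment buffers are destroyed.
 */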
1792static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1793 struct the_nilfs *nilfs, int err)
1794{
1795 LIST_HEAD(logs);
1796 int ret;
1797
1798 list_splice_tail_init(&sci->sc_write_logs, &logs);
1799 ret = nilfs_wait_on_logs(&logs);
1cb2d38c 1800 nilfs_abort_logs(&logs, ret ? : err);
a694291a
RK
1801
1802 list_splice_tail_init(&sci->sc_segbufs, &logs);
1803 nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1804 nilfs_free_incomplete_logs(&logs, nilfs);
a694291a
RK
1805
1806 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1807 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1808 sci->sc_freesegs,
1809 sci->sc_nfreesegs,
1810 NULL);
1811 WARN_ON(ret); /* should not happen */
1812 }
1813
1814 nilfs_destroy_logs(&logs);
9ff05123
RK
1815}
1816
1817static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1818 struct nilfs_segment_buffer *segbuf)
1819{
1820 nilfs->ns_segnum = segbuf->sb_segnum;
1821 nilfs->ns_nextnum = segbuf->sb_nextnum;
1822 nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1823 + segbuf->sb_sum.nblocks;
1824 nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1825 nilfs->ns_ctime = segbuf->sb_sum.ctime;
1826}
1827
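/*
 * nilfs_segctor_complete_write - finish a successful log write.
 * Buffers of the written logs are marked up to date and clean, writeback
 * is ended on their pages, the position of the next segment is recorded
 * in the_nilfs, and, if a super root was written, the checkpoint number
 * is advanced and the dirty flags of the metadata files are cleared.
 */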
1828static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1829{
1830 struct nilfs_segment_buffer *segbuf;
1831 struct page *bd_page = NULL, *fs_page = NULL;
e3154e97 1832 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1e2b68bf 1833 int update_sr = false;
9ff05123 1834
a694291a 1835 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
9ff05123
RK
1836 struct buffer_head *bh;
1837
1838 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1839 b_assoc_buffers) {
1840 set_buffer_uptodate(bh);
1841 clear_buffer_dirty(bh);
1842 if (bh->b_page != bd_page) {
1843 if (bd_page)
1844 end_page_writeback(bd_page);
1845 bd_page = bh->b_page;
1846 }
1847 }
1848 /*
1849 * We assume that buffers belonging to the same page are
1850 * contiguous in the buffer list.
1851 * Under this assumption, the last BH of each page is
1852 * identifiable by the discontinuity of bh->b_page
1853 * (page != fs_page).
1854 *
1855 * For B-tree node blocks, however, this assumption is not
1856 * guaranteed. The cleanup code of B-tree node pages needs
1857 * special care.
1858 */
1859 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1860 b_assoc_buffers) {
4ce5c342 1861 const unsigned long set_bits = BIT(BH_Uptodate);
ead8ecff 1862 const unsigned long clear_bits =
4ce5c342
RK
1863 (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1864 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1865 BIT(BH_NILFS_Redirected));
ead8ecff
RK
1866
1867 set_mask_bits(&bh->b_state, clear_bits, set_bits);
1e2b68bf 1868 if (bh == segbuf->sb_super_root) {
9ff05123
RK
1869 if (bh->b_page != bd_page) {
1870 end_page_writeback(bd_page);
1871 bd_page = bh->b_page;
1872 }
1e2b68bf 1873 update_sr = true;
9ff05123
RK
1874 break;
1875 }
1876 if (bh->b_page != fs_page) {
1877 nilfs_end_page_io(fs_page, 0);
1878 fs_page = bh->b_page;
1879 }
1880 }
1881
4762077c
RK
1882 if (!nilfs_segbuf_simplex(segbuf)) {
1883 if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
9ff05123
RK
1884 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1885 sci->sc_lseg_stime = jiffies;
1886 }
4762077c 1887 if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
9ff05123
RK
1888 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1889 }
1890 }
1891 /*
1892 * Since pages may continue over multiple segment buffers,
1893 * the end of the last page must be checked outside of the loop.
1894 */
1895 if (bd_page)
1896 end_page_writeback(bd_page);
1897
1898 nilfs_end_page_io(fs_page, 0);
1899
9ff05123
RK
1900 nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1901
c1c1d709 1902 if (nilfs_doing_gc())
9ff05123 1903 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
c1c1d709 1904 else
9ff05123 1905 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
9ff05123
RK
1906
1907 sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1908
a694291a 1909 segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
9ff05123
RK
1910 nilfs_set_next_segment(nilfs, segbuf);
1911
1912 if (update_sr) {
e2c7617a 1913 nilfs->ns_flushed_device = 0;
9ff05123 1914 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
e339ad31 1915 segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
9ff05123 1916
c96fa464 1917 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
9ff05123
RK
1918 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1919 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
a694291a 1920 nilfs_segctor_clear_metadata_dirty(sci);
9ff05123
RK
1921 } else
1922 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1923}
1924
a694291a
RK
1925static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1926{
1927 int ret;
1928
1929 ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1930 if (!ret) {
1931 nilfs_segctor_complete_write(sci);
1932 nilfs_destroy_logs(&sci->sc_write_logs);
1933 }
1934 return ret;
1935}
1936
693dd321
RK
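/*
 * nilfs_segctor_collect_dirty_files - move dirty inodes to the writer.
 * Inodes queued on the nilfs-wide dirty list are moved onto
 * sci->sc_dirty_files; the ifile block of each inode is read in first if
 * it is not yet cached, and is redirtied so that the ifile gets written
 * in this construction.
 */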
1937static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1938 struct the_nilfs *nilfs)
9ff05123
RK
1939{
1940 struct nilfs_inode_info *ii, *n;
e912a5b6 1941 struct inode *ifile = sci->sc_root->ifile;
9ff05123 1942
693dd321 1943 spin_lock(&nilfs->ns_inode_lock);
9ff05123 1944 retry:
693dd321 1945 list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
9ff05123
RK
1946 if (!ii->i_bh) {
1947 struct buffer_head *ibh;
1948 int err;
1949
693dd321 1950 spin_unlock(&nilfs->ns_inode_lock);
9ff05123 1951 err = nilfs_ifile_get_inode_block(
e912a5b6 1952 ifile, ii->vfs_inode.i_ino, &ibh);
9ff05123 1953 if (unlikely(err)) {
a1d0747a
JP
1954 nilfs_warn(sci->sc_super,
1955 "log writer: error %d getting inode block (ino=%lu)",
1956 err, ii->vfs_inode.i_ino);
9ff05123
RK
1957 return err;
1958 }
693dd321 1959 spin_lock(&nilfs->ns_inode_lock);
9ff05123
RK
1960 if (likely(!ii->i_bh))
1961 ii->i_bh = ibh;
1962 else
1963 brelse(ibh);
1964 goto retry;
1965 }
9ff05123 1966
31ccb1f7
AR
1967 // Always redirty the buffer to avoid race condition
1968 mark_buffer_dirty(ii->i_bh);
1969 nilfs_mdt_mark_dirty(ifile);
1970
9ff05123
RK
1971 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1972 set_bit(NILFS_I_BUSY, &ii->i_state);
eaae0f37 1973 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
9ff05123 1974 }
693dd321 1975 spin_unlock(&nilfs->ns_inode_lock);
9ff05123 1976
9ff05123
RK
1977 return 0;
1978}
1979
693dd321
RK
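/*
 * nilfs_segctor_drop_written_files - release inodes that were fully
 * written out.  Inodes whose NILFS_I_UPDATED flag is set and that did
 * not become dirty again are removed from sc_dirty_files, their ifile
 * block is released, and the final iput() is either done here or
 * deferred to the iput work queue when it could deadlock (unlinked
 * inodes or an unfinished mount).
 */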
1980static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1981 struct the_nilfs *nilfs)
9ff05123 1982{
9ff05123 1983 struct nilfs_inode_info *ii, *n;
1751e8a6 1984 int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
7ef3ff2f 1985 int defer_iput = false;
9ff05123 1986
693dd321 1987 spin_lock(&nilfs->ns_inode_lock);
9ff05123
RK
1988 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1989 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
6c43f410 1990 test_bit(NILFS_I_DIRTY, &ii->i_state))
9ff05123 1991 continue;
6c43f410 1992
9ff05123
RK
1993 clear_bit(NILFS_I_BUSY, &ii->i_state);
1994 brelse(ii->i_bh);
1995 ii->i_bh = NULL;
7ef3ff2f 1996 list_del_init(&ii->i_dirty);
283ee148 1997 if (!ii->vfs_inode.i_nlink || during_mount) {
7ef3ff2f 1998 /*
283ee148
RK
1999 * Defer calling iput() to avoid deadlocks if
2000 * i_nlink == 0 or mount is not yet finished.
7ef3ff2f
RK
2001 */
2002 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
2003 defer_iput = true;
2004 } else {
2005 spin_unlock(&nilfs->ns_inode_lock);
2006 iput(&ii->vfs_inode);
2007 spin_lock(&nilfs->ns_inode_lock);
2008 }
9ff05123 2009 }
693dd321 2010 spin_unlock(&nilfs->ns_inode_lock);
7ef3ff2f
RK
2011
2012 if (defer_iput)
2013 schedule_work(&sci->sc_iput_work);
9ff05123
RK
2014}
2015
9ff05123
RK
2016/*
2017 * Main procedure of segment constructor
2018 */
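/*
 * A single call of nilfs_segctor_do_construct() repeats the collection
 * stages until the stage counter reaches NILFS_ST_DONE: dirty blocks are
 * collected into segment buffers, disk block numbers are assigned and
 * segment summaries filled in, the checkpoint and super root are written
 * when a full segment (SC_LSEG_SR) is requested, checksums are added,
 * and the logs are submitted.  On failure the partially built logs are
 * aborted and the collected inodes are redirtied.
 */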
2019static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2020{
e3154e97 2021 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1e2b68bf 2022 int err;
9ff05123 2023
58497703 2024 nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
6c43f410 2025 sci->sc_cno = nilfs->ns_cno;
9ff05123 2026
693dd321 2027 err = nilfs_segctor_collect_dirty_files(sci, nilfs);
9ff05123
RK
2028 if (unlikely(err))
2029 goto out;
2030
e912a5b6 2031 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
9ff05123
RK
2032 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2033
2034 if (nilfs_segctor_clean(sci))
2035 goto out;
2036
2037 do {
2038 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2039
2040 err = nilfs_segctor_begin_construction(sci, nilfs);
2041 if (unlikely(err))
2042 goto out;
2043
2044 /* Update time stamp */
fb04b91b 2045 sci->sc_seg_ctime = ktime_get_real_seconds();
9ff05123
RK
2046
2047 err = nilfs_segctor_collect(sci, nilfs, mode);
2048 if (unlikely(err))
2049 goto failed;
2050
9ff05123 2051 /* Avoid empty segment */
58497703 2052 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
4762077c 2053 nilfs_segbuf_empty(sci->sc_curseg)) {
a694291a 2054 nilfs_segctor_abort_construction(sci, nilfs, 1);
9ff05123
RK
2055 goto out;
2056 }
2057
2058 err = nilfs_segctor_assign(sci, mode);
2059 if (unlikely(err))
2060 goto failed;
2061
9ff05123 2062 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
e912a5b6 2063 nilfs_segctor_fill_in_file_bmap(sci);
9ff05123 2064
1e2b68bf 2065 if (mode == SC_LSEG_SR &&
58497703 2066 nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
9ff05123
RK
2067 err = nilfs_segctor_fill_in_checkpoint(sci);
2068 if (unlikely(err))
a694291a 2069 goto failed_to_write;
9ff05123
RK
2070
2071 nilfs_segctor_fill_in_super_root(sci, nilfs);
2072 }
2073 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2074
2075 /* Write partial segments */
1cb2d38c 2076 nilfs_segctor_prepare_write(sci);
aaed1d5b
RK
2077
2078 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2079 nilfs->ns_crc_seed);
9ff05123 2080
9c965bac 2081 err = nilfs_segctor_write(sci, nilfs);
9ff05123
RK
2082 if (unlikely(err))
2083 goto failed_to_write;
2084
58497703 2085 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
09cbfeaf 2086 nilfs->ns_blocksize_bits != PAGE_SHIFT) {
a694291a
RK
2087 /*
2088 * At this point, we avoid double buffering
2089 * for blocksize < pagesize because the page dirty
2090 * flag is turned off during write-out and dirty
2091 * buffers are not properly collected for
2092 * pages crossing segment boundaries.
2093 */
2094 err = nilfs_segctor_wait(sci);
2095 if (err)
2096 goto failed_to_write;
2097 }
58497703 2098 } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
9ff05123 2099
9ff05123 2100 out:
693dd321 2101 nilfs_segctor_drop_written_files(sci, nilfs);
9ff05123
RK
2102 return err;
2103
2104 failed_to_write:
9ff05123
RK
2105 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2106 nilfs_redirty_inodes(&sci->sc_dirty_files);
9ff05123
RK
2107
2108 failed:
2109 if (nilfs_doing_gc())
2110 nilfs_redirty_inodes(&sci->sc_gc_inodes);
a694291a 2111 nilfs_segctor_abort_construction(sci, nilfs, err);
9ff05123
RK
2112 goto out;
2113}
2114
2115/**
9ccf56c1 2116 * nilfs_segctor_start_timer - set timer of background write
9ff05123
RK
2117 * @sci: nilfs_sc_info
2118 *
2119 * If the timer has already been set, it ignores the new request.
2120 * This function MUST be called within a section locking the segment
2121 * semaphore.
2122 */
2123static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2124{
2125 spin_lock(&sci->sc_state_lock);
fdce895e
LH
2126 if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2127 sci->sc_timer.expires = jiffies + sci->sc_interval;
2128 add_timer(&sci->sc_timer);
9ff05123
RK
2129 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2130 }
2131 spin_unlock(&sci->sc_state_lock);
2132}
2133
2134static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2135{
2136 spin_lock(&sci->sc_state_lock);
4ce5c342 2137 if (!(sci->sc_flush_request & BIT(bn))) {
9ff05123
RK
2138 unsigned long prev_req = sci->sc_flush_request;
2139
4ce5c342 2140 sci->sc_flush_request |= BIT(bn);
9ff05123
RK
2141 if (!prev_req)
2142 wake_up(&sci->sc_wait_daemon);
2143 }
2144 spin_unlock(&sci->sc_state_lock);
2145}
2146
2147/**
2148 * nilfs_flush_segment - trigger a segment construction for resource control
2149 * @sb: super block
2150 * @ino: inode number of the file to be flushed out.
2151 */
2152void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2153{
e3154e97 2154 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 2155 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123
RK
2156
2157 if (!sci || nilfs_doing_construction())
2158 return;
2159 nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2160 /* assign bit 0 to data files */
2161}
2162
9ff05123 2163struct nilfs_segctor_wait_request {
ac6424b9 2164 wait_queue_entry_t wq;
9ff05123
RK
2165 __u32 seq;
2166 int err;
2167 atomic_t done;
2168};
2169
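/*
 * nilfs_segctor_sync - synchronously wait for a log write to complete.
 * A wait request carrying the next sequence number is queued on
 * sc_wait_request and the daemon is woken up; the caller sleeps until
 * nilfs_segctor_wakeup() marks the request done or a signal interrupts
 * the wait, in which case -ERESTARTSYS is returned.
 */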
2170static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2171{
2172 struct nilfs_segctor_wait_request wait_req;
2173 int err = 0;
2174
2175 spin_lock(&sci->sc_state_lock);
2176 init_wait(&wait_req.wq);
2177 wait_req.err = 0;
2178 atomic_set(&wait_req.done, 0);
2179 wait_req.seq = ++sci->sc_seq_request;
2180 spin_unlock(&sci->sc_state_lock);
2181
2182 init_waitqueue_entry(&wait_req.wq, current);
2183 add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2184 set_current_state(TASK_INTERRUPTIBLE);
2185 wake_up(&sci->sc_wait_daemon);
2186
2187 for (;;) {
2188 if (atomic_read(&wait_req.done)) {
2189 err = wait_req.err;
2190 break;
2191 }
2192 if (!signal_pending(current)) {
2193 schedule();
2194 continue;
2195 }
2196 err = -ERESTARTSYS;
2197 break;
2198 }
2199 finish_wait(&sci->sc_wait_request, &wait_req.wq);
2200 return err;
2201}
2202
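/*
 * nilfs_segctor_wakeup - complete queued wait requests.
 * Walks sc_wait_request under its spinlock and, for every request whose
 * sequence number has been reached by sc_seq_done, records the result
 * and wakes the waiter by calling its wait-queue function directly.
 */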
2203static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2204{
2205 struct nilfs_segctor_wait_request *wrq, *n;
2206 unsigned long flags;
2207
2208 spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2055da97 2209 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
9ff05123
RK
2210 if (!atomic_read(&wrq->done) &&
2211 nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2212 wrq->err = err;
2213 atomic_set(&wrq->done, 1);
2214 }
2215 if (atomic_read(&wrq->done)) {
2216 wrq->wq.func(&wrq->wq,
2217 TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2218 0, NULL);
2219 }
2220 }
2221 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2222}
2223
2224/**
2225 * nilfs_construct_segment - construct a logical segment
2226 * @sb: super block
2227 *
300563e6 2228 * Return Value: On success, 0 is returned. On errors, one of the following
9ff05123
RK
2229 * negative error codes is returned.
2230 *
2231 * %-EROFS - Read only filesystem.
2232 *
2233 * %-EIO - I/O error
2234 *
2235 * %-ENOSPC - No space left on device (only in a panic state).
2236 *
2237 * %-ERESTARTSYS - Interrupted.
2238 *
2239 * %-ENOMEM - Insufficient memory available.
2240 */
2241int nilfs_construct_segment(struct super_block *sb)
2242{
e3154e97 2243 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 2244 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123 2245 struct nilfs_transaction_info *ti;
9ff05123 2246
8cccf05f 2247 if (sb_rdonly(sb) || unlikely(!sci))
9ff05123
RK
2248 return -EROFS;
2249
2250 /* A call inside transactions causes a deadlock. */
2251 BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2252
da6f7916 2253 return nilfs_segctor_sync(sci);
9ff05123
RK
2254}
2255
2256/**
2257 * nilfs_construct_dsync_segment - construct a data-only logical segment
2258 * @sb: super block
f30bf3e4
RK
2259 * @inode: inode whose data blocks should be written out
2260 * @start: start byte offset
2261 * @end: end byte offset (inclusive)
9ff05123 2262 *
300563e6 2263 * Return Value: On success, 0 is returned. On errors, one of the following
9ff05123
RK
2264 * negative error codes is returned.
2265 *
2266 * %-EROFS - Read only filesystem.
2267 *
2268 * %-EIO - I/O error
2269 *
2270 * %-ENOSPC - No space left on device (only in a panic state).
2271 *
2272 * %-ERESTARTSYS - Interrupted.
2273 *
2274 * %-ENOMEM - Insufficient memory available.
2275 */
f30bf3e4
RK
2276int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2277 loff_t start, loff_t end)
9ff05123 2278{
e3154e97 2279 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 2280 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123
RK
2281 struct nilfs_inode_info *ii;
2282 struct nilfs_transaction_info ti;
2283 int err = 0;
2284
8cccf05f 2285 if (sb_rdonly(sb) || unlikely(!sci))
9ff05123
RK
2286 return -EROFS;
2287
f7545144 2288 nilfs_transaction_lock(sb, &ti, 0);
9ff05123
RK
2289
2290 ii = NILFS_I(inode);
b9f66140 2291 if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
3b2ce58b 2292 nilfs_test_opt(nilfs, STRICT_ORDER) ||
9ff05123 2293 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
3b2ce58b 2294 nilfs_discontinued(nilfs)) {
f7545144 2295 nilfs_transaction_unlock(sb);
9ff05123
RK
2296 err = nilfs_segctor_sync(sci);
2297 return err;
2298 }
2299
693dd321 2300 spin_lock(&nilfs->ns_inode_lock);
9ff05123
RK
2301 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2302 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
693dd321 2303 spin_unlock(&nilfs->ns_inode_lock);
f7545144 2304 nilfs_transaction_unlock(sb);
9ff05123
RK
2305 return 0;
2306 }
693dd321 2307 spin_unlock(&nilfs->ns_inode_lock);
f30bf3e4
RK
2308 sci->sc_dsync_inode = ii;
2309 sci->sc_dsync_start = start;
2310 sci->sc_dsync_end = end;
9ff05123
RK
2311
2312 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
e2c7617a
AR
2313 if (!err)
2314 nilfs->ns_flushed_device = 0;
9ff05123 2315
f7545144 2316 nilfs_transaction_unlock(sb);
9ff05123
RK
2317 return err;
2318}
2319
9ff05123 2320#define FLUSH_FILE_BIT (0x1) /* data file only */
4ce5c342 2321#define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */
9ff05123 2322
dcd76186
RK
2323/**
2324 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2325 * @sci: segment constructor object
2326 */
2327static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
9ff05123 2328{
9ff05123 2329 spin_lock(&sci->sc_state_lock);
dcd76186 2330 sci->sc_seq_accepted = sci->sc_seq_request;
9ff05123 2331 spin_unlock(&sci->sc_state_lock);
fdce895e 2332 del_timer_sync(&sci->sc_timer);
9ff05123
RK
2333}
2334
dcd76186
RK
2335/**
2336 * nilfs_segctor_notify - notify the result of request to caller threads
2337 * @sci: segment constructor object
2338 * @mode: mode of log forming
2339 * @err: error code to be notified
2340 */
2341static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
9ff05123
RK
2342{
2343 /* Clear requests (even when the construction failed) */
2344 spin_lock(&sci->sc_state_lock);
2345
dcd76186 2346 if (mode == SC_LSEG_SR) {
aeda7f63 2347 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
dcd76186
RK
2348 sci->sc_seq_done = sci->sc_seq_accepted;
2349 nilfs_segctor_wakeup(sci, err);
9ff05123 2350 sci->sc_flush_request = 0;
aeda7f63 2351 } else {
dcd76186 2352 if (mode == SC_FLUSH_FILE)
aeda7f63 2353 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
dcd76186 2354 else if (mode == SC_FLUSH_DAT)
aeda7f63
RK
2355 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2356
2357 /* re-enable timer if checkpoint creation was not done */
fdce895e
LH
2358 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2359 time_before(jiffies, sci->sc_timer.expires))
2360 add_timer(&sci->sc_timer);
aeda7f63 2361 }
9ff05123
RK
2362 spin_unlock(&sci->sc_state_lock);
2363}
2364
dcd76186
RK
2365/**
2366 * nilfs_segctor_construct - form logs and write them to disk
2367 * @sci: segment constructor object
2368 * @mode: mode of log forming
2369 */
2370static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
9ff05123 2371{
e3154e97 2372 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
d26493b6 2373 struct nilfs_super_block **sbp;
9ff05123
RK
2374 int err = 0;
2375
dcd76186
RK
2376 nilfs_segctor_accept(sci);
2377
9ff05123 2378 if (nilfs_discontinued(nilfs))
dcd76186
RK
2379 mode = SC_LSEG_SR;
2380 if (!nilfs_segctor_confirm(sci))
2381 err = nilfs_segctor_do_construct(sci, mode);
2382
9ff05123 2383 if (likely(!err)) {
dcd76186 2384 if (mode != SC_FLUSH_DAT)
9ff05123
RK
2385 atomic_set(&nilfs->ns_ndirtyblks, 0);
2386 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2387 nilfs_discontinued(nilfs)) {
2388 down_write(&nilfs->ns_sem);
d26493b6 2389 err = -EIO;
f7545144 2390 sbp = nilfs_prepare_super(sci->sc_super,
b2ac86e1
JS
2391 nilfs_sb_will_flip(nilfs));
2392 if (likely(sbp)) {
2393 nilfs_set_log_cursor(sbp[0], nilfs);
f7545144
RK
2394 err = nilfs_commit_super(sci->sc_super,
2395 NILFS_SB_COMMIT);
b2ac86e1 2396 }
9ff05123
RK
2397 up_write(&nilfs->ns_sem);
2398 }
2399 }
dcd76186
RK
2400
2401 nilfs_segctor_notify(sci, mode, err);
9ff05123
RK
2402 return err;
2403}
2404
7554e9c4 2405static void nilfs_construction_timeout(struct timer_list *t)
9ff05123 2406{
7554e9c4 2407 struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);
4ad364ca 2408
7554e9c4 2409 wake_up_process(sci->sc_timer_task);
9ff05123
RK
2410}
2411
2412static void
2413nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2414{
2415 struct nilfs_inode_info *ii, *n;
2416
2417 list_for_each_entry_safe(ii, n, head, i_dirty) {
2418 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2419 continue;
9ff05123 2420 list_del_init(&ii->i_dirty);
fbb24a3a 2421 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
e897be17 2422 nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
263d90ce 2423 iput(&ii->vfs_inode);
9ff05123
RK
2424 }
2425}
2426
4f6b8288
RK
2427int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2428 void **kbufs)
9ff05123 2429{
e3154e97 2430 struct the_nilfs *nilfs = sb->s_fs_info;
3fd3fe5a 2431 struct nilfs_sc_info *sci = nilfs->ns_writer;
9ff05123 2432 struct nilfs_transaction_info ti;
9ff05123
RK
2433 int err;
2434
2435 if (unlikely(!sci))
2436 return -EROFS;
2437
f7545144 2438 nilfs_transaction_lock(sb, &ti, 1);
9ff05123 2439
c1c1d709 2440 err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
9ff05123
RK
2441 if (unlikely(err))
2442 goto out_unlock;
071cb4b8 2443
4f6b8288 2444 err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
c1c1d709
RK
2445 if (unlikely(err)) {
2446 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
9ff05123 2447 goto out_unlock;
c1c1d709 2448 }
9ff05123 2449
071cb4b8
RK
2450 sci->sc_freesegs = kbufs[4];
2451 sci->sc_nfreesegs = argv[4].v_nmembs;
0935db74 2452 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
9ff05123
RK
2453
2454 for (;;) {
dcd76186 2455 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
9ff05123 2456 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
9ff05123
RK
2457
2458 if (likely(!err))
2459 break;
2460
a1d0747a 2461 nilfs_warn(sb, "error %d cleaning segments", err);
9ff05123
RK
2462 set_current_state(TASK_INTERRUPTIBLE);
2463 schedule_timeout(sci->sc_interval);
2464 }
3b2ce58b 2465 if (nilfs_test_opt(nilfs, DISCARD)) {
e902ec99
JS
2466 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2467 sci->sc_nfreesegs);
2468 if (ret) {
a1d0747a
JP
2469 nilfs_warn(sb,
2470 "error %d on discard request, turning discards off for the device",
2471 ret);
3b2ce58b 2472 nilfs_clear_opt(nilfs, DISCARD);
e902ec99
JS
2473 }
2474 }
9ff05123
RK
2475
2476 out_unlock:
071cb4b8
RK
2477 sci->sc_freesegs = NULL;
2478 sci->sc_nfreesegs = 0;
c1c1d709 2479 nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
f7545144 2480 nilfs_transaction_unlock(sb);
9ff05123
RK
2481 return err;
2482}
2483
2484static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2485{
9ff05123 2486 struct nilfs_transaction_info ti;
9ff05123 2487
f7545144 2488 nilfs_transaction_lock(sci->sc_super, &ti, 0);
dcd76186 2489 nilfs_segctor_construct(sci, mode);
9ff05123
RK
2490
2491 /*
2492 * An unclosed segment should be retried.  We do this using sc_timer.
2493 * Timeout of sc_timer will invoke a complete construction, which
2494 * closes the current logical segment.
2495 */
2496 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2497 nilfs_segctor_start_timer(sci);
2498
f7545144 2499 nilfs_transaction_unlock(sci->sc_super);
9ff05123
RK
2500}
2501
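/*
 * nilfs_segctor_do_immediate_flush - serve a priority flush request.
 * The pending request bits select either a DAT flush or a data-file
 * flush; the construction is run directly through
 * nilfs_segctor_do_construct() rather than the accept/notify protocol,
 * and the corresponding request bit and NILFS_SC_PRIOR_FLUSH flag are
 * cleared afterwards.
 */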
2502static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2503{
2504 int mode = 0;
9ff05123
RK
2505
2506 spin_lock(&sci->sc_state_lock);
2507 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2508 SC_FLUSH_DAT : SC_FLUSH_FILE;
2509 spin_unlock(&sci->sc_state_lock);
2510
2511 if (mode) {
09ef29e0 2512 nilfs_segctor_do_construct(sci, mode);
9ff05123
RK
2513
2514 spin_lock(&sci->sc_state_lock);
2515 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2516 ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2517 spin_unlock(&sci->sc_state_lock);
2518 }
2519 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2520}
2521
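/*
 * nilfs_segctor_flush_mode - pick a construction mode for pending flush
 * requests.  If no segment is left unclosed, or the checkpoint interval
 * (sc_mjcp_freq) has not yet elapsed, a request touching only data files
 * yields SC_FLUSH_FILE and a request touching only the DAT yields
 * SC_FLUSH_DAT; otherwise a full construction with a super root
 * (SC_LSEG_SR) is chosen.
 */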
2522static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2523{
2524 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2525 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2526 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2527 return SC_FLUSH_FILE;
2528 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2529 return SC_FLUSH_DAT;
2530 }
2531 return SC_LSEG_SR;
2532}
2533
2534/**
2535 * nilfs_segctor_thread - main loop of the segment constructor thread.
2536 * @arg: pointer to a struct nilfs_sc_info.
2537 *
2538 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2539 * to execute segment constructions.
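 * It wakes up when a sync request, a flush request, or a timer expiry is
 * pending, runs the corresponding construction, and otherwise sleeps on
 * sc_wait_daemon; the timer is used to retry and eventually close
 * segments that were left unclosed.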
2540 */
2541static int nilfs_segctor_thread(void *arg)
2542{
2543 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
e3154e97 2544 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
2545 int timeout = 0;
2546
7554e9c4 2547 sci->sc_timer_task = current;
9ff05123
RK
2548
2549 /* start sync. */
2550 sci->sc_task = current;
2551 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
a1d0747a
JP
2552 nilfs_info(sci->sc_super,
2553 "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
2554 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
9ff05123
RK
2555
2556 spin_lock(&sci->sc_state_lock);
2557 loop:
2558 for (;;) {
2559 int mode;
2560
2561 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2562 goto end_thread;
2563
2564 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2565 mode = SC_LSEG_SR;
7f00184e 2566 else if (sci->sc_flush_request)
9ff05123 2567 mode = nilfs_segctor_flush_mode(sci);
7f00184e
RK
2568 else
2569 break;
9ff05123
RK
2570
2571 spin_unlock(&sci->sc_state_lock);
2572 nilfs_segctor_thread_construct(sci, mode);
2573 spin_lock(&sci->sc_state_lock);
2574 timeout = 0;
2575 }
2576
2577
2578 if (freezing(current)) {
2579 spin_unlock(&sci->sc_state_lock);
a0acae0e 2580 try_to_freeze();
9ff05123
RK
2581 spin_lock(&sci->sc_state_lock);
2582 } else {
2583 DEFINE_WAIT(wait);
2584 int should_sleep = 1;
2585
2586 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2587 TASK_INTERRUPTIBLE);
2588
2589 if (sci->sc_seq_request != sci->sc_seq_done)
2590 should_sleep = 0;
2591 else if (sci->sc_flush_request)
2592 should_sleep = 0;
2593 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2594 should_sleep = time_before(jiffies,
fdce895e 2595 sci->sc_timer.expires);
9ff05123
RK
2596
2597 if (should_sleep) {
2598 spin_unlock(&sci->sc_state_lock);
2599 schedule();
2600 spin_lock(&sci->sc_state_lock);
2601 }
2602 finish_wait(&sci->sc_wait_daemon, &wait);
2603 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
fdce895e 2604 time_after_eq(jiffies, sci->sc_timer.expires));
e605f0a7
RK
2605
2606 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
1dfa2710 2607 set_nilfs_discontinued(nilfs);
9ff05123
RK
2608 }
2609 goto loop;
2610
2611 end_thread:
2612 spin_unlock(&sci->sc_state_lock);
9ff05123
RK
2613
2614 /* end sync. */
2615 sci->sc_task = NULL;
2616 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2617 return 0;
2618}
2619
2620static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2621{
2622 struct task_struct *t;
2623
2624 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2625 if (IS_ERR(t)) {
2626 int err = PTR_ERR(t);
2627
a1d0747a
JP
2628 nilfs_err(sci->sc_super, "error %d creating segctord thread",
2629 err);
9ff05123
RK
2630 return err;
2631 }
2632 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2633 return 0;
2634}
2635
2636static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
6b81e14e
JS
2637 __acquires(&sci->sc_state_lock)
2638 __releases(&sci->sc_state_lock)
9ff05123
RK
2639{
2640 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2641
2642 while (sci->sc_task) {
2643 wake_up(&sci->sc_wait_daemon);
2644 spin_unlock(&sci->sc_state_lock);
2645 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2646 spin_lock(&sci->sc_state_lock);
2647 }
2648}
2649
9ff05123
RK
2650/*
2651 * Setup & clean-up functions
2652 */
f7545144 2653static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
e912a5b6 2654 struct nilfs_root *root)
9ff05123 2655{
e3154e97 2656 struct the_nilfs *nilfs = sb->s_fs_info;
9ff05123
RK
2657 struct nilfs_sc_info *sci;
2658
2659 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2660 if (!sci)
2661 return NULL;
2662
f7545144 2663 sci->sc_super = sb;
9ff05123 2664
e912a5b6
RK
2665 nilfs_get_root(root);
2666 sci->sc_root = root;
2667
9ff05123
RK
2668 init_waitqueue_head(&sci->sc_wait_request);
2669 init_waitqueue_head(&sci->sc_wait_daemon);
2670 init_waitqueue_head(&sci->sc_wait_task);
2671 spin_lock_init(&sci->sc_state_lock);
2672 INIT_LIST_HEAD(&sci->sc_dirty_files);
2673 INIT_LIST_HEAD(&sci->sc_segbufs);
a694291a 2674 INIT_LIST_HEAD(&sci->sc_write_logs);
9ff05123 2675 INIT_LIST_HEAD(&sci->sc_gc_inodes);
7ef3ff2f
RK
2676 INIT_LIST_HEAD(&sci->sc_iput_queue);
2677 INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
7554e9c4 2678 timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
9ff05123
RK
2679
2680 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2681 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2682 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2683
574e6c31 2684 if (nilfs->ns_interval)
071d73cf 2685 sci->sc_interval = HZ * nilfs->ns_interval;
574e6c31
RK
2686 if (nilfs->ns_watermark)
2687 sci->sc_watermark = nilfs->ns_watermark;
9ff05123
RK
2688 return sci;
2689}
2690
2691static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2692{
2693 int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2694
076a378b
RK
2695 /*
2696 * The segctord thread was stopped and its timer was removed.
2697 * But some tasks remain.
2698 */
9ff05123 2699 do {
9ff05123 2700 struct nilfs_transaction_info ti;
9ff05123 2701
f7545144 2702 nilfs_transaction_lock(sci->sc_super, &ti, 0);
dcd76186 2703 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
f7545144 2704 nilfs_transaction_unlock(sci->sc_super);
9ff05123 2705
7ef3ff2f
RK
2706 flush_work(&sci->sc_iput_work);
2707
9ff05123
RK
2708 } while (ret && retrycount-- > 0);
2709}
2710
2711/**
2712 * nilfs_segctor_destroy - destroy the segment constructor.
2713 * @sci: nilfs_sc_info
2714 *
2715 * nilfs_segctor_destroy() kills the segctord thread and frees
2716 * the nilfs_sc_info struct.
2717 * Caller must hold the segment semaphore.
2718 */
2719static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2720{
e3154e97 2721 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
2722 int flag;
2723
693dd321 2724 up_write(&nilfs->ns_segctor_sem);
9ff05123
RK
2725
2726 spin_lock(&sci->sc_state_lock);
2727 nilfs_segctor_kill_thread(sci);
2728 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2729 || sci->sc_seq_request != sci->sc_seq_done);
2730 spin_unlock(&sci->sc_state_lock);
2731
7ef3ff2f
RK
2732 if (flush_work(&sci->sc_iput_work))
2733 flag = true;
2734
3256a055 2735 if (flag || !nilfs_segctor_confirm(sci))
9ff05123
RK
2736 nilfs_segctor_write_out(sci);
2737
9ff05123 2738 if (!list_empty(&sci->sc_dirty_files)) {
a1d0747a
JP
2739 nilfs_warn(sci->sc_super,
2740 "disposed unprocessed dirty file(s) when stopping log writer");
693dd321 2741 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
9ff05123 2742 }
9ff05123 2743
7ef3ff2f 2744 if (!list_empty(&sci->sc_iput_queue)) {
a1d0747a
JP
2745 nilfs_warn(sci->sc_super,
2746 "disposed unprocessed inode(s) in iput queue when stopping log writer");
7ef3ff2f
RK
2747 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2748 }
2749
1f5abe7e 2750 WARN_ON(!list_empty(&sci->sc_segbufs));
a694291a 2751 WARN_ON(!list_empty(&sci->sc_write_logs));
9ff05123 2752
e912a5b6
RK
2753 nilfs_put_root(sci->sc_root);
2754
693dd321 2755 down_write(&nilfs->ns_segctor_sem);
9ff05123 2756
292a089d 2757 timer_shutdown_sync(&sci->sc_timer);
9ff05123
RK
2758 kfree(sci);
2759}
2760
2761/**
f7545144
RK
2762 * nilfs_attach_log_writer - attach log writer
2763 * @sb: super block instance
e912a5b6 2764 * @root: root object of the current filesystem tree
9ff05123 2765 *
f7545144
RK
2766 * This allocates a log writer object, initializes it, and starts the
2767 * log writer.
9ff05123
RK
2768 *
2769 * Return Value: On success, 0 is returned. On error, one of the following
2770 * negative error codes is returned.
2771 *
2772 * %-ENOMEM - Insufficient memory available.
2773 */
f7545144 2774int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
9ff05123 2775{
e3154e97 2776 struct the_nilfs *nilfs = sb->s_fs_info;
9ff05123
RK
2777 int err;
2778
3fd3fe5a 2779 if (nilfs->ns_writer) {
fe5f171b 2780 /*
8cccf05f
RK
2781 * This happens if the filesystem is made read-only by
2782 * __nilfs_error or nilfs_remount and then remounted
2783 * read/write. In these cases, reuse the existing
2784 * writer.
fe5f171b 2785 */
8cccf05f 2786 return 0;
fe5f171b
RK
2787 }
2788
f7545144 2789 nilfs->ns_writer = nilfs_segctor_new(sb, root);
3fd3fe5a 2790 if (!nilfs->ns_writer)
9ff05123
RK
2791 return -ENOMEM;
2792
8301c719
RK
2793 inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
2794
3fd3fe5a 2795 err = nilfs_segctor_start_thread(nilfs->ns_writer);
d0d51a97
RK
2796 if (unlikely(err))
2797 nilfs_detach_log_writer(sb);
2798
9ff05123
RK
2799 return err;
2800}
2801
2802/**
f7545144
RK
2803 * nilfs_detach_log_writer - destroy log writer
2804 * @sb: super block instance
9ff05123 2805 *
f7545144
RK
2806 * This kills the log writer daemon, frees the log writer object, and
2807 * destroys the list of dirty files.
9ff05123 2808 */
f7545144 2809void nilfs_detach_log_writer(struct super_block *sb)
9ff05123 2810{
e3154e97 2811 struct the_nilfs *nilfs = sb->s_fs_info;
9ff05123
RK
2812 LIST_HEAD(garbage_list);
2813
2814 down_write(&nilfs->ns_segctor_sem);
3fd3fe5a
RK
2815 if (nilfs->ns_writer) {
2816 nilfs_segctor_destroy(nilfs->ns_writer);
2817 nilfs->ns_writer = NULL;
9ff05123
RK
2818 }
2819
2820 /* Force to free the list of dirty files */
693dd321
RK
2821 spin_lock(&nilfs->ns_inode_lock);
2822 if (!list_empty(&nilfs->ns_dirty_files)) {
2823 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
a1d0747a
JP
2824 nilfs_warn(sb,
2825 "disposed unprocessed dirty file(s) when detaching log writer");
9ff05123 2826 }
693dd321 2827 spin_unlock(&nilfs->ns_inode_lock);
9ff05123
RK
2828 up_write(&nilfs->ns_segctor_sem);
2829
693dd321 2830 nilfs_dispose_list(nilfs, &garbage_list, 1);
9ff05123 2831}