nilfs2: get rid of sc_sbi back pointer
/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC   16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /* Upper limit of the number of segments
                                appended in collection retry loop */

/* Construction mode */
enum {
        SC_LSEG_SR = 1, /* Make a logical segment having a super root */
        SC_LSEG_DSYNC,  /* Flush data blocks of a given file and make
                           a logical segment without a super root */
        SC_FLUSH_FILE,  /* Flush data files, leads to segment writes without
                           creating a checkpoint */
        SC_FLUSH_DAT,   /* Flush DAT file. This also creates segments without
                           a checkpoint */
};

/* Stage numbers of dirty block collection */
enum {
        NILFS_ST_INIT = 0,
        NILFS_ST_GC,            /* Collecting dirty blocks for GC */
        NILFS_ST_FILE,
        NILFS_ST_IFILE,
        NILFS_ST_CPFILE,
        NILFS_ST_SUFILE,
        NILFS_ST_DAT,
        NILFS_ST_SR,            /* Super root */
        NILFS_ST_DSYNC,         /* Data sync blocks */
        NILFS_ST_DONE,
};

/* State flags of collection */
#define NILFS_CF_NODE           0x0001  /* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED  0x0002  /* IFILE stage has started */
#define NILFS_CF_SUFREED        0x0004  /* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK   (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
        int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        void (*write_data_binfo)(struct nilfs_sc_info *,
                                 struct nilfs_segsum_pointer *,
                                 union nilfs_binfo *);
        void (*write_node_binfo)(struct nilfs_sc_info *,
                                 struct nilfs_segsum_pointer *,
                                 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_gt(a, b)   \
        (typecheck(__u32, a) && typecheck(__u32, b) && \
         ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
        (typecheck(__u32, a) && typecheck(__u32, b) && \
         ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
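
/*
 * These comparisons are wrap-safe in the same manner as the kernel's
 * time_after() helpers: for example, nilfs_cnt32_gt(1, 0xfffffffe)
 * evaluates to true because (__s32)0xfffffffe - (__s32)1 is negative,
 * so a counter that has recently wrapped around still compares as
 * "newer" than one taken just before the wrap.
 */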

static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
        struct nilfs_transaction_info *cur_ti = current->journal_info;
        void *save = NULL;

        if (cur_ti) {
                if (cur_ti->ti_magic == NILFS_TI_MAGIC)
                        return ++cur_ti->ti_count;
                else {
                        /*
                         * If the journal_info field is occupied by another
                         * FS, it is saved and will be restored on
                         * nilfs_transaction_commit().
                         */
                        printk(KERN_WARNING
                               "NILFS warning: journal info from a different "
                               "FS\n");
                        save = current->journal_info;
                }
        }
        if (!ti) {
                ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
                if (!ti)
                        return -ENOMEM;
                ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
        } else {
                ti->ti_flags = 0;
        }
        ti->ti_count = 0;
        ti->ti_save = save;
        ti->ti_magic = NILFS_TI_MAGIC;
        current->journal_info = ti;
        return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive.  It is used in pairs with nilfs_transaction_commit().
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if capacity is
 * low.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
                            struct nilfs_transaction_info *ti,
                            int vacancy_check)
{
        struct nilfs_sb_info *sbi;
        struct the_nilfs *nilfs;
        int ret = nilfs_prepare_segment_lock(ti);

        if (unlikely(ret < 0))
                return ret;
        if (ret > 0)
                return 0;

        vfs_check_frozen(sb, SB_FREEZE_WRITE);

        sbi = NILFS_SB(sb);
        nilfs = sbi->s_nilfs;
        down_read(&nilfs->ns_segctor_sem);
        if (vacancy_check && nilfs_near_disk_full(nilfs)) {
                up_read(&nilfs->ns_segctor_sem);
                ret = -ENOSPC;
                goto failed;
        }
        return 0;

 failed:
        ti = current->journal_info;
        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        return ret;
}
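
/*
 * A minimal usage sketch of the transaction interface (error handling
 * elided; passing an on-stack nilfs_transaction_info avoids a slab
 * allocation inside nilfs_transaction_begin()):
 *
 *      struct nilfs_transaction_info ti;
 *
 *      err = nilfs_transaction_begin(sb, &ti, 1);
 *      if (!err) {
 *              ... modify inodes, pages, and bmaps here ...
 *              err = nilfs_transaction_commit(sb);
 *      }
 */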

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is performed only
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
        int err = 0;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        ti->ti_flags |= NILFS_TI_COMMIT;
        if (ti->ti_count > 0) {
                ti->ti_count--;
                return 0;
        }
        if (nilfs->ns_writer) {
                struct nilfs_sc_info *sci = nilfs->ns_writer;

                if (ti->ti_flags & NILFS_TI_COMMIT)
                        nilfs_segctor_start_timer(sci);
                if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
                        nilfs_segctor_do_flush(sci, 0);
        }
        up_read(&nilfs->ns_segctor_sem);
        current->journal_info = ti->ti_save;

        if (ti->ti_flags & NILFS_TI_SYNC)
                err = nilfs_construct_segment(sb);
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        if (ti->ti_count > 0) {
                ti->ti_count--;
                return;
        }
        up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);

        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
        struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        if (!sci || !sci->sc_flush_request)
                return;

        set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
        up_read(&nilfs->ns_segctor_sem);

        down_write(&nilfs->ns_segctor_sem);
        if (sci->sc_flush_request &&
            test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
                struct nilfs_transaction_info *ti = current->journal_info;

                ti->ti_flags |= NILFS_TI_WRITER;
                nilfs_segctor_do_immediate_flush(sci);
                ti->ti_flags &= ~NILFS_TI_WRITER;
        }
        downgrade_write(&nilfs->ns_segctor_sem);
}
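
/*
 * A note on the locking above: rw_semaphores cannot be upgraded from
 * read to write, so the read lock is dropped and the write lock taken
 * afresh; the NILFS_SC_PRIOR_FLUSH flag and the recheck of
 * sc_flush_request close the race window this opens.  The final
 * downgrade_write() returns the caller to the read-locked state it
 * arrived in.
 */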

static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
                                   struct nilfs_transaction_info *ti,
                                   int gcflag)
{
        struct nilfs_transaction_info *cur_ti = current->journal_info;
        struct the_nilfs *nilfs = sbi->s_nilfs;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        WARN_ON(cur_ti);
        ti->ti_flags = NILFS_TI_WRITER;
        ti->ti_count = 0;
        ti->ti_save = cur_ti;
        ti->ti_magic = NILFS_TI_MAGIC;
        INIT_LIST_HEAD(&ti->ti_garbage);
        current->journal_info = ti;

        for (;;) {
                down_write(&nilfs->ns_segctor_sem);
                if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
                        break;

                nilfs_segctor_do_immediate_flush(sci);

                up_write(&sbi->s_nilfs->ns_segctor_sem);
                yield();
        }
        if (gcflag)
                ti->ti_flags |= NILFS_TI_GC;
}

static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sbi->s_nilfs;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        BUG_ON(ti->ti_count > 0);

        up_write(&nilfs->ns_segctor_sem);
        current->journal_info = ti->ti_save;
        if (!list_empty(&ti->ti_garbage))
                nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
                                            struct nilfs_segsum_pointer *ssp,
                                            unsigned bytes)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        unsigned blocksize = sci->sc_super->s_blocksize;
        void *p;

        if (unlikely(ssp->offset + bytes > blocksize)) {
                ssp->offset = 0;
                BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
                                               &segbuf->sb_segsum_buffers));
                ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
        }
        p = ssp->bh->b_data + ssp->offset;
        ssp->offset += bytes;
        return p;
}
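
/*
 * Note that a summary entry never crosses a block boundary: when the
 * requested entry would not fit in the current summary block, the
 * pointer simply advances to the head of the next block, leaving the
 * tail of the previous one unused.
 */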

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        struct buffer_head *sumbh;
        unsigned sumbytes;
        unsigned flags = 0;
        int err;

        if (nilfs_doing_gc())
                flags = NILFS_SS_GC;
        err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
        if (unlikely(err))
                return err;

        sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
        sumbytes = segbuf->sb_sum.sumbytes;
        sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
        sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
        return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
        sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
        if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
                return -E2BIG; /* The current segment is filled up
                                  (internal code) */
        sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
        return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        int err;

        if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
                err = nilfs_segctor_feed_segment(sci);
                if (err)
                        return err;
                segbuf = sci->sc_curseg;
        }
        err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
        if (likely(!err))
                segbuf->sb_sum.flags |= NILFS_SS_SR;
        return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
        struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
        unsigned binfo_size)
{
        unsigned blocksize = sci->sc_super->s_blocksize;
        /* finfo and binfo are small enough compared with the block size */

        return ssp->offset + binfo_size +
                (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
                blocksize;
}
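
/*
 * In other words, nilfs_segctor_segsum_block_required() asks whether
 * appending one binfo entry (preceded by a finfo header when this is
 * the first block collected for the file) would overflow the current
 * summary block; both structures are a few tens of bytes, far smaller
 * than any supported block size.
 */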

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
                                      struct inode *inode)
{
        sci->sc_curseg->sb_sum.nfinfo++;
        sci->sc_binfo_ptr = sci->sc_finfo_ptr;
        nilfs_segctor_map_segsum_entry(
                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

        if (NILFS_I(inode)->i_root &&
            !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
        /* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
                                    struct inode *inode)
{
        struct nilfs_finfo *finfo;
        struct nilfs_inode_info *ii;
        struct nilfs_segment_buffer *segbuf;
        __u64 cno;

        if (sci->sc_blk_cnt == 0)
                return;

        ii = NILFS_I(inode);

        if (test_bit(NILFS_I_GCINODE, &ii->i_state))
                cno = ii->i_cno;
        else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
                cno = 0;
        else
                cno = sci->sc_cno;

        finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
                                                 sizeof(*finfo));
        finfo->fi_ino = cpu_to_le64(inode->i_ino);
        finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
        finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
        finfo->fi_cno = cpu_to_le64(cno);

        segbuf = sci->sc_curseg;
        segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
                sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
        sci->sc_finfo_ptr = sci->sc_binfo_ptr;
        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}
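
/*
 * A sketch of the summary payload built by the begin_finfo/end_finfo
 * pair: each collected file contributes one finfo header followed by
 * one binfo entry per collected block:
 *
 *      | finfo | binfo | binfo | ... | finfo | binfo | ... |
 *       file A  blk 0   blk 1         file B  blk 0
 *
 * sc_finfo_ptr points at the current file's finfo slot, sc_binfo_ptr
 * at the next free binfo slot.
 */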

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
                                        struct buffer_head *bh,
                                        struct inode *inode,
                                        unsigned binfo_size)
{
        struct nilfs_segment_buffer *segbuf;
        int required, err = 0;

 retry:
        segbuf = sci->sc_curseg;
        required = nilfs_segctor_segsum_block_required(
                sci, &sci->sc_binfo_ptr, binfo_size);
        if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
                nilfs_segctor_end_finfo(sci, inode);
                err = nilfs_segctor_feed_segment(sci);
                if (err)
                        return err;
                goto retry;
        }
        if (unlikely(required)) {
                err = nilfs_segbuf_extend_segsum(segbuf);
                if (unlikely(err))
                        goto failed;
        }
        if (sci->sc_blk_cnt == 0)
                nilfs_segctor_begin_finfo(sci, inode);

        nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
        /* Substitution to vblocknr is delayed until update_blocknr() */
        nilfs_segbuf_add_file_buffer(segbuf, bh);
        sci->sc_blk_cnt++;
 failed:
        return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh, struct inode *inode)
{
        int err;

        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
        if (err < 0)
                return err;

        err = nilfs_segctor_add_file_block(sci, bh, inode,
                                           sizeof(struct nilfs_binfo_v));
        if (!err)
                sci->sc_datablk_cnt++;
        return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh,
                                   struct inode *inode)
{
        return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh,
                                   struct inode *inode)
{
        WARN_ON(!buffer_dirty(bh));
        return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
                                        struct nilfs_segsum_pointer *ssp,
                                        union nilfs_binfo *binfo)
{
        struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
                sci, ssp, sizeof(*binfo_v));
        *binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
                                        struct nilfs_segsum_pointer *ssp,
                                        union nilfs_binfo *binfo)
{
        __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
                sci, ssp, sizeof(*vblocknr));
        *vblocknr = binfo->bi_v.bi_vblocknr;
}

static struct nilfs_sc_operations nilfs_sc_file_ops = {
        .collect_data = nilfs_collect_file_data,
        .collect_node = nilfs_collect_file_node,
        .collect_bmap = nilfs_collect_file_bmap,
        .write_data_binfo = nilfs_write_file_data_binfo,
        .write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
                                  struct buffer_head *bh, struct inode *inode)
{
        int err;

        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
        if (err < 0)
                return err;

        err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
        if (!err)
                sci->sc_datablk_cnt++;
        return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
                                  struct buffer_head *bh, struct inode *inode)
{
        WARN_ON(!buffer_dirty(bh));
        return nilfs_segctor_add_file_block(sci, bh, inode,
                                            sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
                                       struct nilfs_segsum_pointer *ssp,
                                       union nilfs_binfo *binfo)
{
        __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
                                                          sizeof(*blkoff));
        *blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
                                       struct nilfs_segsum_pointer *ssp,
                                       union nilfs_binfo *binfo)
{
        struct nilfs_binfo_dat *binfo_dat =
                nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
        *binfo_dat = binfo->bi_dat;
}

static struct nilfs_sc_operations nilfs_sc_dat_ops = {
        .collect_data = nilfs_collect_dat_data,
        .collect_node = nilfs_collect_file_node,
        .collect_bmap = nilfs_collect_dat_bmap,
        .write_data_binfo = nilfs_write_dat_data_binfo,
        .write_node_binfo = nilfs_write_dat_node_binfo,
};

static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
        .collect_data = nilfs_collect_file_data,
        .collect_node = NULL,
        .collect_bmap = NULL,
        .write_data_binfo = nilfs_write_file_data_binfo,
        .write_node_binfo = NULL,
};
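
/*
 * The three operation tables above select how dirty blocks are
 * collected and how their block information is recorded in the
 * segment summary: nilfs_sc_file_ops serves regular files and most
 * metadata files (binfos carry virtual block numbers resolved through
 * the DAT), nilfs_sc_dat_ops serves the DAT itself (whose binfos
 * carry raw block offsets instead), and nilfs_sc_dsync_ops serves
 * data-only sync, which writes no node or bmap blocks.
 */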

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
                                              struct list_head *listp,
                                              size_t nlimit,
                                              loff_t start, loff_t end)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t index = 0, last = ULONG_MAX;
        size_t ndirties = 0;
        int i;

        if (unlikely(start != 0 || end != LLONG_MAX)) {
                /*
                 * A valid range is given for syncing data pages.  The
                 * range is rounded to page boundaries; extra dirty
                 * buffers may be included if blocksize < pagesize.
                 */
                index = start >> PAGE_SHIFT;
                last = end >> PAGE_SHIFT;
        }
        pagevec_init(&pvec, 0);
 repeat:
        if (unlikely(index > last) ||
            !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
                                min_t(pgoff_t, last - index,
                                      PAGEVEC_SIZE - 1) + 1))
                return ndirties;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct buffer_head *bh, *head;
                struct page *page = pvec.pages[i];

                if (unlikely(page->index > last))
                        break;

                if (mapping->host) {
                        lock_page(page);
                        if (!page_has_buffers(page))
                                create_empty_buffers(page,
                                                     1 << inode->i_blkbits, 0);
                        unlock_page(page);
                }

                bh = head = page_buffers(page);
                do {
                        if (!buffer_dirty(bh))
                                continue;
                        get_bh(bh);
                        list_add_tail(&bh->b_assoc_buffers, listp);
                        ndirties++;
                        if (unlikely(ndirties >= nlimit)) {
                                pagevec_release(&pvec);
                                cond_resched();
                                return ndirties;
                        }
                } while (bh = bh->b_this_page, bh != head);
        }
        pagevec_release(&pvec);
        cond_resched();
        goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                                            struct list_head *listp)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct address_space *mapping = &ii->i_btnode_cache;
        struct pagevec pvec;
        struct buffer_head *bh, *head;
        unsigned int i;
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);

        while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
                                  PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
                        do {
                                if (buffer_dirty(bh)) {
                                        get_bh(bh);
                                        list_add_tail(&bh->b_assoc_buffers,
                                                      listp);
                                }
                                bh = bh->b_this_page;
                        } while (bh != head);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
                               struct list_head *head, int force)
{
        struct nilfs_inode_info *ii, *n;
        struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
        unsigned nv = 0;

        while (!list_empty(head)) {
                spin_lock(&nilfs->ns_inode_lock);
                list_for_each_entry_safe(ii, n, head, i_dirty) {
                        list_del_init(&ii->i_dirty);
                        if (force) {
                                if (unlikely(ii->i_bh)) {
                                        brelse(ii->i_bh);
                                        ii->i_bh = NULL;
                                }
                        } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
                                set_bit(NILFS_I_QUEUED, &ii->i_state);
                                list_add_tail(&ii->i_dirty,
                                              &nilfs->ns_dirty_files);
                                continue;
                        }
                        ivec[nv++] = ii;
                        if (nv == SC_N_INODEVEC)
                                break;
                }
                spin_unlock(&nilfs->ns_inode_lock);

                for (pii = ivec; nv > 0; pii++, nv--)
                        iput(&(*pii)->vfs_inode);
        }
}
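
/*
 * iput() can sleep and may re-enter the file system, so the inodes
 * above are unhooked in batches of SC_N_INODEVEC while ns_inode_lock
 * is held and only released with iput() after the spinlock has been
 * dropped.
 */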

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
                                     struct nilfs_root *root)
{
        int ret = 0;

        if (nilfs_mdt_fetch_dirty(root->ifile))
                ret++;
        if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
                ret++;
        if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
                ret++;
        if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
                ret++;
        return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
        return list_empty(&sci->sc_dirty_files) &&
                !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
                sci->sc_nfreesegs == 0 &&
                (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;
        int ret = 0;

        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

        spin_lock(&nilfs->ns_inode_lock);
        if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
                ret++;
        spin_unlock(&nilfs->ns_inode_lock);
        return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;

        nilfs_mdt_clear_dirty(sci->sc_root->ifile);
        nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
        nilfs_mdt_clear_dirty(nilfs->ns_sufile);
        nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;
        struct buffer_head *bh_cp;
        struct nilfs_checkpoint *raw_cp;
        int err;

        /* XXX: this interface will be changed */
        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
                                          &raw_cp, &bh_cp);
        if (likely(!err)) {
                /*
                 * This code duplicates what cpfile does internally, but
                 * it is needed to collect the checkpoint even if it was
                 * not newly created.
                 */
                nilfs_mdt_mark_buffer_dirty(bh_cp);
                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
                nilfs_cpfile_put_checkpoint(
                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
        } else
                WARN_ON(err == -EINVAL || err == -ENOENT);

        return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;
        struct buffer_head *bh_cp;
        struct nilfs_checkpoint *raw_cp;
        int err;

        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
                                          &raw_cp, &bh_cp);
        if (unlikely(err)) {
                WARN_ON(err == -EINVAL || err == -ENOENT);
                goto failed_ibh;
        }
        raw_cp->cp_snapshot_list.ssl_next = 0;
        raw_cp->cp_snapshot_list.ssl_prev = 0;
        raw_cp->cp_inodes_count =
                cpu_to_le64(atomic_read(&sci->sc_root->inodes_count));
        raw_cp->cp_blocks_count =
                cpu_to_le64(atomic_read(&sci->sc_root->blocks_count));
        raw_cp->cp_nblk_inc =
                cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
        raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
        raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

        if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                nilfs_checkpoint_clear_minor(raw_cp);
        else
                nilfs_checkpoint_set_minor(raw_cp);

        nilfs_write_inode_common(sci->sc_root->ifile,
                                 &raw_cp->cp_ifile_inode, 1);
        nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
        return 0;

 failed_ibh:
        return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
                                    struct nilfs_inode_info *ii)
{
        struct buffer_head *ibh;
        struct nilfs_inode *raw_inode;

        if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
                ibh = ii->i_bh;
                BUG_ON(!ibh);
                raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
                                                  ibh);
                nilfs_bmap_write(ii->i_bmap, raw_inode);
                nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
        }
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
                nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
                set_bit(NILFS_I_COLLECTED, &ii->i_state);
        }
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *raw_sr;
        unsigned isz = nilfs->ns_inode_size;

        bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;

        raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
        raw_sr->sr_nongc_ctime
                = cpu_to_le64(nilfs_doing_gc() ?
                              nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
        raw_sr->sr_flags = 0;

        nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
                                 NILFS_SR_DAT_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
                                 NILFS_SR_CPFILE_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
                                 NILFS_SR_SUFILE_OFFSET(isz), 1);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, head, i_dirty) {
                if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
                        clear_bit(NILFS_I_COLLECTED, &ii->i_state);
        }
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, head, i_dirty) {
                if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
                        continue;

                clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
                set_bit(NILFS_I_UPDATED, &ii->i_state);
        }
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
                                       struct inode *inode,
                                       struct list_head *listp,
                                       int (*collect)(struct nilfs_sc_info *,
                                                      struct buffer_head *,
                                                      struct inode *))
{
        struct buffer_head *bh, *n;
        int err = 0;

        if (collect) {
                list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
                        list_del_init(&bh->b_assoc_buffers);
                        err = collect(sci, bh, inode);
                        brelse(bh);
                        if (unlikely(err))
                                goto dispose_buffers;
                }
                return 0;
        }

 dispose_buffers:
        while (!list_empty(listp)) {
                bh = list_entry(listp->next, struct buffer_head,
                                b_assoc_buffers);
                list_del_init(&bh->b_assoc_buffers);
                brelse(bh);
        }
        return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
        /* Remaining number of blocks within segment buffer */
        return sci->sc_segbuf_nblocks -
                (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
                                   struct inode *inode,
                                   struct nilfs_sc_operations *sc_ops)
{
        LIST_HEAD(data_buffers);
        LIST_HEAD(node_buffers);
        int err;

        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
                size_t n, rest = nilfs_segctor_buffer_rest(sci);

                n = nilfs_lookup_dirty_data_buffers(
                        inode, &data_buffers, rest + 1, 0, LLONG_MAX);
                if (n > rest) {
                        err = nilfs_segctor_apply_buffers(
                                sci, inode, &data_buffers,
                                sc_ops->collect_data);
                        BUG_ON(!err); /* always receive -E2BIG or true error */
                        goto break_or_fail;
                }
        }
        nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
                err = nilfs_segctor_apply_buffers(
                        sci, inode, &data_buffers, sc_ops->collect_data);
                if (unlikely(err)) {
                        /* dispose node list */
                        nilfs_segctor_apply_buffers(
                                sci, inode, &node_buffers, NULL);
                        goto break_or_fail;
                }
                sci->sc_stage.flags |= NILFS_CF_NODE;
        }
        /* Collect node */
        err = nilfs_segctor_apply_buffers(
                sci, inode, &node_buffers, sc_ops->collect_node);
        if (unlikely(err))
                goto break_or_fail;

        nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
        err = nilfs_segctor_apply_buffers(
                sci, inode, &node_buffers, sc_ops->collect_bmap);
        if (unlikely(err))
                goto break_or_fail;

        nilfs_segctor_end_finfo(sci, inode);
        sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
        return err;
}
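
/*
 * -E2BIG is used internally as a "segment full" signal: it propagates
 * from nilfs_segctor_feed_segment() up through the collect callbacks
 * and makes the collection loop retry with extended segment buffers
 * instead of treating the condition as a fatal error.
 */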

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
                                         struct inode *inode)
{
        LIST_HEAD(data_buffers);
        size_t n, rest = nilfs_segctor_buffer_rest(sci);
        int err;

        n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
                                            sci->sc_dsync_start,
                                            sci->sc_dsync_end);

        err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
                                          nilfs_collect_file_data);
        if (!err) {
                nilfs_segctor_end_finfo(sci, inode);
                BUG_ON(n > rest);
                /* always receive -E2BIG or true error if n > rest */
        }
        return err;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
        struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;
        struct list_head *head;
        struct nilfs_inode_info *ii;
        size_t ndone;
        int err = 0;

        switch (sci->sc_stage.scnt) {
        case NILFS_ST_INIT:
                /* Pre-processes */
                sci->sc_stage.flags = 0;

                if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
                        sci->sc_nblk_inc = 0;
                        sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
                        if (mode == SC_LSEG_DSYNC) {
                                sci->sc_stage.scnt = NILFS_ST_DSYNC;
                                goto dsync_mode;
                        }
                }

                sci->sc_stage.dirty_file_ptr = NULL;
                sci->sc_stage.gc_inode_ptr = NULL;
                if (mode == SC_FLUSH_DAT) {
                        sci->sc_stage.scnt = NILFS_ST_DAT;
                        goto dat_stage;
                }
                sci->sc_stage.scnt++;  /* Fall through */
        case NILFS_ST_GC:
                if (nilfs_doing_gc()) {
                        head = &sci->sc_gc_inodes;
                        ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
                                                head, i_dirty);
                        list_for_each_entry_continue(ii, head, i_dirty) {
                                err = nilfs_segctor_scan_file(
                                        sci, &ii->vfs_inode,
                                        &nilfs_sc_file_ops);
                                if (unlikely(err)) {
                                        sci->sc_stage.gc_inode_ptr = list_entry(
                                                ii->i_dirty.prev,
                                                struct nilfs_inode_info,
                                                i_dirty);
                                        goto break_or_fail;
                                }
                                set_bit(NILFS_I_COLLECTED, &ii->i_state);
                        }
                        sci->sc_stage.gc_inode_ptr = NULL;
                }
                sci->sc_stage.scnt++;  /* Fall through */
        case NILFS_ST_FILE:
                head = &sci->sc_dirty_files;
                ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
                                        i_dirty);
                list_for_each_entry_continue(ii, head, i_dirty) {
                        clear_bit(NILFS_I_DIRTY, &ii->i_state);

                        err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
                                                      &nilfs_sc_file_ops);
                        if (unlikely(err)) {
                                sci->sc_stage.dirty_file_ptr =
                                        list_entry(ii->i_dirty.prev,
                                                   struct nilfs_inode_info,
                                                   i_dirty);
                                goto break_or_fail;
                        }
                        /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
                        /* XXX: required ? */
                }
                sci->sc_stage.dirty_file_ptr = NULL;
                if (mode == SC_FLUSH_FILE) {
                        sci->sc_stage.scnt = NILFS_ST_DONE;
                        return 0;
                }
                sci->sc_stage.scnt++;
                sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
                /* Fall through */
        case NILFS_ST_IFILE:
                err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                sci->sc_stage.scnt++;
                /* Creating a checkpoint */
                err = nilfs_segctor_create_checkpoint(sci);
                if (unlikely(err))
                        break;
                /* Fall through */
        case NILFS_ST_CPFILE:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                sci->sc_stage.scnt++;  /* Fall through */
        case NILFS_ST_SUFILE:
                err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
                                         sci->sc_nfreesegs, &ndone);
                if (unlikely(err)) {
                        nilfs_sufile_cancel_freev(nilfs->ns_sufile,
                                                  sci->sc_freesegs, ndone,
                                                  NULL);
                        break;
                }
                sci->sc_stage.flags |= NILFS_CF_SUFREED;

                err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                sci->sc_stage.scnt++;  /* Fall through */
        case NILFS_ST_DAT:
 dat_stage:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
                                              &nilfs_sc_dat_ops);
                if (unlikely(err))
                        break;
                if (mode == SC_FLUSH_DAT) {
                        sci->sc_stage.scnt = NILFS_ST_DONE;
                        return 0;
                }
                sci->sc_stage.scnt++;  /* Fall through */
        case NILFS_ST_SR:
                if (mode == SC_LSEG_SR) {
                        /* Appending a super root */
                        err = nilfs_segctor_add_super_root(sci);
                        if (unlikely(err))
                                break;
                }
                /* End of a logical segment */
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
                sci->sc_stage.scnt = NILFS_ST_DONE;
                return 0;
        case NILFS_ST_DSYNC:
 dsync_mode:
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
                ii = sci->sc_dsync_inode;
                if (!test_bit(NILFS_I_BUSY, &ii->i_state))
                        break;

                err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
                if (unlikely(err))
                        break;
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
                sci->sc_stage.scnt = NILFS_ST_DONE;
                return 0;
        case NILFS_ST_DONE:
                return 0;
        default:
                BUG();
        }

 break_or_fail:
        return err;
}
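
/*
 * Note the deliberate fall-throughs above: sci->sc_stage.scnt records
 * how far collection has progressed, and the gc_inode_ptr and
 * dirty_file_ptr cursors record the inode being processed, so when
 * collection is interrupted a later pass re-enters the switch at the
 * stage where it left off instead of starting over from NILFS_ST_INIT.
 */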

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
                                            struct the_nilfs *nilfs)
{
        struct nilfs_segment_buffer *segbuf, *prev;
        __u64 nextnum;
        int err, alloc = 0;

        segbuf = nilfs_segbuf_new(sci->sc_super);
        if (unlikely(!segbuf))
                return -ENOMEM;

        if (list_empty(&sci->sc_write_logs)) {
                nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
                                 nilfs->ns_pseg_offset, nilfs);
                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
                        nilfs_shift_to_next_segment(nilfs);
                        nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
                }

                segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
                nextnum = nilfs->ns_nextnum;

                if (nilfs->ns_segnum == nilfs->ns_nextnum)
                        /* Start from the head of a new full segment */
                        alloc++;
        } else {
                /* Continue logs */
                prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
                nilfs_segbuf_map_cont(segbuf, prev);
                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
                nextnum = prev->sb_nextnum;

                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
                        nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
                        segbuf->sb_sum.seg_seq++;
                        alloc++;
                }
        }

        err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
        if (err)
                goto failed;

        if (alloc) {
                err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
                if (err)
                        goto failed;
        }
        nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

        BUG_ON(!list_empty(&sci->sc_segbufs));
        list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
        sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
        return 0;

 failed:
        nilfs_segbuf_free(segbuf);
        return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
                                         struct the_nilfs *nilfs, int nadd)
{
        struct nilfs_segment_buffer *segbuf, *prev;
        struct inode *sufile = nilfs->ns_sufile;
        __u64 nextnextnum;
        LIST_HEAD(list);
        int err, ret, i;

        prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
        /*
         * Since the segment specified with nextnum might be allocated during
         * the previous construction, the buffer including its segusage may
         * not be dirty.  The following call ensures that the buffer is dirty
         * and will pin the buffer on memory until the sufile is written.
         */
        err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
        if (unlikely(err))
                return err;

        for (i = 0; i < nadd; i++) {
                /* extend segment info */
                err = -ENOMEM;
                segbuf = nilfs_segbuf_new(sci->sc_super);
                if (unlikely(!segbuf))
                        goto failed;

                /* map this buffer to region of segment on-disk */
                nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
                sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

                /* allocate the next next full segment */
                err = nilfs_sufile_alloc(sufile, &nextnextnum);
                if (unlikely(err))
                        goto failed_segbuf;

                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
                nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

                list_add_tail(&segbuf->sb_list, &list);
                prev = segbuf;
        }
        list_splice_tail(&list, &sci->sc_segbufs);
        return 0;

 failed_segbuf:
        nilfs_segbuf_free(segbuf);
 failed:
        list_for_each_entry(segbuf, &list, sb_list) {
                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
                WARN_ON(ret); /* never fails */
        }
        nilfs_destroy_logs(&list);
        return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
                                       struct the_nilfs *nilfs)
{
        struct nilfs_segment_buffer *segbuf, *prev;
        struct inode *sufile = nilfs->ns_sufile;
        int ret;

        segbuf = NILFS_FIRST_SEGBUF(logs);
        if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
                WARN_ON(ret); /* never fails */
        }
        if (atomic_read(&segbuf->sb_err)) {
                /* Case 1: The first segment failed */
                if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
                        /* Case 1a: Partial segment appended into an existing
                           segment */
                        nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
                                                segbuf->sb_fseg_end);
                else /* Case 1b: New full segment */
                        set_nilfs_discontinued(nilfs);
        }

        prev = segbuf;
        list_for_each_entry_continue(segbuf, logs, sb_list) {
                if (prev->sb_nextnum != segbuf->sb_nextnum) {
                        ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
                        WARN_ON(ret); /* never fails */
                }
                if (atomic_read(&segbuf->sb_err) &&
                    segbuf->sb_segnum != nilfs->ns_nextnum)
                        /* Case 2: extended segment (!= next) failed */
                        nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
                prev = segbuf;
        }
}
1359
1360 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1361                                           struct inode *sufile)
1362 {
1363         struct nilfs_segment_buffer *segbuf;
1364         unsigned long live_blocks;
1365         int ret;
1366
1367         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1368                 live_blocks = segbuf->sb_sum.nblocks +
1369                         (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1370                 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1371                                                      live_blocks,
1372                                                      sci->sc_seg_ctime);
1373                 WARN_ON(ret); /* always succeeds because the segusage is dirty */
1374         }
1375 }
1376
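/*
 * nilfs_cancel_segusage - undo usage updates for logs that were not written
 *
 * The first segment may hold valid data written by earlier logs, so its
 * usage is rewound to the pre-existing block count; the usage of every
 * following (freshly extended) segment is reset to zero.
 */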
1377 static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1378 {
1379         struct nilfs_segment_buffer *segbuf;
1380         int ret;
1381
1382         segbuf = NILFS_FIRST_SEGBUF(logs);
1383         ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1384                                              segbuf->sb_pseg_start -
1385                                              segbuf->sb_fseg_start, 0);
1386         WARN_ON(ret); /* always succeeds because the segusage is dirty */
1387
1388         list_for_each_entry_continue(segbuf, logs, sb_list) {
1389                 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1390                                                      0, 0);
1391                 WARN_ON(ret); /* always succeeds */
1392         }
1393 }
1394
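/*
 * nilfs_segctor_truncate_segments - drop the segment buffers after @last
 *
 * Surplus segments allocated by the collection retry loop are released:
 * their preallocated next-segment numbers are returned to the sufile and
 * the buffers themselves are destroyed.
 */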
1395 static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1396                                             struct nilfs_segment_buffer *last,
1397                                             struct inode *sufile)
1398 {
1399         struct nilfs_segment_buffer *segbuf = last;
1400         int ret;
1401
1402         list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1403                 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1404                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1405                 WARN_ON(ret);
1406         }
1407         nilfs_truncate_logs(&sci->sc_segbufs, last);
1408 }
1409
1410
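/*
 * nilfs_segctor_collect - collect dirty blocks into the segment buffers
 *
 * Dirty blocks are gathered stage by stage.  If the current set of
 * segments fills up (-E2BIG), the logs are cleared, more segments are
 * appended (doubling the delta up to SC_MAX_SEGDELTA), the stage is
 * rewound, and the collection is retried.
 */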
1411 static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1412                                  struct the_nilfs *nilfs, int mode)
1413 {
1414         struct nilfs_cstage prev_stage = sci->sc_stage;
1415         int err, nadd = 1;
1416
1417         /* Collection retry loop */
1418         for (;;) {
1419                 sci->sc_nblk_this_inc = 0;
1420                 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1421
1422                 err = nilfs_segctor_reset_segment_buffer(sci);
1423                 if (unlikely(err))
1424                         goto failed;
1425
1426                 err = nilfs_segctor_collect_blocks(sci, mode);
1427                 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1428                 if (!err)
1429                         break;
1430
1431                 if (unlikely(err != -E2BIG))
1432                         goto failed;
1433
1434                 /* The current segment is filled up */
1435                 if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
1436                         break;
1437
1438                 nilfs_clear_logs(&sci->sc_segbufs);
1439
1440                 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1441                 if (unlikely(err))
1442                         return err;
1443
1444                 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1445                         err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1446                                                         sci->sc_freesegs,
1447                                                         sci->sc_nfreesegs,
1448                                                         NULL);
1449                         WARN_ON(err); /* should not happen */
1450                 }
1451                 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1452                 sci->sc_stage = prev_stage;
1453         }
1454         nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1455         return 0;
1456
1457  failed:
1458         return err;
1459 }
1460
1461 static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1462                                       struct buffer_head *new_bh)
1463 {
1464         BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1465
1466         list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1467         /* The caller must release old_bh */
1468 }
1469
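/*
 * nilfs_segctor_update_payload_blocknr - assign disk addresses to payload
 * blocks
 *
 * Walk the payload buffers of @segbuf in parallel with the finfo entries
 * in the segment summary, assign a block number to each buffer through
 * the bmap, and write the resulting binfo into the summary.  A buffer may
 * be replaced by nilfs_bmap_assign(); the payload list is patched up with
 * nilfs_list_replace_buffer() when that happens.
 */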
1470 static int
1471 nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1472                                      struct nilfs_segment_buffer *segbuf,
1473                                      int mode)
1474 {
1475         struct inode *inode = NULL;
1476         sector_t blocknr;
1477         unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1478         unsigned long nblocks = 0, ndatablk = 0;
1479         struct nilfs_sc_operations *sc_op = NULL;
1480         struct nilfs_segsum_pointer ssp;
1481         struct nilfs_finfo *finfo = NULL;
1482         union nilfs_binfo binfo;
1483         struct buffer_head *bh, *bh_org;
1484         ino_t ino = 0;
1485         int err = 0;
1486
1487         if (!nfinfo)
1488                 goto out;
1489
1490         blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1491         ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1492         ssp.offset = sizeof(struct nilfs_segment_summary);
1493
1494         list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1495                 if (bh == segbuf->sb_super_root)
1496                         break;
1497                 if (!finfo) {
1498                         finfo = nilfs_segctor_map_segsum_entry(
1499                                 sci, &ssp, sizeof(*finfo));
1500                         ino = le64_to_cpu(finfo->fi_ino);
1501                         nblocks = le32_to_cpu(finfo->fi_nblocks);
1502                         ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1503
1504                         if (buffer_nilfs_node(bh))
1505                                 inode = NILFS_BTNC_I(bh->b_page->mapping);
1506                         else
1507                                 inode = NILFS_AS_I(bh->b_page->mapping);
1508
1509                         if (mode == SC_LSEG_DSYNC)
1510                                 sc_op = &nilfs_sc_dsync_ops;
1511                         else if (ino == NILFS_DAT_INO)
1512                                 sc_op = &nilfs_sc_dat_ops;
1513                         else /* file blocks */
1514                                 sc_op = &nilfs_sc_file_ops;
1515                 }
1516                 bh_org = bh;
1517                 get_bh(bh_org);
1518                 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1519                                         &binfo);
1520                 if (bh != bh_org)
1521                         nilfs_list_replace_buffer(bh_org, bh);
1522                 brelse(bh_org);
1523                 if (unlikely(err))
1524                         goto failed_bmap;
1525
1526                 if (ndatablk > 0)
1527                         sc_op->write_data_binfo(sci, &ssp, &binfo);
1528                 else
1529                         sc_op->write_node_binfo(sci, &ssp, &binfo);
1530
1531                 blocknr++;
1532                 if (--nblocks == 0) {
1533                         finfo = NULL;
1534                         if (--nfinfo == 0)
1535                                 break;
1536                 } else if (ndatablk > 0)
1537                         ndatablk--;
1538         }
1539  out:
1540         return 0;
1541
1542  failed_bmap:
1543         return err;
1544 }
1545
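/* Assign disk block numbers to all logs and fill in their summaries */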
1546 static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1547 {
1548         struct nilfs_segment_buffer *segbuf;
1549         int err;
1550
1551         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1552                 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1553                 if (unlikely(err))
1554                         return err;
1555                 nilfs_segbuf_fill_in_segsum(segbuf);
1556         }
1557         return 0;
1558 }
1559
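/*
 * nilfs_copy_replace_page_buffers - freeze page contents for writing
 *
 * Snapshot the collected buffers of @page into a freshly allocated
 * private page so that the original page can be modified while the copy
 * is under I/O.  The original buffers are moved to @out so that they can
 * be cleaned up by nilfs_clear_copied_buffers() when the write completes
 * or is aborted.
 */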
1560 static int
1561 nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
1562 {
1563         struct page *clone_page;
1564         struct buffer_head *bh, *head, *bh2;
1565         void *kaddr;
1566
1567         bh = head = page_buffers(page);
1568
1569         clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
1570         if (unlikely(!clone_page))
1571                 return -ENOMEM;
1572
1573         bh2 = page_buffers(clone_page);
1574         kaddr = kmap_atomic(page, KM_USER0);
1575         do {
1576                 if (list_empty(&bh->b_assoc_buffers))
1577                         continue;
1578                 get_bh(bh2);
1579                 page_cache_get(clone_page); /* for each bh */
1580                 memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
1581                 bh2->b_blocknr = bh->b_blocknr;
1582                 list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
1583                 list_add_tail(&bh->b_assoc_buffers, out);
1584         } while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
1585         kunmap_atomic(kaddr, KM_USER0);
1586
1587         if (!TestSetPageWriteback(clone_page))
1588                 account_page_writeback(clone_page);
1589         unlock_page(clone_page);
1590
1591         return 0;
1592 }
1593
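/*
 * A page must be frozen (copied before write-out) if it may still be
 * modified during the I/O, e.g. while it is mapped into user space.
 */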
1594 static int nilfs_test_page_to_be_frozen(struct page *page)
1595 {
1596         struct address_space *mapping = page->mapping;
1597
1598         if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
1599                 return 0;
1600
1601         if (page_mapped(page)) {
1602                 ClearPageChecked(page);
1603                 return 1;
1604         }
1605         return PageChecked(page);
1606 }
1607
1608 static int nilfs_begin_page_io(struct page *page, struct list_head *out)
1609 {
1610         if (!page || PageWriteback(page))
1611                 /* For split b-tree node pages, this function may be called
1612                    twice; this check makes the second and later calls no-ops. */
1613                 return 0;
1614
1615         lock_page(page);
1616         clear_page_dirty_for_io(page);
1617         set_page_writeback(page);
1618         unlock_page(page);
1619
1620         if (nilfs_test_page_to_be_frozen(page)) {
1621                 int err = nilfs_copy_replace_page_buffers(page, out);
1622                 if (unlikely(err))
1623                         return err;
1624         }
1625         return 0;
1626 }
1627
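/*
 * nilfs_segctor_prepare_write - put pages under writeback before submission
 *
 * Lock each page holding segment summary or payload blocks, clear its
 * dirty flag for I/O, and mark it under writeback.  Pages that need
 * freezing are copied; on failure, the page that could not be prepared
 * is reported through @failed_page so the caller can abort precisely.
 */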
1628 static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
1629                                        struct page **failed_page)
1630 {
1631         struct nilfs_segment_buffer *segbuf;
1632         struct page *bd_page = NULL, *fs_page = NULL;
1633         struct list_head *list = &sci->sc_copied_buffers;
1634         int err;
1635
1636         *failed_page = NULL;
1637         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1638                 struct buffer_head *bh;
1639
1640                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1641                                     b_assoc_buffers) {
1642                         if (bh->b_page != bd_page) {
1643                                 if (bd_page) {
1644                                         lock_page(bd_page);
1645                                         clear_page_dirty_for_io(bd_page);
1646                                         set_page_writeback(bd_page);
1647                                         unlock_page(bd_page);
1648                                 }
1649                                 bd_page = bh->b_page;
1650                         }
1651                 }
1652
1653                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1654                                     b_assoc_buffers) {
1655                         if (bh == segbuf->sb_super_root) {
1656                                 if (bh->b_page != bd_page) {
1657                                         lock_page(bd_page);
1658                                         clear_page_dirty_for_io(bd_page);
1659                                         set_page_writeback(bd_page);
1660                                         unlock_page(bd_page);
1661                                         bd_page = bh->b_page;
1662                                 }
1663                                 break;
1664                         }
1665                         if (bh->b_page != fs_page) {
1666                                 err = nilfs_begin_page_io(fs_page, list);
1667                                 if (unlikely(err)) {
1668                                         *failed_page = fs_page;
1669                                         goto out;
1670                                 }
1671                                 fs_page = bh->b_page;
1672                         }
1673                 }
1674         }
1675         if (bd_page) {
1676                 lock_page(bd_page);
1677                 clear_page_dirty_for_io(bd_page);
1678                 set_page_writeback(bd_page);
1679                 unlock_page(bd_page);
1680         }
1681         err = nilfs_begin_page_io(fs_page, list);
1682         if (unlikely(err))
1683                 *failed_page = fs_page;
1684  out:
1685         return err;
1686 }
1687
1688 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1689                                struct the_nilfs *nilfs)
1690 {
1691         int ret;
1692
1693         ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1694         list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1695         return ret;
1696 }
1697
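/*
 * Finish writeback of @page.  On success the page stays clean unless
 * some of its buffers are still dirty; on error it is redirtied and
 * flagged with an error so the data is not lost silently.
 */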
1698 static void __nilfs_end_page_io(struct page *page, int err)
1699 {
1700         if (!err) {
1701                 if (!nilfs_page_buffers_clean(page))
1702                         __set_page_dirty_nobuffers(page);
1703                 ClearPageError(page);
1704         } else {
1705                 __set_page_dirty_nobuffers(page);
1706                 SetPageError(page);
1707         }
1708
1709         if (buffer_nilfs_allocated(page_buffers(page))) {
1710                 if (TestClearPageWriteback(page))
1711                         dec_zone_page_state(page, NR_WRITEBACK);
1712         } else
1713                 end_page_writeback(page);
1714 }
1715
1716 static void nilfs_end_page_io(struct page *page, int err)
1717 {
1718         if (!page)
1719                 return;
1720
1721         if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1722                 /*
1723                  * For b-tree node pages, this function may be called twice
1724                  * or more because they might be split in a segment.
1725                  */
1726                 if (PageDirty(page)) {
1727                         /*
1728                          * For pages holding split b-tree node buffers, the
1729                          * dirty flag on the buffers may be cleared one by one.
1730                          * In that case, the page is redirtied once for the
1731                          * remaining buffers, and that must be cancelled if
1732                          * all the buffers get cleaned later.
1733                          */
1734                         lock_page(page);
1735                         if (nilfs_page_buffers_clean(page))
1736                                 __nilfs_clear_page_dirty(page);
1737                         unlock_page(page);
1738                 }
1739                 return;
1740         }
1741
1742         __nilfs_end_page_io(page, err);
1743 }
1744
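/*
 * Dispose of the original buffers whose contents were frozen into copy
 * pages by nilfs_copy_replace_page_buffers(), cleaning them only if the
 * write succeeded.
 */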
1745 static void nilfs_clear_copied_buffers(struct list_head *list, int err)
1746 {
1747         struct buffer_head *bh, *head;
1748         struct page *page;
1749
1750         while (!list_empty(list)) {
1751                 bh = list_entry(list->next, struct buffer_head,
1752                                 b_assoc_buffers);
1753                 page = bh->b_page;
1754                 page_cache_get(page);
1755                 head = bh = page_buffers(page);
1756                 do {
1757                         if (!list_empty(&bh->b_assoc_buffers)) {
1758                                 list_del_init(&bh->b_assoc_buffers);
1759                                 if (!err) {
1760                                         set_buffer_uptodate(bh);
1761                                         clear_buffer_dirty(bh);
1762                                         clear_buffer_delay(bh);
1763                                         clear_buffer_nilfs_volatile(bh);
1764                                 }
1765                                 brelse(bh); /* for b_assoc_buffers */
1766                         }
1767                 } while ((bh = bh->b_this_page) != head);
1768
1769                 __nilfs_end_page_io(page, err);
1770                 page_cache_release(page);
1771         }
1772 }
1773
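/*
 * nilfs_abort_logs - end writeback of the pages of failed logs
 *
 * The walk mirrors nilfs_segctor_complete_write() but ends page
 * writeback with an error; it stops once @failed_page is reached, since
 * later pages were never put under writeback.
 */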
1774 static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
1775                              int err)
1776 {
1777         struct nilfs_segment_buffer *segbuf;
1778         struct page *bd_page = NULL, *fs_page = NULL;
1779         struct buffer_head *bh;
1780
1781         if (list_empty(logs))
1782                 return;
1783
1784         list_for_each_entry(segbuf, logs, sb_list) {
1785                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1786                                     b_assoc_buffers) {
1787                         if (bh->b_page != bd_page) {
1788                                 if (bd_page)
1789                                         end_page_writeback(bd_page);
1790                                 bd_page = bh->b_page;
1791                         }
1792                 }
1793
1794                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1795                                     b_assoc_buffers) {
1796                         if (bh == segbuf->sb_super_root) {
1797                                 if (bh->b_page != bd_page) {
1798                                         end_page_writeback(bd_page);
1799                                         bd_page = bh->b_page;
1800                                 }
1801                                 break;
1802                         }
1803                         if (bh->b_page != fs_page) {
1804                                 nilfs_end_page_io(fs_page, err);
1805                                 if (fs_page && fs_page == failed_page)
1806                                         return;
1807                                 fs_page = bh->b_page;
1808                         }
1809                 }
1810         }
1811         if (bd_page)
1812                 end_page_writeback(bd_page);
1813
1814         nilfs_end_page_io(fs_page, err);
1815 }
1816
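/*
 * Roll back a failed construction: wait for in-flight logs, abort their
 * pages, cancel the segment usage updates, release incompletely written
 * segments, and restore segments freed on behalf of the GC if any.
 */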
1817 static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1818                                              struct the_nilfs *nilfs, int err)
1819 {
1820         LIST_HEAD(logs);
1821         int ret;
1822
1823         list_splice_tail_init(&sci->sc_write_logs, &logs);
1824         ret = nilfs_wait_on_logs(&logs);
1825         nilfs_abort_logs(&logs, NULL, ret ? : err);
1826
1827         list_splice_tail_init(&sci->sc_segbufs, &logs);
1828         nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1829         nilfs_free_incomplete_logs(&logs, nilfs);
1830         nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
1831
1832         if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1833                 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1834                                                 sci->sc_freesegs,
1835                                                 sci->sc_nfreesegs,
1836                                                 NULL);
1837                 WARN_ON(ret); /* should not happen */
1838         }
1839
1840         nilfs_destroy_logs(&logs);
1841 }
1842
1843 static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1844                                    struct nilfs_segment_buffer *segbuf)
1845 {
1846         nilfs->ns_segnum = segbuf->sb_segnum;
1847         nilfs->ns_nextnum = segbuf->sb_nextnum;
1848         nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1849                 + segbuf->sb_sum.nblocks;
1850         nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1851         nilfs->ns_ctime = segbuf->sb_sum.ctime;
1852 }
1853
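/*
 * nilfs_segctor_complete_write - finalize state after a successful write
 *
 * Clean the written buffers, end page writeback, advance the next write
 * position, and, if a super root was written, publish the new last
 * segment and clear the metadata dirty state.
 */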
1854 static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1855 {
1856         struct nilfs_segment_buffer *segbuf;
1857         struct page *bd_page = NULL, *fs_page = NULL;
1858         struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;
1859         int update_sr = false;
1860
1861         list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1862                 struct buffer_head *bh;
1863
1864                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1865                                     b_assoc_buffers) {
1866                         set_buffer_uptodate(bh);
1867                         clear_buffer_dirty(bh);
1868                         if (bh->b_page != bd_page) {
1869                                 if (bd_page)
1870                                         end_page_writeback(bd_page);
1871                                 bd_page = bh->b_page;
1872                         }
1873                 }
1874                 /*
1875                  * We assume that the buffers belonging to the same page
1876                  * are contiguous in the buffer list.
1877                  * Under this assumption, the last BH of each page is
1878                  * identifiable by a discontinuity in bh->b_page
1879                  * (page != fs_page).
1880                  *
1881                  * For B-tree node blocks, however, this assumption is not
1882                  * guaranteed.  The cleanup code of B-tree node pages needs
1883                  * special care.
1884                  */
1885                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1886                                     b_assoc_buffers) {
1887                         set_buffer_uptodate(bh);
1888                         clear_buffer_dirty(bh);
1889                         clear_buffer_delay(bh);
1890                         clear_buffer_nilfs_volatile(bh);
1891                         clear_buffer_nilfs_redirected(bh);
1892                         if (bh == segbuf->sb_super_root) {
1893                                 if (bh->b_page != bd_page) {
1894                                         end_page_writeback(bd_page);
1895                                         bd_page = bh->b_page;
1896                                 }
1897                                 update_sr = true;
1898                                 break;
1899                         }
1900                         if (bh->b_page != fs_page) {
1901                                 nilfs_end_page_io(fs_page, 0);
1902                                 fs_page = bh->b_page;
1903                         }
1904                 }
1905
1906                 if (!nilfs_segbuf_simplex(segbuf)) {
1907                         if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1908                                 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1909                                 sci->sc_lseg_stime = jiffies;
1910                         }
1911                         if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1912                                 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1913                 }
1914         }
1915         /*
1916          * Since pages may continue over multiple segment buffers,
1917          * the end of the last page must be checked outside the loop.
1918          */
1919         if (bd_page)
1920                 end_page_writeback(bd_page);
1921
1922         nilfs_end_page_io(fs_page, 0);
1923
1924         nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);
1925
1926         nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1927
1928         if (nilfs_doing_gc())
1929                 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1930         else
1931                 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1932
1933         sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1934
1935         segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1936         nilfs_set_next_segment(nilfs, segbuf);
1937
1938         if (update_sr) {
1939                 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1940                                        segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1941
1942                 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1943                 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1944                 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1945                 nilfs_segctor_clear_metadata_dirty(sci);
1946         } else
1947                 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1948 }
1949
1950 static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1951 {
1952         int ret;
1953
1954         ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1955         if (!ret) {
1956                 nilfs_segctor_complete_write(sci);
1957                 nilfs_destroy_logs(&sci->sc_write_logs);
1958         }
1959         return ret;
1960 }
1961
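/*
 * Move inodes queued on the ns_dirty_files list onto the writer's own
 * dirty file list, making sure each of them holds its ifile inode block.
 * The inode lock is dropped while reading such a block, so the scan is
 * restarted after each read.
 */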
1962 static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1963                                              struct the_nilfs *nilfs)
1964 {
1965         struct nilfs_inode_info *ii, *n;
1966         struct inode *ifile = sci->sc_root->ifile;
1967
1968         spin_lock(&nilfs->ns_inode_lock);
1969  retry:
1970         list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1971                 if (!ii->i_bh) {
1972                         struct buffer_head *ibh;
1973                         int err;
1974
1975                         spin_unlock(&nilfs->ns_inode_lock);
1976                         err = nilfs_ifile_get_inode_block(
1977                                 ifile, ii->vfs_inode.i_ino, &ibh);
1978                         if (unlikely(err)) {
1979                                 nilfs_warning(sci->sc_super, __func__,
1980                                               "failed to get inode block.\n");
1981                                 return err;
1982                         }
1983                         nilfs_mdt_mark_buffer_dirty(ibh);
1984                         nilfs_mdt_mark_dirty(ifile);
1985                         spin_lock(&nilfs->ns_inode_lock);
1986                         if (likely(!ii->i_bh))
1987                                 ii->i_bh = ibh;
1988                         else
1989                                 brelse(ibh);
1990                         goto retry;
1991                 }
1992
1993                 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1994                 set_bit(NILFS_I_BUSY, &ii->i_state);
1995                 list_del(&ii->i_dirty);
1996                 list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
1997         }
1998         spin_unlock(&nilfs->ns_inode_lock);
1999
2000         return 0;
2001 }
2002
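/*
 * Detach inodes whose blocks have all been written out from the writer's
 * dirty file list and hand them over to the transaction's garbage list
 * for disposal.
 */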
2003 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
2004                                              struct the_nilfs *nilfs)
2005 {
2006         struct nilfs_transaction_info *ti = current->journal_info;
2007         struct nilfs_inode_info *ii, *n;
2008
2009         spin_lock(&nilfs->ns_inode_lock);
2010         list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
2011                 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
2012                     test_bit(NILFS_I_DIRTY, &ii->i_state))
2013                         continue;
2014
2015                 clear_bit(NILFS_I_BUSY, &ii->i_state);
2016                 brelse(ii->i_bh);
2017                 ii->i_bh = NULL;
2018                 list_del(&ii->i_dirty);
2019                 list_add_tail(&ii->i_dirty, &ti->ti_garbage);
2020         }
2021         spin_unlock(&nilfs->ns_inode_lock);
2022 }
2023
2024 /*
2025  * Main procedure of segment constructor
2026  */
2027 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2028 {
2029         struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;
2030         struct page *failed_page;
2031         int err;
2032
2033         sci->sc_stage.scnt = NILFS_ST_INIT;
2034         sci->sc_cno = nilfs->ns_cno;
2035
2036         err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2037         if (unlikely(err))
2038                 goto out;
2039
2040         if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2041                 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2042
2043         if (nilfs_segctor_clean(sci))
2044                 goto out;
2045
2046         do {
2047                 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2048
2049                 err = nilfs_segctor_begin_construction(sci, nilfs);
2050                 if (unlikely(err))
2051                         goto out;
2052
2053                 /* Update time stamp */
2054                 sci->sc_seg_ctime = get_seconds();
2055
2056                 err = nilfs_segctor_collect(sci, nilfs, mode);
2057                 if (unlikely(err))
2058                         goto failed;
2059
2060                 /* Avoid empty segment */
2061                 if (sci->sc_stage.scnt == NILFS_ST_DONE &&
2062                     nilfs_segbuf_empty(sci->sc_curseg)) {
2063                         nilfs_segctor_abort_construction(sci, nilfs, 1);
2064                         goto out;
2065                 }
2066
2067                 err = nilfs_segctor_assign(sci, mode);
2068                 if (unlikely(err))
2069                         goto failed;
2070
2071                 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2072                         nilfs_segctor_fill_in_file_bmap(sci);
2073
2074                 if (mode == SC_LSEG_SR &&
2075                     sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
2076                         err = nilfs_segctor_fill_in_checkpoint(sci);
2077                         if (unlikely(err))
2078                                 goto failed_to_write;
2079
2080                         nilfs_segctor_fill_in_super_root(sci, nilfs);
2081                 }
2082                 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2083
2084                 /* Write partial segments */
2085                 err = nilfs_segctor_prepare_write(sci, &failed_page);
2086                 if (err) {
2087                         nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
2088                         goto failed_to_write;
2089                 }
2090
2091                 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2092                                             nilfs->ns_crc_seed);
2093
2094                 err = nilfs_segctor_write(sci, nilfs);
2095                 if (unlikely(err))
2096                         goto failed_to_write;
2097
2098                 if (sci->sc_stage.scnt == NILFS_ST_DONE ||
2099                     nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
2100                         /*
2101                          * At this point, we avoid double buffering
2102                          * for blocksize < pagesize because the page dirty
2103                          * flag is turned off during the write and dirty
2104                          * buffers are not properly collected for
2105                          * pages crossing segment boundaries.
2106                          */
2107                         err = nilfs_segctor_wait(sci);
2108                         if (err)
2109                                 goto failed_to_write;
2110                 }
2111         } while (sci->sc_stage.scnt != NILFS_ST_DONE);
2112
2113  out:
2114         nilfs_segctor_drop_written_files(sci, nilfs);
2115         return err;
2116
2117  failed_to_write:
2118         if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2119                 nilfs_redirty_inodes(&sci->sc_dirty_files);
2120
2121  failed:
2122         if (nilfs_doing_gc())
2123                 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2124         nilfs_segctor_abort_construction(sci, nilfs, err);
2125         goto out;
2126 }
2127
2128 /**
2129  * nilfs_segctor_start_timer - set timer of background write
2130  * @sci: nilfs_sc_info
2131  *
2132  * If the timer has already been set, it ignores the new request.
2133  * This function MUST be called within a section locking the segment
2134  * semaphore.
2135  */
2136 static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2137 {
2138         spin_lock(&sci->sc_state_lock);
2139         if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2140                 sci->sc_timer.expires = jiffies + sci->sc_interval;
2141                 add_timer(&sci->sc_timer);
2142                 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2143         }
2144         spin_unlock(&sci->sc_state_lock);
2145 }
2146
2147 static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2148 {
2149         spin_lock(&sci->sc_state_lock);
2150         if (!(sci->sc_flush_request & (1 << bn))) {
2151                 unsigned long prev_req = sci->sc_flush_request;
2152
2153                 sci->sc_flush_request |= (1 << bn);
2154                 if (!prev_req)
2155                         wake_up(&sci->sc_wait_daemon);
2156         }
2157         spin_unlock(&sci->sc_state_lock);
2158 }
2159
2160 /**
2161  * nilfs_flush_segment - trigger a segment construction for resource control
2162  * @sb: super block
2163  * @ino: inode number of the file to be flushed out.
2164  */
2165 void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2166 {
2167         struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
2168         struct nilfs_sc_info *sci = nilfs->ns_writer;
2169
2170         if (!sci || nilfs_doing_construction())
2171                 return;
2172         nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2173                                         /* assign bit 0 to data files */
2174 }
2175
2176 struct nilfs_segctor_wait_request {
2177         wait_queue_t    wq;
2178         __u32           seq;
2179         int             err;
2180         atomic_t        done;
2181 };
2182
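/*
 * nilfs_segctor_sync - wait for the completion of a requested construction
 *
 * Queue a wait request tagged with a new sequence number, kick the
 * segctord daemon, and sleep until nilfs_segctor_wakeup() marks the
 * request done or a signal arrives.
 */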
2183 static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2184 {
2185         struct nilfs_segctor_wait_request wait_req;
2186         int err = 0;
2187
2188         spin_lock(&sci->sc_state_lock);
2189         init_wait(&wait_req.wq);
2190         wait_req.err = 0;
2191         atomic_set(&wait_req.done, 0);
2192         wait_req.seq = ++sci->sc_seq_request;
2193         spin_unlock(&sci->sc_state_lock);
2194
2195         init_waitqueue_entry(&wait_req.wq, current);
2196         add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2197         set_current_state(TASK_INTERRUPTIBLE);
2198         wake_up(&sci->sc_wait_daemon);
2199
2200         for (;;) {
2201                 if (atomic_read(&wait_req.done)) {
2202                         err = wait_req.err;
2203                         break;
2204                 }
2205                 if (!signal_pending(current)) {
2206                         schedule();
2207                         continue;
2208                 }
2209                 err = -ERESTARTSYS;
2210                 break;
2211         }
2212         finish_wait(&sci->sc_wait_request, &wait_req.wq);
2213         return err;
2214 }
2215
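/*
 * Complete every queued wait request whose sequence number has been
 * covered by the logs written so far, propagating @err to the waiters.
 */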
2216 static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2217 {
2218         struct nilfs_segctor_wait_request *wrq, *n;
2219         unsigned long flags;
2220
2221         spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2222         list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
2223                                  wq.task_list) {
2224                 if (!atomic_read(&wrq->done) &&
2225                     nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2226                         wrq->err = err;
2227                         atomic_set(&wrq->done, 1);
2228                 }
2229                 if (atomic_read(&wrq->done)) {
2230                         wrq->wq.func(&wrq->wq,
2231                                      TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2232                                      0, NULL);
2233                 }
2234         }
2235         spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2236 }
2237
2238 /**
2239  * nilfs_construct_segment - construct a logical segment
2240  * @sb: super block
2241  *
2242  * Return Value: On success, 0 is returned. On errors, one of the following
2243  * negative error codes is returned.
2244  *
2245  * %-EROFS - Read only filesystem.
2246  *
2247  * %-EIO - I/O error
2248  *
2249  * %-ENOSPC - No space left on device (only in a panic state).
2250  *
2251  * %-ERESTARTSYS - Interrupted.
2252  *
2253  * %-ENOMEM - Insufficient memory available.
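 *
 * Context: must not be called from inside an active transaction, since
 * that would deadlock (see the BUG_ON below).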
2254  */
2255 int nilfs_construct_segment(struct super_block *sb)
2256 {
2257         struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
2258         struct nilfs_sc_info *sci = nilfs->ns_writer;
2259         struct nilfs_transaction_info *ti;
2260         int err;
2261
2262         if (!sci)
2263                 return -EROFS;
2264
2265         /* A call inside transactions causes a deadlock. */
2266         BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2267
2268         err = nilfs_segctor_sync(sci);
2269         return err;
2270 }
2271
2272 /**
2273  * nilfs_construct_dsync_segment - construct a data-only logical segment
2274  * @sb: super block
2275  * @inode: inode whose data blocks should be written out
2276  * @start: start byte offset
2277  * @end: end byte offset (inclusive)
2278  *
2279  * Return Value: On success, 0 is returned. On errors, one of the following
2280  * negative error codes is returned.
2281  *
2282  * %-EROFS - Read only filesystem.
2283  *
2284  * %-EIO - I/O error
2285  *
2286  * %-ENOSPC - No space left on device (only in a panic state).
2287  *
2288  * %-ERESTARTSYS - Interrupted.
2289  *
2290  * %-ENOMEM - Insufficient memory available.
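 *
 * The range is normally written out as a data-only (dsync) log.  If the
 * inode itself is dirty, strict ordering is requested, or the log chain
 * is unclosed or discontinued, a full synchronous construction is done
 * instead.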
2291  */
2292 int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2293                                   loff_t start, loff_t end)
2294 {
2295         struct nilfs_sb_info *sbi = NILFS_SB(sb);
2296         struct the_nilfs *nilfs = sbi->s_nilfs;
2297         struct nilfs_sc_info *sci = nilfs->ns_writer;
2298         struct nilfs_inode_info *ii;
2299         struct nilfs_transaction_info ti;
2300         int err = 0;
2301
2302         if (!sci)
2303                 return -EROFS;
2304
2305         nilfs_transaction_lock(sbi, &ti, 0);
2306
2307         ii = NILFS_I(inode);
2308         if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
2309             nilfs_test_opt(nilfs, STRICT_ORDER) ||
2310             test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2311             nilfs_discontinued(nilfs)) {
2312                 nilfs_transaction_unlock(sbi);
2313                 err = nilfs_segctor_sync(sci);
2314                 return err;
2315         }
2316
2317         spin_lock(&nilfs->ns_inode_lock);
2318         if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2319             !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2320                 spin_unlock(&nilfs->ns_inode_lock);
2321                 nilfs_transaction_unlock(sbi);
2322                 return 0;
2323         }
2324         spin_unlock(&nilfs->ns_inode_lock);
2325         sci->sc_dsync_inode = ii;
2326         sci->sc_dsync_start = start;
2327         sci->sc_dsync_end = end;
2328
2329         err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2330
2331         nilfs_transaction_unlock(sbi);
2332         return err;
2333 }
2334
2335 #define FLUSH_FILE_BIT  (0x1) /* data file only */
2336 #define FLUSH_DAT_BIT   (1 << NILFS_DAT_INO) /* DAT only */
2337
2338 /**
2339  * nilfs_segctor_accept - record accepted sequence count of log-write requests
2340  * @sci: segment constructor object
2341  */
2342 static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2343 {
2344         spin_lock(&sci->sc_state_lock);
2345         sci->sc_seq_accepted = sci->sc_seq_request;
2346         spin_unlock(&sci->sc_state_lock);
2347         del_timer_sync(&sci->sc_timer);
2348 }
2349
2350 /**
2351  * nilfs_segctor_notify - notify the result of request to caller threads
2352  * @sci: segment constructor object
2353  * @mode: mode of log forming
2354  * @err: error code to be notified
2355  */
2356 static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2357 {
2358         /* Clear requests (even when the construction failed) */
2359         spin_lock(&sci->sc_state_lock);
2360
2361         if (mode == SC_LSEG_SR) {
2362                 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2363                 sci->sc_seq_done = sci->sc_seq_accepted;
2364                 nilfs_segctor_wakeup(sci, err);
2365                 sci->sc_flush_request = 0;
2366         } else {
2367                 if (mode == SC_FLUSH_FILE)
2368                         sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2369                 else if (mode == SC_FLUSH_DAT)
2370                         sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2371
2372                 /* re-enable timer if checkpoint creation was not done */
2373                 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2374                     time_before(jiffies, sci->sc_timer.expires))
2375                         add_timer(&sci->sc_timer);
2376         }
2377         spin_unlock(&sci->sc_state_lock);
2378 }
2379
2380 /**
2381  * nilfs_segctor_construct - form logs and write them to disk
2382  * @sci: segment constructor object
2383  * @mode: mode of log forming
2384  */
2385 static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2386 {
2387         struct nilfs_sb_info *sbi = NILFS_SB(sci->sc_super);
2388         struct the_nilfs *nilfs = sbi->s_nilfs;
2389         struct nilfs_super_block **sbp;
2390         int err = 0;
2391
2392         nilfs_segctor_accept(sci);
2393
2394         if (nilfs_discontinued(nilfs))
2395                 mode = SC_LSEG_SR;
2396         if (!nilfs_segctor_confirm(sci))
2397                 err = nilfs_segctor_do_construct(sci, mode);
2398
2399         if (likely(!err)) {
2400                 if (mode != SC_FLUSH_DAT)
2401                         atomic_set(&nilfs->ns_ndirtyblks, 0);
2402                 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2403                     nilfs_discontinued(nilfs)) {
2404                         down_write(&nilfs->ns_sem);
2405                         err = -EIO;
2406                         sbp = nilfs_prepare_super(sbi,
2407                                                   nilfs_sb_will_flip(nilfs));
2408                         if (likely(sbp)) {
2409                                 nilfs_set_log_cursor(sbp[0], nilfs);
2410                                 err = nilfs_commit_super(sbi, NILFS_SB_COMMIT);
2411                         }
2412                         up_write(&nilfs->ns_sem);
2413                 }
2414         }
2415
2416         nilfs_segctor_notify(sci, mode, err);
2417         return err;
2418 }
2419
2420 static void nilfs_construction_timeout(unsigned long data)
2421 {
2422         struct task_struct *p = (struct task_struct *)data;
2423         wake_up_process(p);
2424 }
2425
2426 static void
2427 nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2428 {
2429         struct nilfs_inode_info *ii, *n;
2430
2431         list_for_each_entry_safe(ii, n, head, i_dirty) {
2432                 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2433                         continue;
2434                 list_del_init(&ii->i_dirty);
2435                 iput(&ii->vfs_inode);
2436         }
2437 }
2438
2439 int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2440                          void **kbufs)
2441 {
2442         struct nilfs_sb_info *sbi = NILFS_SB(sb);
2443         struct the_nilfs *nilfs = sbi->s_nilfs;
2444         struct nilfs_sc_info *sci = nilfs->ns_writer;
2445         struct nilfs_transaction_info ti;
2446         int err;
2447
2448         if (unlikely(!sci))
2449                 return -EROFS;
2450
2451         nilfs_transaction_lock(sbi, &ti, 1);
2452
2453         err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2454         if (unlikely(err))
2455                 goto out_unlock;
2456
2457         err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2458         if (unlikely(err)) {
2459                 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2460                 goto out_unlock;
2461         }
2462
2463         sci->sc_freesegs = kbufs[4];
2464         sci->sc_nfreesegs = argv[4].v_nmembs;
2465         list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2466
2467         for (;;) {
2468                 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2469                 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2470
2471                 if (likely(!err))
2472                         break;
2473
2474                 nilfs_warning(sb, __func__,
2475                               "segment construction failed. (err=%d)", err);
2476                 set_current_state(TASK_INTERRUPTIBLE);
2477                 schedule_timeout(sci->sc_interval);
2478         }
2479         if (nilfs_test_opt(nilfs, DISCARD)) {
2480                 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2481                                                  sci->sc_nfreesegs);
2482                 if (ret) {
2483                         printk(KERN_WARNING
2484                                "NILFS warning: error %d on discard request, "
2485                                "turning discards off for the device\n", ret);
2486                         nilfs_clear_opt(nilfs, DISCARD);
2487                 }
2488         }
2489
2490  out_unlock:
2491         sci->sc_freesegs = NULL;
2492         sci->sc_nfreesegs = 0;
2493         nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2494         nilfs_transaction_unlock(sbi);
2495         return err;
2496 }
2497
2498 static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2499 {
2500         struct nilfs_sb_info *sbi = NILFS_SB(sci->sc_super);
2501         struct nilfs_transaction_info ti;
2502
2503         nilfs_transaction_lock(sbi, &ti, 0);
2504         nilfs_segctor_construct(sci, mode);
2505
2506         /*
2507          * An unclosed segment should be retried.  We do this using sc_timer:
2508          * its timeout invokes a complete construction, which closes the
2509          * current logical segment.
2510          */
2511         if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2512                 nilfs_segctor_start_timer(sci);
2513
2514         nilfs_transaction_unlock(sbi);
2515 }
2516
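/*
 * Service a prioritized flush request immediately, bypassing the normal
 * accept/notify sequencing, and clear the corresponding flush bits once
 * the construction has run.
 */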
2517 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2518 {
2519         int mode = 0;
2520         int err;
2521
2522         spin_lock(&sci->sc_state_lock);
2523         mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2524                 SC_FLUSH_DAT : SC_FLUSH_FILE;
2525         spin_unlock(&sci->sc_state_lock);
2526
2527         if (mode) {
2528                 err = nilfs_segctor_do_construct(sci, mode);
2529
2530                 spin_lock(&sci->sc_state_lock);
2531                 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2532                         ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2533                 spin_unlock(&sci->sc_state_lock);
2534         }
2535         clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2536 }
2537
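/*
 * Choose the lightest construction mode that satisfies the pending flush
 * requests; fall back to a full construction with a super root when the
 * current logical segment has been left unclosed for too long.
 */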
2538 static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2539 {
2540         if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2541             time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2542                 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2543                         return SC_FLUSH_FILE;
2544                 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2545                         return SC_FLUSH_DAT;
2546         }
2547         return SC_LSEG_SR;
2548 }
2549
2550 /**
2551  * nilfs_segctor_thread - main loop of the segment constructor thread.
2552  * @arg: pointer to a struct nilfs_sc_info.
2553  *
2554  * nilfs_segctor_thread() initializes a timer and serves as a daemon
2555  * to execute segment constructions.
2556  */
2557 static int nilfs_segctor_thread(void *arg)
2558 {
2559         struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2560         struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;
2561         int timeout = 0;
2562
2563         sci->sc_timer.data = (unsigned long)current;
2564         sci->sc_timer.function = nilfs_construction_timeout;
2565
2566         /* start sync. */
2567         sci->sc_task = current;
2568         wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2569         printk(KERN_INFO
2570                "segctord starting. Construction interval = %lu seconds, "
2571                "CP frequency < %lu seconds\n",
2572                sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2573
2574         spin_lock(&sci->sc_state_lock);
2575  loop:
2576         for (;;) {
2577                 int mode;
2578
2579                 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2580                         goto end_thread;
2581
2582                 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2583                         mode = SC_LSEG_SR;
2584                 else if (!sci->sc_flush_request)
2585                         break;
2586                 else
2587                         mode = nilfs_segctor_flush_mode(sci);
2588
2589                 spin_unlock(&sci->sc_state_lock);
2590                 nilfs_segctor_thread_construct(sci, mode);
2591                 spin_lock(&sci->sc_state_lock);
2592                 timeout = 0;
2593         }
2594
2595
2596         if (freezing(current)) {
2597                 spin_unlock(&sci->sc_state_lock);
2598                 refrigerator();
2599                 spin_lock(&sci->sc_state_lock);
2600         } else {
2601                 DEFINE_WAIT(wait);
2602                 int should_sleep = 1;
2603
2604                 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2605                                 TASK_INTERRUPTIBLE);
2606
2607                 if (sci->sc_seq_request != sci->sc_seq_done)
2608                         should_sleep = 0;
2609                 else if (sci->sc_flush_request)
2610                         should_sleep = 0;
2611                 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2612                         should_sleep = time_before(jiffies,
2613                                         sci->sc_timer.expires);
2614
2615                 if (should_sleep) {
2616                         spin_unlock(&sci->sc_state_lock);
2617                         schedule();
2618                         spin_lock(&sci->sc_state_lock);
2619                 }
2620                 finish_wait(&sci->sc_wait_daemon, &wait);
2621                 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2622                            time_after_eq(jiffies, sci->sc_timer.expires));
2623
2624                 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2625                         set_nilfs_discontinued(nilfs);
2626         }
2627         goto loop;
2628
2629  end_thread:
2630         spin_unlock(&sci->sc_state_lock);
2631
2632         /* end sync. */
2633         sci->sc_task = NULL;
2634         wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2635         return 0;
2636 }
2637
2638 static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2639 {
2640         struct task_struct *t;
2641
2642         t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2643         if (IS_ERR(t)) {
2644                 int err = PTR_ERR(t);
2645
2646                 printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
2647                        err);
2648                 return err;
2649         }
2650         wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2651         return 0;
2652 }
2653
2654 static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2655         __acquires(&sci->sc_state_lock)
2656         __releases(&sci->sc_state_lock)
2657 {
2658         sci->sc_state |= NILFS_SEGCTOR_QUIT;
2659
2660         while (sci->sc_task) {
2661                 wake_up(&sci->sc_wait_daemon);
2662                 spin_unlock(&sci->sc_state_lock);
2663                 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2664                 spin_lock(&sci->sc_state_lock);
2665         }
2666 }
2667
2668 /*
2669  * Setup & clean-up functions
2670  */
2671 static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi,
2672                                                struct nilfs_root *root)
2673 {
2674         struct the_nilfs *nilfs = sbi->s_nilfs;
2675         struct nilfs_sc_info *sci;
2676
2677         sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2678         if (!sci)
2679                 return NULL;
2680
2681         sci->sc_super = sbi->s_super;
2682
2683         nilfs_get_root(root);
2684         sci->sc_root = root;
2685
2686         init_waitqueue_head(&sci->sc_wait_request);
2687         init_waitqueue_head(&sci->sc_wait_daemon);
2688         init_waitqueue_head(&sci->sc_wait_task);
2689         spin_lock_init(&sci->sc_state_lock);
2690         INIT_LIST_HEAD(&sci->sc_dirty_files);
2691         INIT_LIST_HEAD(&sci->sc_segbufs);
2692         INIT_LIST_HEAD(&sci->sc_write_logs);
2693         INIT_LIST_HEAD(&sci->sc_gc_inodes);
2694         INIT_LIST_HEAD(&sci->sc_copied_buffers);
2695         init_timer(&sci->sc_timer);
2696
2697         sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2698         sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2699         sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2700
2701         if (nilfs->ns_interval)
2702                 sci->sc_interval = nilfs->ns_interval;
2703         if (nilfs->ns_watermark)
2704                 sci->sc_watermark = nilfs->ns_watermark;
2705         return sci;
2706 }
2707
2708 static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2709 {
2710         int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2711
2712         /* The segctord thread was stopped and its timer was removed,
2713            but some tasks may remain. */
2714         do {
2715                 struct nilfs_sb_info *sbi = NILFS_SB(sci->sc_super);
2716                 struct nilfs_transaction_info ti;
2717
2718                 nilfs_transaction_lock(sbi, &ti, 0);
2719                 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2720                 nilfs_transaction_unlock(sbi);
2721
2722         } while (ret && retrycount-- > 0);
2723 }
2724
2725 /**
2726  * nilfs_segctor_destroy - destroy the segment constructor.
2727  * @sci: nilfs_sc_info
2728  *
2729  * nilfs_segctor_destroy() kills the segctord thread and frees
2730  * the nilfs_sc_info struct.
2731  * Caller must hold the segment semaphore.
2732  */
2733 static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2734 {
2735         struct the_nilfs *nilfs = NILFS_SB(sci->sc_super)->s_nilfs;
2736         int flag;
2737
2738         up_write(&nilfs->ns_segctor_sem);
2739
2740         spin_lock(&sci->sc_state_lock);
2741         nilfs_segctor_kill_thread(sci);
2742         flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2743                 || sci->sc_seq_request != sci->sc_seq_done);
2744         spin_unlock(&sci->sc_state_lock);
2745
2746         if (flag || !nilfs_segctor_confirm(sci))
2747                 nilfs_segctor_write_out(sci);
2748
2749         WARN_ON(!list_empty(&sci->sc_copied_buffers));
2750
2751         if (!list_empty(&sci->sc_dirty_files)) {
2752                 nilfs_warning(sci->sc_super, __func__,
2753                               "dirty file(s) after the final construction\n");
2754                 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2755         }
2756
2757         WARN_ON(!list_empty(&sci->sc_segbufs));
2758         WARN_ON(!list_empty(&sci->sc_write_logs));
2759
2760         nilfs_put_root(sci->sc_root);
2761
2762         down_write(&nilfs->ns_segctor_sem);
2763
2764         del_timer_sync(&sci->sc_timer);
2765         kfree(sci);
2766 }
2767
2768 /**
2769  * nilfs_attach_segment_constructor - attach a segment constructor
2770  * @sbi: nilfs_sb_info
2771  * @root: root object of the current filesystem tree
2772  *
2773  * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
2774  * initializes it, and starts the segment constructor.
2775  *
2776  * Return Value: On success, 0 is returned. On error, one of the following
2777  * negative error codes is returned.
2778  *
2779  * %-ENOMEM - Insufficient memory available.
2780  */
2781 int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
2782                                      struct nilfs_root *root)
2783 {
2784         struct the_nilfs *nilfs = sbi->s_nilfs;
2785         int err;
2786
2787         if (nilfs->ns_writer) {
2788                 /*
2789                  * This happens if the filesystem was remounted
2790                  * read/write after nilfs_error degenerated it into a
2791                  * read-only mount.
2792                  */
2793                 nilfs_detach_segment_constructor(sbi);
2794         }
2795
2796         nilfs->ns_writer = nilfs_segctor_new(sbi, root);
2797         if (!nilfs->ns_writer)
2798                 return -ENOMEM;
2799
2800         err = nilfs_segctor_start_thread(nilfs->ns_writer);
2801         if (err) {
2802                 kfree(nilfs->ns_writer);
2803                 nilfs->ns_writer = NULL;
2804         }
2805         return err;
2806 }
2807
2808 /**
2809  * nilfs_detach_segment_constructor - destroy the segment constructor
2810  * @sbi: nilfs_sb_info
2811  *
2812  * nilfs_detach_segment_constructor() kills the segment constructor daemon,
2813  * frees the struct nilfs_sc_info, and destroys the dirty file list.
2814  */
2815 void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
2816 {
2817         struct the_nilfs *nilfs = sbi->s_nilfs;
2818         LIST_HEAD(garbage_list);
2819
2820         down_write(&nilfs->ns_segctor_sem);
2821         if (nilfs->ns_writer) {
2822                 nilfs_segctor_destroy(nilfs->ns_writer);
2823                 nilfs->ns_writer = NULL;
2824         }
2825
2826         /* Forcibly free the list of dirty files */
2827         spin_lock(&nilfs->ns_inode_lock);
2828         if (!list_empty(&nilfs->ns_dirty_files)) {
2829                 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2830                 nilfs_warning(sbi->s_super, __func__,
2831                               "Non-empty dirty list after the last "
2832                               "segment construction\n");
2833         }
2834         spin_unlock(&nilfs->ns_inode_lock);
2835         up_write(&nilfs->ns_segctor_sem);
2836
2837         nilfs_dispose_list(nilfs, &garbage_list, 1);
2838 }