2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/statfs.h>
16 #include <linux/seq_file.h>
17 #include <linux/mount.h>
18 #include <linux/kthread.h>
19 #include <linux/delay.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/crc32.h>
22 #include <linux/lm_interface.h>
23 #include <linux/time.h>
44 * gfs2_write_inode - Make sure the inode is stable on the disk
46 * @sync: synchronous write flag
/*
 * Writes the dinode block back only when the in-core atime is newer
 * than the on-disk copy, under an exclusive inode glock and a
 * single-dinode transaction.
 * NOTE(review): excerpted listing -- error-path lines (goto/return,
 * brelse, gfs2_trans_end, closing braces) are not visible here.
 */
51 static int gfs2_write_inode(struct inode *inode, int sync)
53 struct gfs2_inode *ip = GFS2_I(inode);
54 struct gfs2_sbd *sdp = GFS2_SB(inode);
55 struct gfs2_holder gh;
56 struct buffer_head *bh;
57 struct timespec atime;
58 struct gfs2_dinode *di;
/* Bail out for non-user inodes, and for writeback entered from
 * memory reclaim (PF_MEMALLOC), which must not block on glocks. */
61 /* Check this is a "normal" inode, etc */
62 if (!test_bit(GIF_USER, &ip->i_flags) ||
63 (current->flags & PF_MEMALLOC))
/* Exclusive glock + transaction covering one dinode block. */
65 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
68 ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
71 ret = gfs2_meta_inode_buffer(ip, &bh);
/* Decode the big-endian on-disk atime and compare against the
 * in-core value; rewrite the dinode only if ours is newer. */
73 di = (struct gfs2_dinode *)bh->b_data;
74 atime.tv_sec = be64_to_cpu(di->di_atime);
75 atime.tv_nsec = be32_to_cpu(di->di_atime_nsec);
76 if (timespec_compare(&inode->i_atime, &atime) > 0) {
77 gfs2_trans_add_bh(ip->i_gl, bh, 1);
78 gfs2_dinode_out(ip, bh->b_data);
84 gfs2_glock_dq_uninit(&gh);
/* Flush the log for this glock so the dinode reaches disk. */
87 gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
92 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
93 * @sdp: the filesystem
/*
 * Syncs quota/statfs state, quiesces the transaction glock, shuts the
 * log down and marks the journal dead, then releases quota resources.
 * NOTE(review): excerpted listing -- the error return path after the
 * glock acquisition is not fully visible.
 */
98 static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
100 struct gfs2_holder t_gh;
/* Push pending quota and statfs changes out before quiescing. */
103 gfs2_quota_sync(sdp);
104 gfs2_statfs_sync(sdp);
/* Take the transaction glock (shared, uncached) to stop new
 * transactions filesystem-wide. */
106 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
/* A failure here is tolerated only if we are already shutting down. */
108 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
/* Write out all dirty metadata, then retire the journal. */
111 gfs2_meta_syncfs(sdp);
112 gfs2_log_shutdown(sdp);
114 clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
117 gfs2_glock_dq_uninit(&t_gh);
119 gfs2_quota_cleanup(sdp);
125 * gfs2_put_super - Unmount the filesystem
126 * @sb: The VFS superblock
/*
 * Full unmount teardown: undo any freeze, stop the daemon kthreads,
 * remount read-only if needed, then release every inode, glock and
 * lockspace resource in reverse order of acquisition.
 * NOTE(review): excerpted listing -- error handling after
 * gfs2_make_fs_ro() is not visible here.
 */
130 static void gfs2_put_super(struct super_block *sb)
132 struct gfs2_sbd *sdp = sb->s_fs_info;
135 /* Unfreeze the filesystem, if we need to */
137 mutex_lock(&sdp->sd_freeze_lock)
138 if (sdp->sd_freeze_count)
139 gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
140 mutex_unlock(&sdp->sd_freeze_lock);
/* Stop the per-sb daemons before tearing down what they touch. */
142 kthread_stop(sdp->sd_quotad_process);
143 kthread_stop(sdp->sd_logd_process);
144 kthread_stop(sdp->sd_recoverd_process);
/* A read-write mount must be quiesced to read-only before teardown. */
146 if (!(sb->s_flags & MS_RDONLY)) {
147 error = gfs2_make_fs_ro(sdp);
151 /* At this point, we're through modifying the disk */
/* Drop references to the internal system inodes. */
155 iput(sdp->sd_jindex);
156 iput(sdp->sd_inum_inode);
157 iput(sdp->sd_statfs_inode);
158 iput(sdp->sd_rindex);
159 iput(sdp->sd_quota_inode);
161 gfs2_glock_put(sdp->sd_rename_gl);
162 gfs2_glock_put(sdp->sd_trans_gl);
/* Spectator mounts never acquired the per-journal resources. */
164 if (!sdp->sd_args.ar_spectator) {
165 gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
166 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
167 gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
168 gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
169 gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
170 iput(sdp->sd_ir_inode);
171 iput(sdp->sd_sc_inode);
172 iput(sdp->sd_qc_inode);
175 gfs2_glock_dq_uninit(&sdp->sd_live_gh);
176 gfs2_clear_rgrpd(sdp);
177 gfs2_jindex_free(sdp);
178 /* Take apart glock structures and buffer lists */
179 gfs2_gl_hash_clear(sdp);
180 /* Unmount the locking protocol */
181 gfs2_lm_unmount(sdp);
183 /* At this point, we're through participating in the lockspace */
184 gfs2_sys_fs_del(sdp);
190 * @sb: the superblock
/*
 * gfs2_write_super - VFS ->write_super hook.
 * NOTE(review): the body is not visible in this excerpt; the visible
 * interface shows it takes the superblock and returns nothing.
 */
194 static void gfs2_write_super(struct super_block *sb)
200 * gfs2_sync_fs - sync the filesystem
201 * @sb: the superblock
203 * Flushes the log to disk.
206 static int gfs2_sync_fs(struct super_block *sb, int wait)
/* Only flush for a waiting (synchronous) sync, and only if the
 * superblock private info is still attached. NULL glock means
 * "flush everything", per the call with no specific glock. */
209 if (wait && sb->s_fs_info)
210 gfs2_log_flush(sb->s_fs_info, NULL);
215 * gfs2_write_super_lockfs - prevent further writes to the filesystem
216 * @sb: the VFS structure for the filesystem
/*
 * Freeze entry point: refuses to freeze a shut-down filesystem, then
 * retries gfs2_freeze_fs(), logging why each attempt failed.
 * NOTE(review): excerpted listing -- the retry loop structure and the
 * error-code dispatch between the fs_err() calls are not visible.
 */
220 static void gfs2_write_super_lockfs(struct super_block *sb)
222 struct gfs2_sbd *sdp = sb->s_fs_info;
/* Never try to freeze a filesystem that has been withdrawn. */
225 if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
229 error = gfs2_freeze_fs(sdp);
235 fs_err(sdp, "waiting for recovery before freeze\n");
239 fs_err(sdp, "error freezing FS: %d\n", error);
243 fs_err(sdp, "retrying...\n");
249 * gfs2_unlockfs - reallow writes to the filesystem
250 * @sb: the VFS structure for the filesystem
/* Thaw: simply delegates to gfs2_unfreeze_fs() for this sb. */
254 static void gfs2_unlockfs(struct super_block *sb)
256 gfs2_unfreeze_fs(sb->s_fs_info);
260 * statfs_slow_fill - fill in the sc totals for a given resource group
262 * @sc: the sc structure
264 * Returns: 0 on success, -ESTALE if the LVB is invalid
/*
 * Verifies one resource group and accumulates its block/free/dinode
 * counts into the running statfs totals.
 * (Kerneldoc above originally said "statfs_fill"; the function is
 * statfs_slow_fill.)
 */
267 static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
268 struct gfs2_statfs_change_host *sc)
270 gfs2_rgrp_verify(rgd);
271 sc->sc_total += rgd->rd_data;
272 sc->sc_free += rgd->rd_free;
273 sc->sc_dinodes += rgd->rd_dinodes;
278 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
279 * @sdp: the filesystem
280 * @sc: the sc info that will be returned
282 * Any error (other than a signal) will cause this routine to fall back
283 * to the synchronous version.
285 * FIXME: This really shouldn't busy wait like this.
/*
 * Walks every resource group, pipelining up to 64 async glock
 * acquisitions at a time; as each glock completes, its rgrp counts are
 * folded into @sc via statfs_slow_fill().
 * NOTE(review): excerpted listing -- the outer polling loop, the
 * "done" bookkeeping and the kfree(gha) cleanup are not visible.
 */
290 static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
292 struct gfs2_holder ri_gh;
293 struct gfs2_rgrpd *rgd_next;
294 struct gfs2_holder *gha, *gh;
/* Number of glock requests kept in flight simultaneously. */
295 unsigned int slots = 64;
300 memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
301 gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
/* Hold the rindex so the rgrp list is stable while we walk it. */
305 error = gfs2_rindex_hold(sdp, &ri_gh);
309 rgd_next = gfs2_rgrpd_get_first(sdp);
314 for (x = 0; x < slots; x++) {
/* Slot has a pending request that has now completed: harvest it. */
317 if (gh->gh_gl && gfs2_glock_poll(gh)) {
318 err = gfs2_glock_wait(gh);
320 gfs2_holder_uninit(gh);
324 error = statfs_slow_fill(
325 gh->gh_gl->gl_object, sc);
326 gfs2_glock_dq_uninit(gh);
/* Slot is free and rgrps remain: launch the next async request. */
332 else if (rgd_next && !error) {
333 error = gfs2_glock_nq_init(rgd_next->rd_gl,
337 rgd_next = gfs2_rgrpd_get_next(rgd_next);
/* Allow the user to interrupt the (busy-waiting) scan. */
341 if (signal_pending(current))
342 error = -ERESTARTSYS;
351 gfs2_glock_dq_uninit(&ri_gh);
359 * gfs2_statfs_i - Do a statfs
360 * @sdp: the filesystem
361 * @sg: the sg structure
/*
 * Fast statfs: combines the master statfs record with this node's
 * local delta under sd_statfs_spin, then clamps obviously
 * inconsistent results (free > total, negative dinode count).
 */
366 static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
368 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
369 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
371 spin_lock(&sdp->sd_statfs_spin);
/* Add the node-local (not yet synced) changes on top of the master. */
374 sc->sc_total += l_sc->sc_total;
375 sc->sc_free += l_sc->sc_free;
376 sc->sc_dinodes += l_sc->sc_dinodes;
378 spin_unlock(&sdp->sd_statfs_spin);
/* Sanity-clamp: local deltas can transiently overshoot the master. */
382 if (sc->sc_free > sc->sc_total)
383 sc->sc_free = sc->sc_total;
384 if (sc->sc_dinodes < 0)
391 * gfs2_statfs - Gather and return stats about the filesystem
392 * @sb: The superblock
393 * @statfsbuf: The buffer
395 * Returns: 0 on success or error code
/*
 * VFS ->statfs hook: picks the slow (per-rgrp, async-lock) or fast
 * (master+local counters) gathering path based on the statfs_slow
 * tunable, then translates the result into struct kstatfs.
 */
398 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
400 struct super_block *sb = dentry->d_inode->i_sb;
401 struct gfs2_sbd *sdp = sb->s_fs_info;
402 struct gfs2_statfs_change_host sc;
405 if (gfs2_tune_get(sdp, gt_statfs_slow))
406 error = gfs2_statfs_slow(sdp, &sc);
408 error = gfs2_statfs_i(sdp, &sc);
413 buf->f_type = GFS2_MAGIC;
414 buf->f_bsize = sdp->sd_sb.sb_bsize;
415 buf->f_blocks = sc.sc_total;
416 buf->f_bfree = sc.sc_free;
417 buf->f_bavail = sc.sc_free;
/* GFS2 has no static inode table: any free block can become a
 * dinode, hence f_files/f_ffree are derived from free blocks. */
418 buf->f_files = sc.sc_dinodes + sc.sc_free;
419 buf->f_ffree = sc.sc_free;
420 buf->f_namelen = GFS2_FNAMESIZE;
426 * gfs2_remount_fs - called when the FS is remounted
427 * @sb: the filesystem
428 * @flags: the remount flags
429 * @data: extra data passed in (not used right now)
/*
 * Re-parses mount options, then handles ro<->rw transitions by
 * delegating to gfs2_make_fs_ro()/gfs2_make_fs_rw().
 * NOTE(review): excerpted listing -- the spectator-mount rejection
 * branch and the final return are not visible.
 */
434 static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
436 struct gfs2_sbd *sdp = sb->s_fs_info;
439 error = gfs2_mount_args(sdp, data, 1);
/* Spectator mounts are read-only by definition. */
443 if (sdp->sd_args.ar_spectator)
/* rw -> ro transition. */
446 if (*flags & MS_RDONLY) {
447 if (!(sb->s_flags & MS_RDONLY))
448 error = gfs2_make_fs_ro(sdp);
/* ro -> rw transition. */
449 } else if (!(*flags & MS_RDONLY) &&
450 (sb->s_flags & MS_RDONLY)) {
451 error = gfs2_make_fs_rw(sdp);
459 * gfs2_drop_inode - Drop an inode (test for remote unlink)
460 * @inode: The inode to drop
/*
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this
 * node still having the inode open.  Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock.  If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event.  This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */
473 static void gfs2_drop_inode(struct inode *inode)
475 struct gfs2_inode *ip = GFS2_I(inode);
/* Only user inodes that still appear linked need the demote check. */
477 if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
478 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
/* A pending demote on the iopen glock signals a remote unlink. */
479 if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
/* Fall through to the generic VFS drop behaviour. */
482 generic_drop_inode(inode);
486 * gfs2_clear_inode - Deallocate an inode when VFS is done with it
487 * @inode: The VFS inode
/*
 * Detaches and releases the inode's glocks.  Only "real" inodes own
 * glocks; address-space-only inodes are skipped.
 */
491 static void gfs2_clear_inode(struct inode *inode)
493 struct gfs2_inode *ip = GFS2_I(inode);
495 /* This tells us its a "real" inode and not one which only
496 * serves to contain an address space (see rgrp.c, meta_io.c)
497 * which therefore doesn't have its own glocks.
/* Break the glock -> inode back-pointers before dropping refs so no
 * callback can reach a dying inode. */
499 if (test_bit(GIF_USER, &ip->i_flags)) {
500 ip->i_gl->gl_object = NULL;
501 gfs2_glock_put(ip->i_gl);
503 if (ip->i_iopen_gh.gh_gl) {
504 ip->i_iopen_gh.gh_gl->gl_object = NULL;
505 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
/*
 * is_ancestor - test whether @d2 is an ancestor of @d1 in the dcache.
 * Walks @d1 upward until the root is reached.
 * NOTE(review): the loop body (parent step and match test) is not
 * visible in this excerpt -- confirm against the full source.
 */
510 static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
516 } while (!IS_ROOT(d1));
521 * gfs2_show_options - Show mount options for /proc/mounts
522 * @s: seq_file structure
525 * Returns: 0 on success or error code
/*
 * Emits each non-default mount option as ",name" or ",name=value",
 * mirroring the gfs2_args fields parsed at mount time.
 */
528 static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
530 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
531 struct gfs2_args *args = &sdp->sd_args;
/* "meta" is implied when this mount is rooted inside the master dir. */
533 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
534 seq_printf(s, ",meta");
535 if (args->ar_lockproto[0])
536 seq_printf(s, ",lockproto=%s", args->ar_lockproto);
537 if (args->ar_locktable[0])
538 seq_printf(s, ",locktable=%s", args->ar_locktable);
539 if (args->ar_hostdata[0])
540 seq_printf(s, ",hostdata=%s", args->ar_hostdata);
541 if (args->ar_spectator)
542 seq_printf(s, ",spectator");
543 if (args->ar_ignore_local_fs)
544 seq_printf(s, ",ignore_local_fs");
545 if (args->ar_localflocks)
546 seq_printf(s, ",localflocks");
547 if (args->ar_localcaching)
548 seq_printf(s, ",localcaching");
550 seq_printf(s, ",debug");
551 if (args->ar_upgrade)
552 seq_printf(s, ",upgrade");
553 if (args->ar_posix_acl)
554 seq_printf(s, ",acl");
/* quota= prints the symbolic state only when it differs from default. */
555 if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
557 switch (args->ar_quota) {
561 case GFS2_QUOTA_ACCOUNT:
571 seq_printf(s, ",quota=%s", state);
573 if (args->ar_suiddir)
574 seq_printf(s, ",suiddir");
/* data= likewise maps the journaling mode to a string. */
575 if (args->ar_data != GFS2_DATA_DEFAULT) {
577 switch (args->ar_data) {
578 case GFS2_DATA_WRITEBACK:
581 case GFS2_DATA_ORDERED:
588 seq_printf(s, ",data=%s", state);
595 * We have to (at the moment) hold the inodes main lock to cover
596 * the gap between unlocking the shared lock on the iopen lock and
597 * taking the exclusive lock. I'd rather do a shared -> exclusive
598 * conversion on the iopen lock, but we can change that later. This
599 * is safe, just less efficient.
/*
 * gfs2_delete_inode - final deallocation of an unlinked inode.
 * Upgrades the iopen glock to exclusive (try-lock: another node may
 * win and take over deallocation), then frees directory hash blocks,
 * extended attributes, data blocks and finally the dinode itself.
 * NOTE(review): excerpted listing -- the goto-based error ladder
 * (out_truncate/out_unlock/out labels) is only partially visible.
 */
602 static void gfs2_delete_inode(struct inode *inode)
604 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
605 struct gfs2_inode *ip = GFS2_I(inode);
606 struct gfs2_holder gh;
/* Address-space-only inodes have nothing on disk to deallocate. */
609 if (!test_bit(GIF_USER, &ip->i_flags))
612 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
613 if (unlikely(error)) {
614 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
/* Drop the shared iopen hold, then try to retake it exclusively.
 * TRY_1CB means failure hands deallocation to another node. */
618 gfs2_glock_dq_wait(&ip->i_iopen_gh);
619 gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
620 error = gfs2_glock_nq(&ip->i_iopen_gh);
/* Exhash directories keep their hash table in separate blocks. */
624 if (S_ISDIR(inode->i_mode) &&
625 (ip->i_diskflags & GFS2_DIF_EXHASH)) {
626 error = gfs2_dir_exhash_dealloc(ip);
632 error = gfs2_ea_dealloc(ip);
/* Stuffed inodes hold their data inside the dinode block itself. */
637 if (!gfs2_is_stuffed(ip)) {
638 error = gfs2_file_dealloc(ip);
643 error = gfs2_dinode_dealloc(ip);
648 error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
651 /* Needs to be done before glock release & also in a transaction */
652 truncate_inode_pages(&inode->i_data, 0);
/* Cleanup: release the iopen glock only if we actually hold it. */
656 if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
657 gfs2_glock_dq(&ip->i_iopen_gh);
658 gfs2_holder_uninit(&ip->i_iopen_gh);
659 gfs2_glock_dq_uninit(&gh);
/* GLR_TRYFAILED is expected when another node took over. */
660 if (error && error != GLR_TRYFAILED)
661 fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
663 truncate_inode_pages(&inode->i_data, 0);
/*
 * gfs2_alloc_inode - VFS ->alloc_inode: allocate a gfs2_inode from
 * the dedicated slab cache.
 * NOTE(review): the field initialisation and return lines are not
 * visible in this excerpt.
 */
667 static struct inode *gfs2_alloc_inode(struct super_block *sb)
669 struct gfs2_inode *ip;
671 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
/* gfs2_destroy_inode - VFS ->destroy_inode: return the embedded
 * gfs2_inode to its slab cache (inode is the first member, so the
 * pointer cast-free free is valid). */
679 static void gfs2_destroy_inode(struct inode *inode)
681 kmem_cache_free(gfs2_inode_cachep, inode);
684 const struct super_operations gfs2_super_ops = {
685 .alloc_inode = gfs2_alloc_inode,
686 .destroy_inode = gfs2_destroy_inode,
687 .write_inode = gfs2_write_inode,
688 .delete_inode = gfs2_delete_inode,
689 .put_super = gfs2_put_super,
690 .write_super = gfs2_write_super,
691 .sync_fs = gfs2_sync_fs,
692 .write_super_lockfs = gfs2_write_super_lockfs,
693 .unlockfs = gfs2_unlockfs,
694 .statfs = gfs2_statfs,
695 .remount_fs = gfs2_remount_fs,
696 .clear_inode = gfs2_clear_inode,
697 .drop_inode = gfs2_drop_inode,
698 .show_options = gfs2_show_options,