1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
5 */
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/sched/signal.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/statfs.h>
16 #include <linux/seq_file.h>
17 #include <linux/mount.h>
18 #include <linux/kthread.h>
19 #include <linux/delay.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/crc32.h>
22 #include <linux/time.h>
23 #include <linux/wait.h>
24 #include <linux/writeback.h>
25 #include <linux/backing-dev.h>
26 #include <linux/kernel.h>
49 EVICT_SHOULD_SKIP_DELETE,
50 EVICT_SHOULD_DEFER_DELETE,
54 * gfs2_jindex_free - Clear all the journal index information
55 * @sdp: The GFS2 superblock
59 void gfs2_jindex_free(struct gfs2_sbd *sdp)
61 struct list_head list;
62 struct gfs2_jdesc *jd;
64 spin_lock(&sdp->sd_jindex_spin);
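/*
 * Move the whole journal list onto a private local list while holding
 * the jindex spinlock, so the entries can be freed afterwards without
 * the lock held.
 */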
65 list_add(&list, &sdp->sd_jindex_list);
66 list_del_init(&sdp->sd_jindex_list);
68 spin_unlock(&sdp->sd_jindex_spin);
70 down_write(&sdp->sd_log_flush_lock);
72 up_write(&sdp->sd_log_flush_lock);
74 while (!list_empty(&list)) {
75 jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
76 BUG_ON(jd->jd_log_bio);
77 gfs2_free_journal_extents(jd);
78 list_del(&jd->jd_list);
85 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
87 struct gfs2_jdesc *jd;
89 list_for_each_entry(jd, head, jd_list) {
90 if (jd->jd_jid == jid)
96 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
98 struct gfs2_jdesc *jd;
100 spin_lock(&sdp->sd_jindex_spin);
101 jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
102 spin_unlock(&sdp->sd_jindex_spin);
107 int gfs2_jdesc_check(struct gfs2_jdesc *jd)
109 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
110 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
111 u64 size = i_size_read(jd->jd_inode);
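/* Sanity-check the journal size: it must lie between 8 MiB and 1 GiB. */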
113 if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
116 jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
118 if (gfs2_write_alloc_required(ip, 0, size)) {
119 gfs2_consist_inode(ip);
127 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
128 * @sdp: the filesystem
133 int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
135 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
136 struct gfs2_glock *j_gl = ip->i_gl;
139 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
140 if (gfs2_withdrawing_or_withdrawn(sdp))
143 if (sdp->sd_log_sequence == 0) {
144 fs_err(sdp, "unknown status of our own journal jid %d",
145 sdp->sd_lockstruct.ls_jid);
149 error = gfs2_quota_init(sdp);
150 if (!error && gfs2_withdrawing_or_withdrawn(sdp))
153 set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
157 void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
159 const struct gfs2_statfs_change *str = buf;
161 sc->sc_total = be64_to_cpu(str->sc_total);
162 sc->sc_free = be64_to_cpu(str->sc_free);
163 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
166 void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
168 struct gfs2_statfs_change *str = buf;
170 str->sc_total = cpu_to_be64(sc->sc_total);
171 str->sc_free = cpu_to_be64(sc->sc_free);
172 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
175 int gfs2_statfs_init(struct gfs2_sbd *sdp)
177 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
178 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
179 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
180 struct buffer_head *m_bh;
181 struct gfs2_holder gh;
184 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
189 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
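/*
 * Spectator mounts have no local statfs-change file, so only the master
 * statfs counters are read in; otherwise this node's local change
 * counters are read in as well.
 */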
193 if (sdp->sd_args.ar_spectator) {
194 spin_lock(&sdp->sd_statfs_spin);
195 gfs2_statfs_change_in(m_sc, m_bh->b_data +
196 sizeof(struct gfs2_dinode));
197 spin_unlock(&sdp->sd_statfs_spin);
199 spin_lock(&sdp->sd_statfs_spin);
200 gfs2_statfs_change_in(m_sc, m_bh->b_data +
201 sizeof(struct gfs2_dinode));
202 gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
203 sizeof(struct gfs2_dinode));
204 spin_unlock(&sdp->sd_statfs_spin);
210 gfs2_glock_dq_uninit(&gh);
214 void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
217 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
218 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
219 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
223 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
225 spin_lock(&sdp->sd_statfs_spin);
226 l_sc->sc_total += total;
227 l_sc->sc_free += free;
228 l_sc->sc_dinodes += dinodes;
229 gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
230 sizeof(struct gfs2_dinode));
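/*
 * With the statfs_percent mount option, request a sync of the local
 * change counters into the master statfs file once the accumulated
 * local free-space delta exceeds that percentage of the master's
 * free-block count.
 */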
231 if (sdp->sd_args.ar_statfs_percent) {
232 x = 100 * l_sc->sc_free;
233 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
234 if (x >= y || x <= -y)
237 spin_unlock(&sdp->sd_statfs_spin);
240 gfs2_wake_up_statfs(sdp);
243 void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
245 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
246 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
247 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
248 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
250 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
251 gfs2_trans_add_meta(m_ip->i_gl, m_bh);
253 spin_lock(&sdp->sd_statfs_spin);
254 m_sc->sc_total += l_sc->sc_total;
255 m_sc->sc_free += l_sc->sc_free;
256 m_sc->sc_dinodes += l_sc->sc_dinodes;
257 memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
258 memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
259 0, sizeof(struct gfs2_statfs_change));
260 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
261 spin_unlock(&sdp->sd_statfs_spin);
264 int gfs2_statfs_sync(struct super_block *sb, int type)
266 struct gfs2_sbd *sdp = sb->s_fs_info;
267 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
268 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
269 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
270 struct gfs2_holder gh;
271 struct buffer_head *m_bh;
274 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
279 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
283 spin_lock(&sdp->sd_statfs_spin);
284 gfs2_statfs_change_in(m_sc, m_bh->b_data +
285 sizeof(struct gfs2_dinode));
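/* Nothing to fold in if the local change counters are all zero. */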
286 if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
287 spin_unlock(&sdp->sd_statfs_spin);
290 spin_unlock(&sdp->sd_statfs_spin);
292 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
296 update_statfs(sdp, m_bh);
297 sdp->sd_statfs_force_sync = 0;
304 gfs2_glock_dq_uninit(&gh);
310 struct list_head list;
311 struct gfs2_holder gh;
315 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
316 *                             journals are clean
317 * @sdp: the file system
322 static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
324 struct gfs2_inode *ip;
325 struct gfs2_jdesc *jd;
328 struct gfs2_log_header_host lh;
332 * Grab all the journal glocks in SH mode. We are *probably* doing
333 * that to prevent recovery.
336 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
337 lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
342 ip = GFS2_I(jd->jd_inode);
343 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
348 list_add(&lfcc->list, &list);
351 gfs2_freeze_unlock(sdp);
353 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
354 LM_FLAG_NOEXP | GL_NOPID,
359 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
360 error = gfs2_jdesc_check(jd);
363 error = gfs2_find_jhead(jd, &lh);
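/*
 * A cleanly shut down journal ends with a log header that has the
 * UNMOUNT flag set; anything else means the journal is dirty and the
 * filesystem cannot be frozen in a clean state.
 */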
366 if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
373 goto out; /* success */
375 gfs2_freeze_unlock(sdp);
378 error2 = gfs2_freeze_lock_shared(sdp);
379 gfs2_assert_withdraw(sdp, !error2);
382 while (!list_empty(&list)) {
383 lfcc = list_first_entry(&list, struct lfcc, list);
384 list_del(&lfcc->list);
385 gfs2_glock_dq_uninit(&lfcc->gh);
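/**
 * gfs2_dinode_out - Write the in-core inode into an on-disk dinode buffer
 * @ip: The GFS2 inode
 * @buf: The buffer holding the on-disk dinode
 */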
391 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
393 const struct inode *inode = &ip->i_inode;
394 struct gfs2_dinode *str = buf;
396 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
397 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
398 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
399 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
400 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
401 str->di_mode = cpu_to_be32(inode->i_mode);
402 str->di_uid = cpu_to_be32(i_uid_read(inode));
403 str->di_gid = cpu_to_be32(i_gid_read(inode));
404 str->di_nlink = cpu_to_be32(inode->i_nlink);
405 str->di_size = cpu_to_be64(i_size_read(inode));
406 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
407 str->di_atime = cpu_to_be64(inode_get_atime_sec(inode));
408 str->di_mtime = cpu_to_be64(inode_get_mtime_sec(inode));
409 str->di_ctime = cpu_to_be64(inode_get_ctime_sec(inode));
411 str->di_goal_meta = cpu_to_be64(ip->i_goal);
412 str->di_goal_data = cpu_to_be64(ip->i_goal);
413 str->di_generation = cpu_to_be64(ip->i_generation);
415 str->di_flags = cpu_to_be32(ip->i_diskflags);
416 str->di_height = cpu_to_be16(ip->i_height);
417 str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
418 !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
420 str->di_depth = cpu_to_be16(ip->i_depth);
421 str->di_entries = cpu_to_be32(ip->i_entries);
423 str->di_eattr = cpu_to_be64(ip->i_eattr);
424 str->di_atime_nsec = cpu_to_be32(inode_get_atime_nsec(inode));
425 str->di_mtime_nsec = cpu_to_be32(inode_get_mtime_nsec(inode));
426 str->di_ctime_nsec = cpu_to_be32(inode_get_ctime_nsec(inode));
430 * gfs2_write_inode - Make sure the inode is stable on the disk
431 * @inode: The inode
432 * @wbc: The writeback control structure
437 static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
439 struct gfs2_inode *ip = GFS2_I(inode);
440 struct gfs2_sbd *sdp = GFS2_SB(inode);
441 struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
442 struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
444 bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
447 gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
448 GFS2_LOG_HEAD_FLUSH_NORMAL |
449 GFS2_LFC_WRITE_INODE);
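/*
 * If the backing device has exceeded its dirty threshold, help out by
 * writing back buffers from the AIL1 list.
 */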
450 if (bdi->wb.dirty_exceeded)
451 gfs2_ail1_flush(sdp, wbc);
453 filemap_fdatawrite(metamapping);
455 ret = filemap_fdatawait(metamapping);
457 mark_inode_dirty_sync(inode);
459 spin_lock(&inode->i_lock);
460 if (!(inode->i_flags & I_DIRTY))
461 gfs2_ordered_del_inode(ip);
462 spin_unlock(&inode->i_lock);
468 * gfs2_dirty_inode - check for atime updates
469 * @inode: The inode in question
470 * @flags: The type of dirty
472 * Unfortunately it can be called under any combination of inode
473 * glock and freeze glock, so we have to check carefully.
475 * At the moment this deals only with atime - it should be possible
476 * to expand that role in future, once a review of the locking has
477 * been carried out.
480 static void gfs2_dirty_inode(struct inode *inode, int flags)
482 struct gfs2_inode *ip = GFS2_I(inode);
483 struct gfs2_sbd *sdp = GFS2_SB(inode);
484 struct buffer_head *bh;
485 struct gfs2_holder gh;
487 int need_endtrans = 0;
490 if (unlikely(!ip->i_gl)) {
491 /* This can only happen during incomplete inode creation. */
492 BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags));
496 if (gfs2_withdrawing_or_withdrawn(sdp))
498 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
499 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
501 fs_err(sdp, "dirty_inode: glock %d\n", ret);
502 gfs2_dump_glock(NULL, ip->i_gl, true);
506 } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
509 if (current->journal_info == NULL) {
510 ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
512 fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
518 ret = gfs2_meta_inode_buffer(ip, &bh);
520 gfs2_trans_add_meta(ip->i_gl, bh);
521 gfs2_dinode_out(ip, bh->b_data);
529 gfs2_glock_dq_uninit(&gh);
533 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
534 * @sdp: the filesystem
539 void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
541 int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
543 if (!test_bit(SDF_KILL, &sdp->sd_flags))
544 gfs2_flush_delete_work(sdp);
546 gfs2_destroy_threads(sdp);
548 if (log_write_allowed) {
549 gfs2_quota_sync(sdp->sd_vfs, 0);
550 gfs2_statfs_sync(sdp->sd_vfs, 0);
552 /* We do two log flushes here. The first one commits dirty inodes
553 * and rgrps to the journal, but queues up revokes to the ail list.
554 * The second flush writes out and removes the revokes.
556 * The first must be done before the FLUSH_SHUTDOWN code
557 * clears the LIVE flag, otherwise it will not be able to start
558 * a transaction to write its revokes, and the error will cause
559 * a withdraw of the file system. */
560 gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
561 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
562 GFS2_LFC_MAKE_FS_RO);
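/* Wait (with a timeout) for the journal to drain, then warn if it has not. */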
563 wait_event_timeout(sdp->sd_log_waitq,
564 gfs2_log_is_empty(sdp),
566 gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
568 gfs2_quota_cleanup(sdp);
572 * gfs2_put_super - Unmount the filesystem
573 * @sb: The VFS superblock
577 static void gfs2_put_super(struct super_block *sb)
579 struct gfs2_sbd *sdp = sb->s_fs_info;
580 struct gfs2_jdesc *jd;
582 /* No more recovery requests */
583 set_bit(SDF_NORECOVERY, &sdp->sd_flags);
586 /* Wait on outstanding recovery */
588 spin_lock(&sdp->sd_jindex_spin);
589 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
590 if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
592 spin_unlock(&sdp->sd_jindex_spin);
593 wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
594 TASK_UNINTERRUPTIBLE);
597 spin_unlock(&sdp->sd_jindex_spin);
600 gfs2_make_fs_ro(sdp);
602 if (gfs2_withdrawing_or_withdrawn(sdp))
603 gfs2_destroy_threads(sdp);
605 gfs2_quota_cleanup(sdp);
608 WARN_ON(gfs2_withdrawing(sdp));
610 /* At this point, we're through modifying the disk */
614 gfs2_freeze_unlock(sdp);
616 iput(sdp->sd_jindex);
617 iput(sdp->sd_statfs_inode);
618 iput(sdp->sd_rindex);
619 iput(sdp->sd_quota_inode);
621 gfs2_glock_put(sdp->sd_rename_gl);
622 gfs2_glock_put(sdp->sd_freeze_gl);
624 if (!sdp->sd_args.ar_spectator) {
625 if (gfs2_holder_initialized(&sdp->sd_journal_gh))
626 gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
627 if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
628 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
629 brelse(sdp->sd_sc_bh);
630 gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
631 gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
632 free_local_statfs_inodes(sdp);
633 iput(sdp->sd_qc_inode);
636 gfs2_glock_dq_uninit(&sdp->sd_live_gh);
637 gfs2_clear_rgrpd(sdp);
638 gfs2_jindex_free(sdp);
639 /* Take apart glock structures and buffer lists */
640 gfs2_gl_hash_clear(sdp);
642 gfs2_delete_debugfs_file(sdp);
644 gfs2_sys_fs_del(sdp);
649 * gfs2_sync_fs - sync the filesystem
650 * @sb: the superblock
651 * @wait: true to wait for completion
653 * Flushes the log to disk.
656 static int gfs2_sync_fs(struct super_block *sb, int wait)
658 struct gfs2_sbd *sdp = sb->s_fs_info;
660 gfs2_quota_sync(sb, -1);
662 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
664 return sdp->sd_log_error;
667 static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who, const void *freeze_owner)
669 struct super_block *sb = sdp->sd_vfs;
672 error = gfs2_freeze_lock_shared(sdp);
675 error = thaw_super(sb, who, freeze_owner);
680 fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
681 gfs2_assert_withdraw(sdp, 0);
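/**
 * gfs2_freeze_func - freeze work queued when the freeze glock is demoted
 * @work: the filesystem's sd_freeze_work
 *
 * Typically queued from the freeze glock demote callback when a remote node
 * initiates a freeze.  Freezes the filesystem at the VFS level, drops the
 * shared freeze glock and marks the filesystem frozen; if freezing fails,
 * the filesystem is thawed again.
 */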
685 void gfs2_freeze_func(struct work_struct *work)
687 struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
688 struct super_block *sb = sdp->sd_vfs;
691 mutex_lock(&sdp->sd_freeze_mutex);
693 if (test_bit(SDF_FROZEN, &sdp->sd_flags))
696 error = freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
700 gfs2_freeze_unlock(sdp);
701 set_bit(SDF_FROZEN, &sdp->sd_flags);
703 error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL);
707 clear_bit(SDF_FROZEN, &sdp->sd_flags);
711 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);
714 mutex_unlock(&sdp->sd_freeze_mutex);
715 deactivate_super(sb);
719 * gfs2_freeze_super - prevent further writes to the filesystem
720 * @sb: the VFS structure for the filesystem
722 * @freeze_owner: owner of the freeze
726 static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who,
727 const void *freeze_owner)
729 struct gfs2_sbd *sdp = sb->s_fs_info;
732 if (!mutex_trylock(&sdp->sd_freeze_mutex))
734 if (test_bit(SDF_FROZEN, &sdp->sd_flags)) {
735 mutex_unlock(&sdp->sd_freeze_mutex);
740 error = freeze_super(sb, who, freeze_owner);
742 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
747 error = gfs2_lock_fs_check_clean(sdp);
749 set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
750 set_bit(SDF_FROZEN, &sdp->sd_flags);
754 error = gfs2_do_thaw(sdp, who, freeze_owner);
759 fs_err(sdp, "waiting for recovery before freeze\n");
760 else if (error == -EIO) {
761 fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
762 "to recovery error.\n");
765 fs_err(sdp, "error freezing FS: %d\n", error);
767 fs_err(sdp, "retrying...\n");
772 mutex_unlock(&sdp->sd_freeze_mutex);
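/*
 * gfs2_freeze_fs is the ->freeze_fs super operation.  By the time
 * freeze_super() calls it, writes are already blocked, so all that is left
 * is to flush the journal with the FREEZE flag so it is clean on disk.
 */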
776 static int gfs2_freeze_fs(struct super_block *sb)
778 struct gfs2_sbd *sdp = sb->s_fs_info;
780 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
781 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
782 GFS2_LFC_FREEZE_GO_SYNC);
783 if (gfs2_withdrawing_or_withdrawn(sdp))
790 * gfs2_thaw_super - reallow writes to the filesystem
791 * @sb: the VFS structure for the filesystem
793 * @freeze_owner: owner of the freeze
797 static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who,
798 const void *freeze_owner)
800 struct gfs2_sbd *sdp = sb->s_fs_info;
803 if (!mutex_trylock(&sdp->sd_freeze_mutex))
805 if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags)) {
806 mutex_unlock(&sdp->sd_freeze_mutex);
810 atomic_inc(&sb->s_active);
811 gfs2_freeze_unlock(sdp);
813 error = gfs2_do_thaw(sdp, who, freeze_owner);
816 clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
817 clear_bit(SDF_FROZEN, &sdp->sd_flags);
819 mutex_unlock(&sdp->sd_freeze_mutex);
820 deactivate_super(sb);
824 void gfs2_thaw_freeze_initiator(struct super_block *sb)
826 struct gfs2_sbd *sdp = sb->s_fs_info;
828 mutex_lock(&sdp->sd_freeze_mutex);
829 if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
832 gfs2_freeze_unlock(sdp);
835 mutex_unlock(&sdp->sd_freeze_mutex);
839 * statfs_slow_fill - fill in the sc for a given RG
840 * @rgd: the resource group descriptor
841 * @sc: the sc structure
843 * Returns: 0 on success, -ESTALE if the LVB is invalid
846 static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
847 struct gfs2_statfs_change_host *sc)
849 gfs2_rgrp_verify(rgd);
850 sc->sc_total += rgd->rd_data;
851 sc->sc_free += rgd->rd_free;
852 sc->sc_dinodes += rgd->rd_dinodes;
857 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
858 * @sdp: the filesystem
859 * @sc: the sc info that will be returned
861 * Any error (other than a signal) will cause this routine to fall back
862 * to the synchronous version.
864 * FIXME: This really shouldn't busy wait like this.
869 static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
871 struct gfs2_rgrpd *rgd_next;
872 struct gfs2_holder *gha, *gh;
873 unsigned int slots = 64;
878 memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
879 gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
882 for (x = 0; x < slots; x++)
883 gfs2_holder_mark_uninitialized(gha + x);
885 rgd_next = gfs2_rgrpd_get_first(sdp);
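/*
 * Keep up to 'slots' asynchronous resource group glock requests in flight
 * at once, adding each rgrp's counts as its lock is granted, until every
 * resource group has been visited or an error occurs.
 */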
890 for (x = 0; x < slots; x++) {
893 if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
894 err = gfs2_glock_wait(gh);
896 gfs2_holder_uninit(gh);
900 struct gfs2_rgrpd *rgd =
901 gfs2_glock2rgrp(gh->gh_gl);
903 error = statfs_slow_fill(rgd, sc);
905 gfs2_glock_dq_uninit(gh);
909 if (gfs2_holder_initialized(gh))
911 else if (rgd_next && !error) {
912 error = gfs2_glock_nq_init(rgd_next->rd_gl,
916 rgd_next = gfs2_rgrpd_get_next(rgd_next);
920 if (signal_pending(current))
921 error = -ERESTARTSYS;
935 * gfs2_statfs_i - Do a statfs
936 * @sdp: the filesystem
937 * @sc: the sc structure
942 static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
944 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
945 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
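/*
 * Combine the master statfs counters with this node's not-yet-synced
 * local changes, then clamp any transiently inconsistent values.
 */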
947 spin_lock(&sdp->sd_statfs_spin);
950 sc->sc_total += l_sc->sc_total;
951 sc->sc_free += l_sc->sc_free;
952 sc->sc_dinodes += l_sc->sc_dinodes;
954 spin_unlock(&sdp->sd_statfs_spin);
958 if (sc->sc_free > sc->sc_total)
959 sc->sc_free = sc->sc_total;
960 if (sc->sc_dinodes < 0)
967 * gfs2_statfs - Gather and return stats about the filesystem
968 * @dentry: The name of the link
969 * @buf: The buffer to fill with statistics
971 * Returns: 0 on success or error code
974 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
976 struct super_block *sb = dentry->d_sb;
977 struct gfs2_sbd *sdp = sb->s_fs_info;
978 struct gfs2_statfs_change_host sc;
981 error = gfs2_rindex_update(sdp);
985 if (gfs2_tune_get(sdp, gt_statfs_slow))
986 error = gfs2_statfs_slow(sdp, &sc);
988 error = gfs2_statfs_i(sdp, &sc);
993 buf->f_type = GFS2_MAGIC;
994 buf->f_bsize = sdp->sd_sb.sb_bsize;
995 buf->f_blocks = sc.sc_total;
996 buf->f_bfree = sc.sc_free;
997 buf->f_bavail = sc.sc_free;
998 buf->f_files = sc.sc_dinodes + sc.sc_free;
999 buf->f_ffree = sc.sc_free;
1000 buf->f_namelen = GFS2_FNAMESIZE;
1001 buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
1007 * gfs2_drop_inode - Drop an inode (test for remote unlink)
1008 * @inode: The inode to drop
1010 * If we've received a callback on an iopen lock then it's because a
1011 * remote node tried to deallocate the inode but failed due to this node
1012 * still having the inode open. Here we mark the link count zero
1013 * since we know that it must have reached zero if the GLF_DEMOTE flag
1014 * is set on the iopen glock. If we didn't do a disk read since the
1015 * remote node removed the final link then we might otherwise miss
1016 * this event. This check ensures that this node will deallocate the
1017 * inode's blocks, or alternatively pass the baton on to another
1018 * node for later deallocation.
1021 static int gfs2_drop_inode(struct inode *inode)
1023 struct gfs2_inode *ip = GFS2_I(inode);
1024 struct gfs2_sbd *sdp = GFS2_SB(inode);
1026 if (inode->i_nlink &&
1027 gfs2_holder_initialized(&ip->i_iopen_gh)) {
1028 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1029 if (glock_needs_demote(gl))
1034 * When under memory pressure when an inode's link count has dropped to
1035 * zero, defer deleting the inode to the delete workqueue. This avoids
1036 * calling into DLM under memory pressure, which can deadlock.
1038 if (!inode->i_nlink &&
1039 unlikely(current->flags & PF_MEMALLOC) &&
1040 gfs2_holder_initialized(&ip->i_iopen_gh)) {
1041 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1043 gfs2_glock_hold(gl);
1044 if (!gfs2_queue_verify_delete(gl, true))
1045 gfs2_glock_put_async(gl);
1050 * No longer cache inodes when trying to evict them all.
1052 if (test_bit(SDF_EVICTING, &sdp->sd_flags))
1055 return generic_drop_inode(inode);
1059 * gfs2_show_options - Show mount options for /proc/mounts
1060 * @s: seq_file structure
1061 * @root: root of this (sub)tree
1063 * Returns: 0 on success or error code
1066 static int gfs2_show_options(struct seq_file *s, struct dentry *root)
1068 struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
1069 struct gfs2_args *args = &sdp->sd_args;
1070 unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
1072 spin_lock(&sdp->sd_tune.gt_spin);
1073 logd_secs = sdp->sd_tune.gt_logd_secs;
1074 quota_quantum = sdp->sd_tune.gt_quota_quantum;
1075 statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
1076 statfs_slow = sdp->sd_tune.gt_statfs_slow;
1077 spin_unlock(&sdp->sd_tune.gt_spin);
1079 if (is_subdir(root, sdp->sd_master_dir))
1080 seq_puts(s, ",meta");
1081 if (args->ar_lockproto[0])
1082 seq_show_option(s, "lockproto", args->ar_lockproto);
1083 if (args->ar_locktable[0])
1084 seq_show_option(s, "locktable", args->ar_locktable);
1085 if (args->ar_hostdata[0])
1086 seq_show_option(s, "hostdata", args->ar_hostdata);
1087 if (args->ar_spectator)
1088 seq_puts(s, ",spectator");
1089 if (args->ar_localflocks)
1090 seq_puts(s, ",localflocks");
1092 seq_puts(s, ",debug");
1093 if (args->ar_posix_acl)
1094 seq_puts(s, ",acl");
1095 if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
1097 switch (args->ar_quota) {
1098 case GFS2_QUOTA_OFF:
1101 case GFS2_QUOTA_ACCOUNT:
1107 case GFS2_QUOTA_QUIET:
1114 seq_printf(s, ",quota=%s", state);
1116 if (args->ar_suiddir)
1117 seq_puts(s, ",suiddir");
1118 if (args->ar_data != GFS2_DATA_DEFAULT) {
1120 switch (args->ar_data) {
1121 case GFS2_DATA_WRITEBACK:
1122 state = "writeback";
1124 case GFS2_DATA_ORDERED:
1131 seq_printf(s, ",data=%s", state);
1133 if (args->ar_discard)
1134 seq_puts(s, ",discard");
1135 if (logd_secs != 30)
1136 seq_printf(s, ",commit=%d", logd_secs);
1137 if (statfs_quantum != 30)
1138 seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
1139 else if (statfs_slow)
1140 seq_puts(s, ",statfs_quantum=0");
1141 if (quota_quantum != 60)
1142 seq_printf(s, ",quota_quantum=%d", quota_quantum);
1143 if (args->ar_statfs_percent)
1144 seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1145 if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1148 switch (args->ar_errors) {
1149 case GFS2_ERRORS_WITHDRAW:
1152 case GFS2_ERRORS_PANIC:
1159 seq_printf(s, ",errors=%s", state);
1161 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1162 seq_puts(s, ",nobarrier");
1163 if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
1164 seq_puts(s, ",demote_interface_used");
1165 if (args->ar_rgrplvb)
1166 seq_puts(s, ",rgrplvb");
1167 if (args->ar_loccookie)
1168 seq_puts(s, ",loccookie");
1173 * gfs2_glock_put_eventually - put a glock, deferred if under memory pressure
1174 * @gl: The glock to put
1176 * When under memory pressure, trigger a deferred glock put to make sure we
1177 * won't call into DLM and deadlock. Otherwise, put the glock directly.
1180 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
1182 if (current->flags & PF_MEMALLOC)
1183 gfs2_glock_put_async(gl);
1188 static enum evict_behavior gfs2_upgrade_iopen_glock(struct inode *inode)
1190 struct gfs2_inode *ip = GFS2_I(inode);
1191 struct gfs2_sbd *sdp = GFS2_SB(inode);
1192 struct gfs2_holder *gh = &ip->i_iopen_gh;
1195 gh->gh_flags |= GL_NOCACHE;
1196 gfs2_glock_dq_wait(gh);
1199 * If there are no other lock holders, we will immediately get
1200 * exclusive access to the iopen glock here.
1202 * Otherwise, the other nodes holding the lock will be notified about
1203 * our locking request (see iopen_go_callback()). If they do not have
1204 * the inode open, they are expected to evict the cached inode and
1205 * release the lock, allowing us to proceed.
1207 * Otherwise, if they cannot evict the inode, they are expected to poke
1208 * the inode glock (note: not the iopen glock). We will notice that
1209 * and stop waiting for the iopen glock immediately. The other node(s)
1210 * are then expected to take care of deleting the inode when they no
1213 * As a last resort, if another node keeps holding the iopen glock
1214 * without showing any activity on the inode glock, we will eventually
1215 * time out and fail the iopen glock upgrade.
1218 gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
1219 error = gfs2_glock_nq(gh);
1221 return EVICT_SHOULD_SKIP_DELETE;
1223 wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
1224 !test_bit(HIF_WAIT, &gh->gh_iflags) ||
1225 glock_needs_demote(ip->i_gl),
1227 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1229 if (glock_needs_demote(ip->i_gl))
1230 return EVICT_SHOULD_SKIP_DELETE;
1231 return EVICT_SHOULD_DEFER_DELETE;
1233 error = gfs2_glock_holder_ready(gh);
1235 return EVICT_SHOULD_SKIP_DELETE;
1236 return EVICT_SHOULD_DELETE;
1240 * evict_should_delete - determine whether the inode is eligible for deletion
1241 * @inode: The inode to evict
1242 * @gh: The glock holder structure
1244 * This function determines whether the evicted inode is eligible to be deleted
1245 * and locks the inode glock.
1247 * Returns: the fate of the dinode
1249 static enum evict_behavior evict_should_delete(struct inode *inode,
1250 struct gfs2_holder *gh)
1252 struct gfs2_inode *ip = GFS2_I(inode);
1253 struct super_block *sb = inode->i_sb;
1254 struct gfs2_sbd *sdp = sb->s_fs_info;
1257 if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
1258 test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags))
1259 return EVICT_SHOULD_DEFER_DELETE;
1261 /* Deletes should never happen under memory pressure anymore. */
1262 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
1263 return EVICT_SHOULD_DEFER_DELETE;
1265 /* Must not read inode block until block type has been verified */
1266 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
1268 return EVICT_SHOULD_SKIP_DELETE;
1270 if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
1271 return EVICT_SHOULD_SKIP_DELETE;
1272 ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
1274 return EVICT_SHOULD_SKIP_DELETE;
1276 ret = gfs2_instantiate(gh);
1278 return EVICT_SHOULD_SKIP_DELETE;
1281 * The inode may have been recreated in the meantime.
1284 return EVICT_SHOULD_SKIP_DELETE;
1286 if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
1287 test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
1288 return gfs2_upgrade_iopen_glock(inode);
1289 return EVICT_SHOULD_DELETE;
1293 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
1294 * @inode: The inode to evict
1296 static int evict_unlinked_inode(struct inode *inode)
1298 struct gfs2_inode *ip = GFS2_I(inode);
1301 if (S_ISDIR(inode->i_mode) &&
1302 (ip->i_diskflags & GFS2_DIF_EXHASH)) {
1303 ret = gfs2_dir_exhash_dealloc(ip);
1309 ret = gfs2_ea_dealloc(ip, true);
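/*
 * Stuffed inodes keep their data inside the dinode block itself, so only
 * unstuffed inodes have separate data blocks to deallocate.
 */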
1314 if (!gfs2_is_stuffed(ip)) {
1315 ret = gfs2_file_dealloc(ip);
1321 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
1322 * can get called to recreate it, or even gfs2_inode_lookup() if the
1323 * inode was recreated on another node in the meantime.
1325 * However, inserting the new inode into the inode hash table will not
1326 * succeed until the old inode is removed, and that only happens after
1327 * ->evict_inode() returns. The new inode is attached to its inode and
1328 * iopen glocks after inserting it into the inode hash table, so at
1329 * that point we can be sure that both glocks are unused.
1332 ret = gfs2_dinode_dealloc(ip);
1333 if (!ret && ip->i_gl)
1334 gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
1341 * evict_linked_inode - evict an inode whose dinode has not been unlinked
1342 * @inode: The inode to evict
1344 static int evict_linked_inode(struct inode *inode)
1346 struct super_block *sb = inode->i_sb;
1347 struct gfs2_sbd *sdp = sb->s_fs_info;
1348 struct gfs2_inode *ip = GFS2_I(inode);
1349 struct address_space *metamapping;
1352 gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
1353 GFS2_LFC_EVICT_INODE);
1354 metamapping = gfs2_glock2aspace(ip->i_gl);
1355 if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
1356 filemap_fdatawrite(metamapping);
1357 filemap_fdatawait(metamapping);
1359 write_inode_now(inode, 1);
1360 gfs2_ail_flush(ip->i_gl, 0);
1362 ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
1366 /* Needs to be done before glock release & also in a transaction */
1367 truncate_inode_pages(&inode->i_data, 0);
1368 truncate_inode_pages(metamapping, 0);
1369 gfs2_trans_end(sdp);
1374 * gfs2_evict_inode - Remove an inode from cache
1375 * @inode: The inode to evict
1377 * There are three cases to consider:
1378 * 1. i_nlink == 0, we are final opener (and must deallocate)
1379 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
1380 * 3. i_nlink > 0
1382 * If the fs is read only, then we have to treat all cases as per #3
1383 * since we are unable to do any deallocation. The inode will be
1384 * deallocated by the next read/write node to attempt an allocation
1385 * in the same resource group
1387 * We have to (at the moment) hold the inode's main lock to cover
1388 * the gap between unlocking the shared lock on the iopen lock and
1389 * taking the exclusive lock. I'd rather do a shared -> exclusive
1390 * conversion on the iopen lock, but we can change that later. This
1391 * is safe, just less efficient.
1394 static void gfs2_evict_inode(struct inode *inode)
1396 struct super_block *sb = inode->i_sb;
1397 struct gfs2_sbd *sdp = sb->s_fs_info;
1398 struct gfs2_inode *ip = GFS2_I(inode);
1399 struct gfs2_holder gh;
1400 enum evict_behavior behavior;
1403 gfs2_holder_mark_uninitialized(&gh);
1404 if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
1408 * In case of an incomplete mount, gfs2_evict_inode() may be called for
1409 * system files without having an active journal to write to. In that
1410 * case, skip the filesystem evict.
1415 behavior = evict_should_delete(inode, &gh);
1416 if (behavior == EVICT_SHOULD_DEFER_DELETE &&
1417 !test_bit(SDF_KILL, &sdp->sd_flags)) {
1418 struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
1421 gfs2_glock_hold(io_gl);
1422 if (!gfs2_queue_verify_delete(io_gl, true))
1423 gfs2_glock_put(io_gl);
1426 behavior = EVICT_SHOULD_SKIP_DELETE;
1428 if (behavior == EVICT_SHOULD_DELETE)
1429 ret = evict_unlinked_inode(inode);
1431 ret = evict_linked_inode(inode);
1433 if (gfs2_rs_active(&ip->i_res))
1434 gfs2_rs_deltree(&ip->i_res);
1436 if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
1437 fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
1439 if (gfs2_holder_initialized(&gh))
1440 gfs2_glock_dq_uninit(&gh);
1441 truncate_inode_pages_final(&inode->i_data);
1443 gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
1444 gfs2_rs_deltree(&ip->i_res);
1445 gfs2_ordered_del_inode(ip);
1447 gfs2_dir_hash_inval(ip);
1448 if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
1449 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1451 glock_clear_object(gl, ip);
1452 gfs2_glock_hold(gl);
1453 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1454 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1455 gfs2_glock_put_eventually(gl);
1458 glock_clear_object(ip->i_gl, ip);
1459 wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
1460 gfs2_glock_put_eventually(ip->i_gl);
1461 rcu_assign_pointer(ip->i_gl, NULL);
1465 static struct inode *gfs2_alloc_inode(struct super_block *sb)
1467 struct gfs2_inode *ip;
1469 ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
1473 ip->i_no_formal_ino = 0;
1476 gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
1477 memset(&ip->i_res, 0, sizeof(ip->i_res));
1478 RB_CLEAR_NODE(&ip->i_res.rs_node);
1479 ip->i_diskflags = 0;
1481 return &ip->i_inode;
1484 static void gfs2_free_inode(struct inode *inode)
1486 kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
1489 void free_local_statfs_inodes(struct gfs2_sbd *sdp)
1491 struct local_statfs_inode *lsi, *safe;
1493 /* Run through the statfs inodes list to iput and free memory */
1494 list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
1495 if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
1496 sdp->sd_sc_inode = NULL; /* belongs to this node */
1497 if (lsi->si_sc_inode)
1498 iput(lsi->si_sc_inode);
1499 list_del(&lsi->si_list);
1504 struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
1507 struct local_statfs_inode *lsi;
1509 /* Return the local (per node) statfs inode in the
1510 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
1511 list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
1512 if (lsi->si_jid == index)
1513 return lsi->si_sc_inode;
1518 const struct super_operations gfs2_super_ops = {
1519 .alloc_inode = gfs2_alloc_inode,
1520 .free_inode = gfs2_free_inode,
1521 .write_inode = gfs2_write_inode,
1522 .dirty_inode = gfs2_dirty_inode,
1523 .evict_inode = gfs2_evict_inode,
1524 .put_super = gfs2_put_super,
1525 .sync_fs = gfs2_sync_fs,
1526 .freeze_super = gfs2_freeze_super,
1527 .freeze_fs = gfs2_freeze_fs,
1528 .thaw_super = gfs2_thaw_super,
1529 .statfs = gfs2_statfs,
1530 .drop_inode = gfs2_drop_inode,
1531 .show_options = gfs2_show_options,