// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
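	/* Buffers in any of these states cannot be revoked: they are
	   skipped for fsync and reported via gfs2_ail_error() otherwise. */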
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
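
/*
 * GIF_GLOP_PENDING is set by gfs2_glock2inode() while a glock operation
 * is in flight and cleared by gfs2_clear_glop_pending() above. A
 * waiter-side sketch (an assumption about callers, not part of this
 * file):
 *
 *	wait_on_bit(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 *
 * keeps the inode from being torn down while inode_go_sync() or
 * inode_go_inval() is still using it.
 */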

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
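 * Writeback is ordered: the log is flushed first, then the metadata and
 * (for regular files) data mappings, and finally the AIL is emptied.
 *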
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}
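
/*
 * sd_jindex and sd_rindex are the journal index and resource group index
 * inodes; they stay in use for the life of the mount, so their glocks
 * are never volunteered for demotion.
 */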

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&inode->i_atime, &atime) < 0)
		inode->i_atime = atime;
	inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	inode->i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	inode->i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
		goto corrupt;

	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}
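
/*
 * Callers normally do not invoke gfs2_inode_refresh() directly: it runs
 * via inode_go_instantiate() below when the glock is first acquired. A
 * direct-use sketch (error handling elided; assumes the usual holder
 * helpers from glock.h):
 *
 *	struct gfs2_holder gh;
 *
 *	if (!gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh)) {
 *		gfs2_inode_refresh(ip);
 *		gfs2_glock_dq_uninit(&gh);
 *	}
 */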

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}

static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	inode = &ip->i_inode;
	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}
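
/*
 * Note on the reference counting above: the extra gl_lockref count keeps
 * the glock alive while the queued eviction work runs; if the work could
 * not be queued, the count is dropped again immediately.
 */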

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
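
/*
 * gfs2_glops_list is indexed by lock type, so for example
 * gfs2_glops_list[LM_TYPE_INODE] == &gfs2_inode_glops. A lookup sketch
 * (the surrounding caller and variable names are illustrative only):
 *
 *	const struct gfs2_glock_operations *glops;
 *
 *	glops = gfs2_glops_list[ln_type];
 *	if (glops && glops->go_callback)
 *		glops->go_callback(gl, remote);
 */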