// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/sched/signal.h>

#include "incore.h"
#include "glock.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"

/**
 * gfs2_update_stats - Update time based stats
 * @s: The stats to update (local or global)
 * @index: The index of the mean within @s->stats (the variance is stored
 *         in the following entry)
 * @sample: New data to include
 *
 * @delta is the difference between the current rtt sample and the
 * running average srtt. We add 1/8 of that to the srtt in order to
 * update the current srtt estimate. The variance estimate is a bit
 * more complicated. We subtract the current variance estimate from
 * the abs value of the @delta and add 1/4 of that to the running
 * total.  That's equivalent to 3/4 of the current variance
 * estimate plus 1/4 of the abs of @delta.
 *
 * Note that the index points at the array entry containing the smoothed
 * mean value, and the variance is always in the following entry
 *
 * Reference: TCP/IP Illustrated, vol 2, p. 831,832
 * All times are in units of integer nanoseconds. Unlike the TCP/IP case,
 * they are not scaled fixed point.
 */

static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
}

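/*
 * Illustrative sketch only -- not part of the original file.  The update
 * rule above, restated on two plain s64 values so the shift arithmetic is
 * easier to follow ("mean" and "var" stand for the two adjacent s->stats[]
 * entries; all values are integer nanoseconds).
 */
#if 0
static void example_srtt_update(s64 *mean, s64 *var, s64 sample)
{
	s64 delta = sample - *mean;

	*mean += delta >> 3;			/* mean += delta/8          */
	*var += (abs(delta) - *var) >> 2;	/* var  += (|delta|-var)/4  */

	/*
	 * e.g. starting from mean = 0, var = 0, two 800ns samples give:
	 *   sample 800: delta = 800 -> mean = 100, var = 200
	 *   sample 800: delta = 700 -> mean = 187, var = 325
	 */
}
#endif
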
/**
 * gfs2_update_reply_times - Update locking statistics
 * @gl: The glock to update
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * The rtt (lock round trip time) is an estimate of the time
 * taken to perform a dlm lock request. We update it on each
 * reply from the dlm.
 *
 * The blocking flag is set on the glock for all dlm requests
 * which may potentially block due to lock requests from other nodes.
 * DLM requests where the current lock state is exclusive, where the
 * requested state is null (or unlocked), or where the TRY or
 * TRY_1CB flags are set are classified as non-blocking. All
 * other DLM requests are counted as (potentially) blocking.
 */
static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	s64 rtt;

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();

	trace_gfs2_glock_lock_time(gl, rtt);
}

/**
 * gfs2_update_request_times - Update locking statistics
 * @gl: The glock to update
 *
 * The irt (lock inter-request times) measures the average time
 * between requests to the dlm. It is updated immediately before
 * each dlm call.
 */

static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	ktime_t dstamp;
	s64 irt;

	preempt_disable();
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
}

static void gdlm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;
	unsigned ret = gl->gl_state;

	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
		gfs2_glock_free(gl);
		return;
	case -DLM_ECANCEL: /* Cancel while getting lock */
		ret |= LM_OUT_CANCELED;
		goto out;
	case -EAGAIN: /* Try lock fails */
	case -EDEADLK: /* Deadlock detected */
		goto out;
	case -ETIMEDOUT: /* Canceled due to timeout */
		ret |= LM_OUT_ERROR;
		goto out;
	case 0: /* Success */
		break;
	default: /* Something unexpected */
		BUG();
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		else
			BUG();
	}

	set_bit(GLF_INITIAL, &gl->gl_flags);
	gfs2_glock_complete(gl, ret);
	return;
out:
	if (!test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
	gfs2_glock_complete(gl, ret);
}

static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	switch (mode) {
	case DLM_LOCK_EX:
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
		BUG();
	}
}

/* convert gfs lock-state to dlm lock-mode */

static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	fs_err(sdp, "unknown LM state %d\n", lmstate);
	BUG();
	return -1;
}

static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
		      const int req)
{
	u32 lkf = 0;

	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
		else
			BUG();
	}

	if (gl->gl_lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;
		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
			lkf |= DLM_LKF_QUECVT;
	}

	return lkf;
}

static void gfs2_reverse_hex(char *c, u64 value)
{
	*c = '0';
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}

static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";

	req = make_mode(gl->gl_name.ln_sbd, req_state);
	lkf = make_flags(gl, flags, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}
	/*
	 * Submit the actual lock request.
	 */

	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}

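/*
 * Illustrative sketch only -- not part of the original file.  The resource
 * name built in gdlm_lock() above is the same fixed-width "%8x%16x" layout
 * that sync_lock() uses further down: 24 space-padded characters, with the
 * glock type right-aligned in the first 8 and the block number right-aligned
 * in the remaining 16 (gfs2_reverse_hex() fills each field backwards from
 * offsets 7 and 23).
 */
#if 0
	/* e.g. ln_type = 2, ln_number = 0x1234 produces: */
	char strname[GDLM_STRNAME_BYTES] = "       2            1234";
#endif
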
static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int lvb_needs_unlock = 0;
	int error;

	if (gl->gl_lksb.sb_lkid == 0) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to skip dlm_unlock writing the lvb when lock is ex */

	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
		lvb_needs_unlock = 1;

	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    !lvb_needs_unlock) {
		gfs2_glock_free(gl);
		return;
	}

	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error) {
		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
		return;
	}
}

static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 *  1. dlm_controld sees lockspace members change
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 *  6. dlm_recoverd notifies gfs2 of failed nodes (recover_slot)
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 *  9. dlm_recoverd notifies gfs2 when done (recover_done with new generation)
 * 10. gfs2_control updates control_lock lvb with new generation and jid bits
 * 11. gfs2_control enqueues journals for gfs2_recover to recover (maybe none)
 * 12. gfs2_recover dequeues and recovers journals of failed nodes
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 * 14. gfs2_control updates control_lock lvb jid bits for recovered journals
 * 15. gfs2_control unblocks normal locking when all journals are recovered
 *
 * - failures during recovery
 *
 * recover_prep() may set BLOCK_LOCKS (step 3) again before gfs2_control
 * clears BLOCK_LOCKS (step 15), e.g. another node fails while still
 * recovering for a prior failure.  gfs2_control needs a way to detect
 * this so it can leave BLOCK_LOCKS set in step 15.  This is managed using
 * the recover_block and recover_start values.
 *
 * recover_done() provides a new lockspace generation number each time it
 * is called (step 9).  This generation number is saved as recover_start.
 * When recover_prep() is called, it sets BLOCK_LOCKS and sets
 * recover_block = recover_start.  So, while recover_block is equal to
 * recover_start, BLOCK_LOCKS should remain set.  (recover_spin must
 * be held around the BLOCK_LOCKS/recover_block/recover_start logic.)
 *
 * - more specific gfs2 steps in sequence above
 *
 *  3. recover_prep sets BLOCK_LOCKS and sets recover_block = recover_start
 *  6. recover_slot records any failed jids (maybe none)
 *  9. recover_done sets recover_start = new generation number
 * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids
 * 12. gfs2_recover does journal recoveries for failed jids identified above
 * 14. gfs2_control clears control_lock lvb bits for recovered jids
 * 15. gfs2_control checks if recover_block == recover_start (step 3 occurred
 *     again) then do nothing, otherwise if recover_start > recover_block
 *     then clear BLOCK_LOCKS.
 *
 * - parallel recovery steps across all nodes
 *
 * All nodes attempt to update the control_lock lvb with the new generation
 * number and jid bits, but only the first to get the control_lock EX will
 * do so; others will see that it's already done (lvb already contains new
 * generation number.)
 *
 * . All nodes get the same recover_prep/recover_slot/recover_done callbacks
 * . All nodes attempt to set control_lock lvb gen + bits for the new gen
 * . One node gets control_lock first and writes the lvb, others see it's done
 * . All nodes attempt to recover jids for which they see control_lock bits set
 * . One node succeeds for a jid, and that one clears the jid bit in the lvb
 * . All nodes will eventually see all lvb bits clear and unblock locks
 *
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *
 * 1. jid fails
 * 2. lvb bit set for step 1
 * 3. jid recovered for step 1
 * 4. jid taken again (new mount)
 * 5. jid fails (for step 4)
 * 6. lvb bit set for step 5 (will already be set)
 * 7. lvb bit cleared for step 3
 *
 * This is not a problem because the failure in step 5 does not
 * require recovery, because the mount in step 4 could not have
 * progressed far enough to unblock locks and access the fs.  The
 * control_mount() function waits for all recoveries to be complete
 * for the latest lockspace generation before ever unblocking locks
 * and returning.  The mount in step 4 waits until the recovery in
 * step 1 is done.
 *
 * - special case of first mounter: first node to mount the fs
 *
 * The first node to mount a gfs2 fs needs to check all the journals
 * and recover any that need recovery before other nodes are allowed
 * to mount the fs.  (Others may begin mounting, but they must wait
 * for the first mounter to be done before taking locks on the fs
 * or accessing the fs.)  This has two parts:
 *
 * 1. The mounted_lock tells a node it's the first to mount the fs.
 *    Each node holds the mounted_lock in PR while it's mounted.
 *    Each node tries to acquire the mounted_lock in EX when it mounts.
 *    If a node is granted the mounted_lock EX it means there are no
 *    other mounted nodes (no PR locks exist), and it is the first mounter.
 *    The mounted_lock is demoted to PR when first recovery is done, so
 *    others will fail to get an EX lock, but will get a PR lock.
 *
 * 2. The control_lock blocks others in control_mount() while the first
 *    mounter is doing first mount recovery of all journals.
 *    A mounting node needs to acquire control_lock in EX mode before
 *    it can proceed.  The first mounter holds control_lock in EX while doing
 *    the first mount recovery, blocking mounts from other nodes, then demotes
 *    control_lock to NL when it's done (others_may_mount/first_done),
 *    allowing other nodes to continue mounting.
 *
 * first mounter:
 * control_lock EX/NOQUEUE success
 * mounted_lock EX/NOQUEUE success (no other PR, so no other mounters)
 * set first=1
 * do first mounter recovery
 * mounted_lock EX->PR
 * control_lock EX->NL, write lvb generation
 *
 * other mounter:
 * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
 * mounted_lock PR/NOQUEUE success
 * read lvb generation
 * control_lock EX->NL
 * set first=0
 *
 * - mount during recovery
 *
 * If a node mounts while others are doing recovery (not first mounter),
 * the mounting node will get its initial recover_done() callback without
 * having seen any previous failures/callbacks.
 *
 * It must wait for all recoveries preceding its mount to be finished
 * before it unblocks locks.  It does this by repeating the "other mounter"
 * steps above until the lvb generation number is >= its mount generation
 * number (from initial recover_done) and all lvb bits are clear.
 *
 * - control_lock lvb format
 *
 * 4 bytes generation number: the latest dlm lockspace generation number
 * from recover_done callback.  Indicates the jid bitmap has been updated
 * to reflect all slot failures through that generation.
 * 4 bytes unused.
 * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates
 * that jid N needs recovery.
 */

#define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */

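/*
 * Illustrative sketch only -- not part of the original file.  Byte layout of
 * the control_lock lvb described in the comment above (GDLM_LVB_SIZE bytes
 * in total):
 *
 *	bytes 0..3	__le32 lockspace generation (see control_lvb_read/write)
 *	bytes 4..7	unused
 *	bytes 8..	little-endian jid bitmap; bit N set means jid N still
 *			needs recovery (hence JID_BITMAP_OFFSET == 8)
 */
#if 0
	/* e.g. does jid 3 still need recovery? */
	int jid3_dirty = test_bit_le(3, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
#endif
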
static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	__le32 gen;
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(__le32));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	__le32 gen;
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}

static int all_jid_bits_clear(char *lvb)
{
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}

static void sync_wait_cb(void *arg)
{
	struct lm_lockstruct *ls = arg;
	complete(&ls->ls_sync_wait);
}

static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error) {
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}
	return 0;
}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char strname[GDLM_STRNAME_BYTES];
	int error, status;

	memset(strname, 0, GDLM_STRNAME_BYTES);
	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);

	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error) {
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN) {
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	}

	return status;
}

static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}
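
/*
 * Illustrative sketch only -- not part of the original file.  Putting the
 * helpers above together: roughly how gfs2_control_func() below publishes
 * "jid 3 needs recovery" through the control_lock lvb.  Error handling,
 * ls_recover_spin bookkeeping and the local variables (sdp, ls, lvb_gen,
 * start_gen) are taken from that function's context.
 */
#if 0
	control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT | DLM_LKF_VALBLK);
	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);
	__set_bit_le(3, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
	control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT | DLM_LKF_VALBLK);
#endif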

static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession.  Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery.  So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
	 * again while working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}

static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always start with both locks in NL. control_lock is
	 * demoted to NL below so we don't need to do it here.
	 */

	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * before the recover_done and control_lock will be ready for us below.
	 * A delay here is not required but often avoids having to retry.
	 */

	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * control_lock lvb keeps track of any pending journal recoveries.
	 * mounted_lock indicates if any other nodes have the fs mounted.
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	/*
	 * If we're a spectator, we don't want to take the lock in EX because
	 * we cannot do the first-mount responsibility it implies: recovery.
	 */
	if (sdp->sd_args.ar_spectator)
		goto locks_done;

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got both locks above in EX, then we're the first mounter.
	 * If not, then we need to wait for the control_lock lvb to be
	 * updated by other mounted nodes to reflect our mount generation.
	 *
	 * In simple first mounter cases, first mounter will see zero lvb_gen,
	 * but in cases where all existing nodes leave/fail before mounting
	 * nodes finish control_mount, then all nodes will be mounting and
	 * lvb_gen will be non-zero.
	 */

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* special value to force mount attempts to fail */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter, keep both EX while doing first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not first mounter, now we need to wait for the control_lock
	 * lvb generation to be >= the generation from our first recover_done
	 * and all lvb bits to be clear (no pending journal recoveries.)
	 */

	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		if (sdp->sd_args.ar_spectator) {
			fs_info(sdp, "Recovery is required. Waiting for a "
				"non-spectator to mount.\n");
			msleep_interruptible(1000);
		} else {
			fs_info(sdp, "control_mount wait1 block %u start %u "
				"mount %u lvb %u flags %lx\n", block_gen,
				start_gen, mount_gen, lvb_gen,
				ls->ls_recover_flags);
		}
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}

static int control_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen;
	int error;

restart:
	spin_lock(&ls->ls_recover_spin);
	start_gen = ls->ls_recover_start;
	block_gen = ls->ls_recover_block;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		/* sanity check, should not happen */
		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
		       start_gen, block_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return -1;
	}

	if (start_gen == block_gen) {
		/*
		 * Wait for the end of a dlm recovery cycle to switch from
		 * first mounter recovery.  We can ignore any recover_slot
		 * callbacks between the recover_prep and next recover_done
		 * because we are still the first mounter and any failed nodes
		 * have not fully mounted, so they don't need recovery.
		 */
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);

		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}

	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);

	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
	if (error)
		fs_err(sdp, "control_first_done mounted PR error %d\n", error);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error)
		fs_err(sdp, "control_first_done control NL error %d\n", error);

	return error;
}

/*
 * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
 * to accommodate the largest slot number.  (NB dlm slot numbers start at 1,
 * gfs2 jids start at 0, so jid = slot - 1)
 */

#define RECOVER_SIZE_INC 16

static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
			    int num_slots)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t *submit = NULL;
	uint32_t *result = NULL;
	uint32_t old_size, new_size;
	int i, max_jid;

	if (!ls->ls_lvb_bits) {
		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!ls->ls_lvb_bits)
			return -ENOMEM;
	}

	max_jid = 0;
	for (i = 0; i < num_slots; i++) {
		if (max_jid < slots[i].slot - 1)
			max_jid = slots[i].slot - 1;
	}

	old_size = ls->ls_recover_size;
	new_size = old_size;
	while (new_size < max_jid + 1)
		new_size += RECOVER_SIZE_INC;
	if (new_size == old_size)
		return 0;

	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}

	spin_lock(&ls->ls_recover_spin);
	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = submit;
	ls->ls_recover_result = result;
	ls->ls_recover_size = new_size;
	spin_unlock(&ls->ls_recover_spin);
	return 0;
}

static void free_recover_size(struct lm_lockstruct *ls)
{
	kfree(ls->ls_lvb_bits);
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_recover_size = 0;
	ls->ls_lvb_bits = NULL;
}

/* dlm calls before it does lock recovery */

static void gdlm_recover_prep(void *arg)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_prep has been completed on all lockspace members;
   identifies slot/jid of failed member */

static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int jid = slot->slot - 1;

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
		       jid, ls->ls_recover_block, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	if (ls->ls_recover_submit[jid]) {
		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
	}
	ls->ls_recover_submit[jid] = ls->ls_recover_block;
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_slot and after it completes lock recovery */

static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	/* ensure the ls jid arrays are large enough */
	set_recover_size(sdp, slots, num_slots);

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_atomic();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
}

/* gfs2_recover thread has a journal recovery result */

static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
				 unsigned int result)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	/* don't care about the recovery of own journal during mount */
	if (jid == ls->ls_jid)
		return;

	spin_lock(&ls->ls_recover_spin);
	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recovery_result jid %d short size %d\n",
		       jid, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	fs_info(sdp, "recover jid %d result %s\n", jid,
		result == LM_RD_GAVEUP ? "busy" : "success");

	ls->ls_recover_result[jid] = result;

	/* GAVEUP means another node is recovering the journal; delay our
	   next attempt to recover it, to give the other node a chance to
	   finish before trying again */

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
				   result == LM_RD_GAVEUP ? HZ : 0);
	spin_unlock(&ls->ls_recover_spin);
}

static const struct dlm_lockspace_ops gdlm_lockspace_ops = {
	.recover_prep = gdlm_recover_prep,
	.recover_slot = gdlm_recover_slot,
	.recover_done = gdlm_recover_done,
};

static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char cluster[GFS2_LOCKNAME_LEN];
	const char *fsname;
	uint32_t flags;
	int error, ops_result;

	/*
	 * initialize everything
	 */

	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
	spin_lock_init(&ls->ls_recover_spin);
	ls->ls_recover_flags = 0;
	ls->ls_recover_mount = 0;
	ls->ls_recover_start = 0;
	ls->ls_recover_block = 0;
	ls->ls_recover_size = 0;
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_lvb_bits = NULL;

	error = set_recover_size(sdp, NULL, 0);
	if (error)
		goto fail;

	/*
	 * prepare dlm_new_lockspace args
	 */

	fsname = strchr(table, ':');
	if (!fsname) {
		fs_info(sdp, "no fsname found\n");
		error = -EINVAL;
		goto fail_free;
	}
	memset(cluster, 0, sizeof(cluster));
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	fsname++;

	flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL;

	/*
	 * create/join lockspace
	 */

	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
				  &gdlm_lockspace_ops, sdp, &ops_result,
				  &ls->ls_dlm);
	if (error) {
		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
		goto fail_free;
	}

	if (ops_result < 0) {
		/*
		 * dlm does not support ops callbacks,
		 * old dlm_controld/gfs_controld are used, try without ops.
		 */
		fs_info(sdp, "dlm lockspace ops not used\n");
		free_recover_size(ls);
		set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
		return 0;
	}

	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
		error = -EINVAL;
		goto fail_release;
	}

	/*
	 * control_mount() uses control_lock to determine first mounter,
	 * and for later mounts, waits for any recoveries to be cleared.
	 */

	error = control_mount(sdp);
	if (error) {
		fs_err(sdp, "mount control error %d\n", error);
		goto fail_release;
	}

	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	return 0;

fail_release:
	dlm_release_lockspace(ls->ls_dlm, 2);
fail_free:
	free_recover_size(ls);
fail:
	return error;
}

static void gdlm_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	error = control_first_done(sdp);
	if (error)
		fs_err(sdp, "mount first_done error %d\n", error);
}

static void gdlm_unmount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		goto release;

	/* wait for gfs2_control_wq to be done with this mount */

	spin_lock(&ls->ls_recover_spin);
	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	flush_delayed_work(&sdp->sd_control_work);

	/* mounted_lock and control_lock will be purged in dlm recovery */
release:
	if (ls->ls_dlm) {
		dlm_release_lockspace(ls->ls_dlm, 2);
		ls->ls_dlm = NULL;
	}

	free_recover_size(ls);
}

static const match_table_t dlm_tokens = {
	{ Opt_jid, "jid=%d"},
	{ Opt_id, "id=%d"},
	{ Opt_first, "first=%d"},
	{ Opt_nodir, "nodir=%d"},
	{ Opt_err, NULL },
};

const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_first_done = gdlm_first_done,
	.lm_recovery_result = gdlm_recovery_result,
	.lm_unmount = gdlm_unmount,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_cancel = gdlm_cancel,
	.lm_tokens = &dlm_tokens,
};