// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/* Central locking logic has four stages:

   dlm_lock()
   dlm_unlock()

   request_lock(ls, lkb)
   convert_lock(ls, lkb)
   unlock_lock(ls, lkb)
   cancel_lock(ls, lkb)

   _request_lock(r, lkb)
   _convert_lock(r, lkb)
   _unlock_lock(r, lkb)
   _cancel_lock(r, lkb)

   do_request(r, lkb)
   do_convert(r, lkb)
   do_unlock(r, lkb)
   do_cancel(r, lkb)

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

   dlm_lock          = request_lock
   dlm_lock+CONVERT  = convert_lock
   dlm_unlock        = unlock_lock
   dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
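
/* Illustrative caller sequence through stage 1 (a sketch, not part of
   this file's logic), using the public API from <linux/dlm.h>; the
   lockspace handle `ls`, the callbacks and the astarg below are
   assumptions for the example:

   struct dlm_lksb lksb;
   int error;

   error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "myres", 5, 0,
                    my_ast, my_arg, my_bast);             -> request_lock
   error = dlm_lock(ls, DLM_LOCK_PR, &lksb, DLM_LKF_CONVERT, "myres", 5,
                    0, my_ast, my_arg, my_bast);          -> convert_lock
   error = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, my_arg);
                                                          -> unlock_lock
   error = dlm_unlock(ls, lksb.sb_lkid, DLM_LKF_CANCEL, &lksb, my_arg);
                                                          -> cancel_lock
*/
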
#include <trace/events/dlm.h>

#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "midcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);

/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
	{1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
	{1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
	{1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
	{1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
	{1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
	{1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};

/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
	/* UN   NL  CR  CW  PR  PW  EX  PD*/
	{  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
	{  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
	{  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
	{  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
	{  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
	{  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
	{  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
	{  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
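
/* Worked example from the table above (illustration only): converting up
   from PR to EX (row PR, column EX) yields 1, so the current LVB contents
   are returned to the caller; converting down from EX to NL (row EX,
   column NL) yields 0, so the caller's LVB is written to the resource. */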

#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}

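/* Worked example from __dlm_compat_matrix (illustration only):
   dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) is 1, since two protected
   read locks may be held concurrently, while
   dlm_modes_compat(DLM_LOCK_EX, DLM_LOCK_PR) is 0, since an exclusive
   lock is compatible with nothing above NL. */
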
/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
	{0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
	{0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
	{0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
	{0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
	{0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
	{0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
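
/* Reading the QUECVT table (illustration only): a nonzero entry marks a
   grmode -> rqmode conversion affected by DLM_LKF_QUECVT, i.e. one that
   is queued behind earlier converts rather than granted in place
   (roughly, the "upward" conversions).  E.g. NL -> CR is 1, PR -> CW is
   1 even though CW and PR are not strictly ordered, and every conversion
   out of EX is 0. */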

void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
	       (unsigned long long)lkb->lkb_recover_seq);
}

static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
	       "rlc %d name %s\n",
	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
	       r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}

/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}

static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}

static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}

static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

#ifdef CONFIG_DLM_DEPRECATED_API
	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}
#endif

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}

/*
 * Basic operations on rsb's and lkb's
 */

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;
	int rv;

	rv = kref_put_lock(&r->res_ref, toss_rsb,
			   &ls->ls_rsbtbl[bucket].lock);
	if (rv)
		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}

static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}

/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */

static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count,
			  (const char *)name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	/* Convert the empty list_head to a NULL rb_node for tree usage: */
	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}

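/* Illustrative sketch of the retry protocol above; it mirrors what the
   find_rsb_* callers below actually do, with `lock` standing for the
   caller's spinlock:

   retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;
	spin_lock(&lock);
	...
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&lock);
		goto retry;
	}
*/
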
static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
	char maxname[DLM_RESNAME_MAXLEN];

	memset(maxname, 0, DLM_RESNAME_MAXLEN);
	memcpy(maxname, name, nlen);
	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}

int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len,
			struct dlm_rsb **r_ret)
{
	struct rb_node *node = tree->rb_node;
	struct dlm_rsb *r;
	int rc;

	while (node) {
		r = rb_entry(node, struct dlm_rsb, res_hashnode);
		rc = rsb_cmp(r, name, len);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	*r_ret = r;
	return 0;
}

static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
	struct rb_node **newn = &tree->rb_node;
	struct rb_node *parent = NULL;
	int rc;

	while (*newn) {
		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
					       res_hashnode);

		parent = *newn;
		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
		if (rc < 0)
			newn = &parent->rb_left;
		else if (rc > 0)
			newn = &parent->rb_right;
		else {
			log_print("rsb_insert match");
			dlm_dump_rsb(rsb);
			dlm_dump_rsb(cur);
			return -EEXIST;
		}
	}

	rb_link_node(&rsb->res_hashnode, parent, newn);
	rb_insert_color(&rsb->res_hashnode, tree);
	return 0;
}

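/* Usage sketch (mirroring find_rsb_dir() below): each hash bucket keeps
   two rb-trees, and a lookup tries the active "keep" tree first, then
   the "toss" tree:

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss,
					    name, len, &r);
*/
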
/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name-to-master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */

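/* Illustrative call (an assumption for the example, not quoted from this
   file): a local request path would resolve a name to a refcounted rsb
   with something like

	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);

   while a remote request being received would pass the sending nodeid
   and R_RECEIVE_REQUEST instead. */
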
56171e0d 550static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
c04fecb4
DT
551 uint32_t hash, uint32_t b,
552 int dir_nodeid, int from_nodeid,
553 unsigned int flags, struct dlm_rsb **r_ret)
e7fd4179 554{
c04fecb4
DT
555 struct dlm_rsb *r = NULL;
556 int our_nodeid = dlm_our_nodeid();
557 int from_local = 0;
558 int from_other = 0;
559 int from_dir = 0;
560 int create = 0;
e7fd4179
DT
561 int error;
562
c04fecb4
DT
563 if (flags & R_RECEIVE_REQUEST) {
564 if (from_nodeid == dir_nodeid)
565 from_dir = 1;
566 else
567 from_other = 1;
568 } else if (flags & R_REQUEST) {
569 from_local = 1;
570 }
571
572 /*
573 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
574 * from_nodeid has sent us a lock in dlm_recover_locks, believing
575 * we're the new master. Our local recovery may not have set
576 * res_master_nodeid to our_nodeid yet, so allow either. Don't
577 * create the rsb; dlm_recover_process_copy() will handle EBADR
578 * by resending.
579 *
580 * If someone sends us a request, we are the dir node, and we do
581 * not find the rsb anywhere, then recreate it. This happens if
582 * someone sends us a request after we have removed/freed an rsb
583 * from our toss list. (They sent a request instead of lookup
584 * because they are using an rsb from their toss list.)
585 */
586
587 if (from_local || from_dir ||
588 (from_other && (dir_nodeid == our_nodeid))) {
589 create = 1;
e7fd4179 590 }
57638bf3 591
c04fecb4
DT
592 retry:
593 if (create) {
594 error = pre_rsb_struct(ls);
595 if (error < 0)
596 goto out;
597 }
598
599 spin_lock(&ls->ls_rsbtbl[b].lock);
600
601 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
e7fd4179 602 if (error)
c04fecb4
DT
603 goto do_toss;
604
605 /*
606 * rsb is active, so we can't check master_nodeid without lock_rsb.
607 */
e7fd4179 608
c04fecb4 609 kref_get(&r->res_ref);
c04fecb4
DT
610 goto out_unlock;
611
612
613 do_toss:
614 error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
9beb3bf5 615 if (error)
c04fecb4 616 goto do_new;
e7fd4179 617
c04fecb4
DT
618 /*
619 * rsb found inactive (master_nodeid may be out of date unless
620 * we are the dir_nodeid or were the master) No other thread
621 * is using this rsb because it's on the toss list, so we can
622 * look at or update res_master_nodeid without lock_rsb.
623 */
e7fd4179 624
c04fecb4
DT
625 if ((r->res_master_nodeid != our_nodeid) && from_other) {
626 /* our rsb was not master, and another node (not the dir node)
627 has sent us a request */
628 log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
629 from_nodeid, r->res_master_nodeid, dir_nodeid,
630 r->res_name);
631 error = -ENOTBLK;
632 goto out_unlock;
633 }
634
635 if ((r->res_master_nodeid != our_nodeid) && from_dir) {
636 /* don't think this should ever happen */
637 log_error(ls, "find_rsb toss from_dir %d master %d",
638 from_nodeid, r->res_master_nodeid);
639 dlm_print_rsb(r);
640 /* fix it and go on */
641 r->res_master_nodeid = our_nodeid;
642 r->res_nodeid = 0;
e7fd4179
DT
643 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
644 r->res_first_lkid = 0;
c04fecb4
DT
645 }
646
647 if (from_local && (r->res_master_nodeid != our_nodeid)) {
648 /* Because we have held no locks on this rsb,
649 res_master_nodeid could have become stale. */
e7fd4179
DT
650 rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
651 r->res_first_lkid = 0;
c04fecb4
DT
652 }
653
654 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
655 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
656 goto out_unlock;
657
658
659 do_new:
660 /*
661 * rsb not found
662 */
663
664 if (error == -EBADR && !create)
665 goto out_unlock;
666
667 error = get_rsb_struct(ls, name, len, &r);
668 if (error == -EAGAIN) {
669 spin_unlock(&ls->ls_rsbtbl[b].lock);
670 goto retry;
671 }
672 if (error)
673 goto out_unlock;
674
675 r->res_hash = hash;
676 r->res_bucket = b;
677 r->res_dir_nodeid = dir_nodeid;
678 kref_init(&r->res_ref);
679
680 if (from_dir) {
681 /* want to see how often this happens */
682 log_debug(ls, "find_rsb new from_dir %d recreate %s",
683 from_nodeid, r->res_name);
684 r->res_master_nodeid = our_nodeid;
685 r->res_nodeid = 0;
686 goto out_add;
687 }
688
689 if (from_other && (dir_nodeid != our_nodeid)) {
690 /* should never happen */
691 log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
692 from_nodeid, dir_nodeid, our_nodeid, r->res_name);
693 dlm_free_rsb(r);
e8243f32 694 r = NULL;
c04fecb4
DT
695 error = -ENOTBLK;
696 goto out_unlock;
697 }
698
699 if (from_other) {
700 log_debug(ls, "find_rsb new from_other %d dir %d %s",
701 from_nodeid, dir_nodeid, r->res_name);
702 }
703
704 if (dir_nodeid == our_nodeid) {
705 /* When we are the dir nodeid, we can set the master
706 node immediately */
707 r->res_master_nodeid = our_nodeid;
708 r->res_nodeid = 0;
e7fd4179 709 } else {
c04fecb4
DT
710 /* set_master will send_lookup to dir_nodeid */
711 r->res_master_nodeid = 0;
712 r->res_nodeid = -1;
713 }
714
715 out_add:
716 error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
717 out_unlock:
718 spin_unlock(&ls->ls_rsbtbl[b].lock);
719 out:
720 *r_ret = r;
721 return error;
722}
723
/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourselves master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
			  uint32_t hash, uint32_t b,
			  int dir_nodeid, int from_nodeid,
			  unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int recover = (flags & R_RECEIVE_RECOVER);
	int error;

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive.  No other thread is using this rsb because
	 * it's on the toss list, so we can look at or update
	 * res_master_nodeid without lock_rsb.
	 */

	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
		/* our rsb is not master, and another node has sent us a
		   request; this should never happen */
		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (!recover && (r->res_master_nodeid != our_nodeid) &&
	    (dir_nodeid == our_nodeid)) {
		/* our rsb is not master, and we are dir; may as well fix it;
		   this should never happen */
		log_error(ls, "find_rsb toss our %d master %d dir %d",
			  our_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
	kref_init(&r->res_ref);

	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

static int find_rsb(struct dlm_ls *ls, const void *name, int len,
		    int from_nodeid, unsigned int flags,
		    struct dlm_rsb **r_ret)
{
	uint32_t hash, b;
	int dir_nodeid;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);

	if (dlm_no_directory(ls))
		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
				      from_nodeid, flags, r_ret);
	else
		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
				    from_nodeid, flags, r_ret);
}

/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
				  int from_nodeid)
{
	if (dlm_no_directory(ls)) {
		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid,
			  r->res_dir_nodeid);
		dlm_print_rsb(r);
		return -ENOTBLK;
	}

	if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request.  this is much more common when our
		   master_nodeid is zero, so limit debug to non-zero.  */

		if (r->res_master_nodeid) {
			log_debug(ls, "validate master from_other %d master %d "
				  "dir %d first %x %s", from_nodeid,
				  r->res_master_nodeid, r->res_dir_nodeid,
				  r->res_first_lkid, r->res_name);
		}
		return -ENOTBLK;
	} else {
		/* our rsb is not master, but the dir nodeid has sent us a
		   request; this could happen with master 0 / res_nodeid -1 */

		if (r->res_master_nodeid) {
			log_error(ls, "validate master from_dir %d master %d "
				  "first %x %s",
				  from_nodeid, r->res_master_nodeid,
				  r->res_first_lkid, r->res_name);
		}

		r->res_master_nodeid = dlm_our_nodeid();
		r->res_nodeid = 0;
		return 0;
	}
}

static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_nodeid,
				int from_nodeid, bool toss_list, unsigned int flags,
				int *r_nodeid, int *result)
{
	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
	int from_master = (flags & DLM_LU_RECOVER_DIR);

	if (r->res_dir_nodeid != our_nodeid) {
		/* should not happen, but may as well fix it and carry on */
		log_error(ls, "%s res_dir %d our %d %s", __func__,
			  r->res_dir_nodeid, our_nodeid, r->res_name);
		r->res_dir_nodeid = our_nodeid;
	}

	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		 * the previous master failed.  Setting NEW_MASTER will
		 * force dlm_recover_masters to call recover_master on this
		 * rsb even though the res_nodeid is no longer removed.
		 */

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);

		if (toss_list) {
			/* I don't think we should ever find it on toss list. */
			log_error(ls, "%s fix_master on toss", __func__);
			dlm_dump_rsb(r);
		}
	}

	if (from_master && (r->res_master_nodeid != from_nodeid)) {
		/* this will happen if from_nodeid became master during
		 * a previous recovery cycle, and we aborted the previous
		 * cycle before recovering this master value
		 */

		log_limit(ls, "%s from_master %d master_nodeid %d res_nodeid %d first %x %s",
			  __func__, from_nodeid, r->res_master_nodeid,
			  r->res_nodeid, r->res_first_lkid, r->res_name);

		if (r->res_master_nodeid == our_nodeid) {
			log_error(ls, "from_master %d our_master", from_nodeid);
			dlm_dump_rsb(r);
			goto ret_assign;
		}

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);
	}

	if (!r->res_master_nodeid) {
		/* this will happen if recovery happens while we're looking
		 * up the master for this rsb
		 */

		log_debug(ls, "%s master 0 to %d first %x %s", __func__,
			  from_nodeid, r->res_first_lkid, r->res_name);
		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
	}

	if (!from_master && !fix_master &&
	    (r->res_master_nodeid == from_nodeid)) {
		/* this can happen when the master sends remove, the dir node
		 * finds the rsb on the keep list and ignores the remove,
		 * and the former master sends a lookup
		 */

		log_limit(ls, "%s from master %d flags %x first %x %s",
			  __func__, from_nodeid, flags, r->res_first_lkid,
			  r->res_name);
	}

 ret_assign:
	*r_nodeid = r->res_master_nodeid;
	if (result)
		*result = DLM_LU_MATCH;
}

/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . _request_lock
 * . set_master
 * . send_lookup
 * . receive_lookup
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . recover_master
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */
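
/* Illustrative call for the normal-operation path above (variable names
   are assumptions for the example): on the dir node, servicing a lookup
   message from `from_nodeid` reduces to

	error = dlm_master_lookup(ls, from_nodeid, name, len, 0,
				  &r_nodeid, &result);

   after which r_nodeid is returned to the requester as the master, and
   result reports whether the dir record already matched (DLM_LU_MATCH)
   or was newly added (DLM_LU_ADD). */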

int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
		      unsigned int flags, int *r_nodeid, int *result)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int our_nodeid = dlm_our_nodeid();
	int dir_nodeid, error;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	if (from_nodeid == our_nodeid) {
		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
			  our_nodeid, flags);
		return -EINVAL;
	}

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
			  from_nodeid, dir_nodeid, our_nodeid, hash,
			  ls->ls_num_nodes);
		*r_nodeid = -1;
		return -EINVAL;
	}

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		return error;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		 * checking/changing res_master_nodeid
		 */

		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		lock_rsb(r);

		__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
				    flags, r_nodeid, result);

		/* the rsb was active */
		unlock_rsb(r);
		put_rsb(r);

		return 0;
	}

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto not_found;

	/* because the rsb is inactive (on toss list), it's not refcounted
	 * and lock_rsb is not used, but is protected by the rsbtbl lock
	 */

	__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
			    r_nodeid, result);

	r->res_toss_time = jiffies;
	/* the rsb was inactive (on toss list) */
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	return 0;

 not_found:
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = our_nodeid;
	r->res_master_nodeid = from_nodeid;
	r->res_nodeid = from_nodeid;
	kref_init(&r->res_ref);
	r->res_toss_time = jiffies;

	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
	if (error) {
		/* should never happen */
		dlm_free_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}

	if (result)
		*result = DLM_LU_ADD;
	*r_nodeid = from_nodeid;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}

static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (r->res_hash == hash)
				dlm_dump_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}

void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int error;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error)
		goto out_dump;

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto out;
 out_dump:
	dlm_dump_rsb(r);
 out:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
}

static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}

/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}

/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}

static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
		       int start, int end)
{
	struct dlm_lkb *lkb;
	int rv;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_last_bast_mode = -1;
	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
#ifdef CONFIG_DLM_DEPRECATED_API
	INIT_LIST_HEAD(&lkb->lkb_time_list);
#endif
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	INIT_LIST_HEAD(&lkb->lkb_callbacks);
	spin_lock_init(&lkb->lkb_cb_lock);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
	if (rv >= 0)
		lkb->lkb_id = rv;
	spin_unlock(&ls->ls_lkbidr_spin);
	idr_preload_end();

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		dlm_free_lkb(lkb);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}

static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	return _create_lkb(ls, lkb_ret, 1, 0);
}

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}

static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;
	int rv;

	rv = kref_put_lock(&lkb->lkb_ref, kill_lkb,
			   &ls->ls_lkbidr_spin);
	if (rv) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
	}

	return rv;
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

static void unhold_lkb_assert(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	DLM_ASSERT(false, dlm_print_lkb(lkb););
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	kref_put(&lkb->lkb_ref, unhold_lkb_assert);
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL, *iter;

	list_for_each_entry(iter, head, lkb_statequeue)
		if (iter->lkb_rqmode < mode) {
			lkb = iter;
			list_add_tail(new, &iter->lkb_statequeue);
			break;
		}

	if (!lkb)
		list_add_tail(new, head);
}

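/* Example of the ordering lkb_add_ordered() maintains (illustration
   only): inserting a PW entry into a queue ordered [EX, PR, NL] places
   it before the first entry with a lower mode, giving [EX, PW, PR, NL]. */
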
/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}

static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}

/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "addwait error %x %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
				struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* Cancel state was preemptively cleared by a successful convert,
	   see next comment, nothing to do. */

	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
		log_debug(ls, "remwait %x cancel_reply wait_type %d",
			  lkb->lkb_id, lkb->lkb_wait_type);
		return -1;
	}

	/* Remove for the convert reply, and preemptively remove for the
	   cancel reply.  A convert has been granted while there's still
	   an outstanding cancel on it (the cancel is moot and the result
	   in the cancel reply should be 0).  We preempt the cancel reply
	   because the app gets the convert result and then can follow up
	   with another op, like convert.  This subsequent op would see the
	   lingering state of the cancel and fail with -EBUSY. */

	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
			  lkb->lkb_id);
		lkb->lkb_wait_type = 0;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_count--;
		unhold_lkb(lkb);
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
		  lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0,
		  lkb->lkb_remid, mstype, lkb->lkb_flags);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't recvd a reply
	   to the op that was in progress prior to the unlock/cancel; we
	   give up on any reply to the earlier op.  FIXME: not sure when/how
	   this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		unhold_lkb(lkb);
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}

ef0c2bb0 1565static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
e7fd4179
DT
1566{
1567 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1568 int error;
1569
90135925 1570 mutex_lock(&ls->ls_waiters_mutex);
43279e53 1571 error = _remove_from_waiters(lkb, mstype, NULL);
90135925 1572 mutex_unlock(&ls->ls_waiters_mutex);
e7fd4179
DT
1573 return error;
1574}
1575
ef0c2bb0
DT
1576/* Handles situations where we might be processing a "fake" or "stub" reply in
1577 which we can't try to take waiters_mutex again. */
1578
1579static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
1580{
1581 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1582 int error;
1583
00e99ccd 1584 if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS))
ef0c2bb0 1585 mutex_lock(&ls->ls_waiters_mutex);
00e99ccd
AA
1586 error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
1587 if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS))
ef0c2bb0
DT
1588 mutex_unlock(&ls->ls_waiters_mutex);
1589 return error;
1590}
1591
static void shrink_bucket(struct dlm_ls *ls, int b)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	char *name;
	int our_nodeid = dlm_our_nodeid();
	int remote_count = 0;
	int need_shrink = 0;
	int i, len, rv;

	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);

	spin_lock(&ls->ls_rsbtbl[b].lock);

	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		return;
	}

	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
		next = rb_next(n);
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		/* If we're the directory record for this rsb, and
		   we're not the master of it, then we need to wait
		   for the master node to send us a dir remove before
		   removing the dir record. */

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid != our_nodeid) &&
		    (dlm_dir_nodeid(r) == our_nodeid)) {
			continue;
		}

		need_shrink = 1;

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			continue;
		}

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid == our_nodeid) &&
		    (dlm_dir_nodeid(r) != our_nodeid)) {

			/* We're the master of this rsb but we're not
			   the directory record, so we need to tell the
			   dir node to remove the dir record. */

			ls->ls_remove_lens[remote_count] = r->res_length;
			memcpy(ls->ls_remove_names[remote_count], r->res_name,
			       DLM_RESNAME_MAXLEN);
			remote_count++;

			if (remote_count >= DLM_REMOVE_NAMES_MAX)
				break;
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			log_error(ls, "tossed rsb in use %s", r->res_name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		dlm_free_rsb(r);
	}

	if (need_shrink)
		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
	else
		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	/*
	 * While searching for rsb's to free, we found some that require
	 * remote removal.  We leave them in place and find them again here
	 * so there is a very small gap between removing them from the toss
	 * list and sending the removal.  Keeping this gap small is
	 * important to keep us (the master node) from being out of sync
	 * with the remote dir node for very long.
	 */

	for (i = 0; i < remote_count; i++) {
		name = ls->ls_remove_names[i];
		len = ls->ls_remove_lens[i];

		spin_lock(&ls->ls_rsbtbl[b].lock);
		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
		if (rv) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name not toss %s", name);
			continue;
		}

		if (r->res_master_nodeid != our_nodeid) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name master %d dir %d our %d %s",
				  r->res_master_nodeid, r->res_dir_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (r->res_dir_nodeid == our_nodeid) {
			/* should never happen */
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name dir %d master %d our %d %s",
				  r->res_dir_nodeid, r->res_master_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name toss_time %lu now %lu %s",
				  r->res_toss_time, jiffies, name);
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name in use %s", name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		send_remove(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);

		dlm_free_rsb(r);
	}
}

void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		if (dlm_locking_stopped(ls))
			break;
		cond_resched();
	}
}

6b0afc0c 1738#ifdef CONFIG_DLM_DEPRECATED_API
3ae1acf9
DT
1739static void add_timeout(struct dlm_lkb *lkb)
1740{
1741 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1742
eeda418d 1743 if (is_master_copy(lkb))
3ae1acf9 1744 return;
3ae1acf9
DT
1745
1746 if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1747 !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1748 lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1749 goto add_it;
1750 }
84d8cd69
DT
1751 if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1752 goto add_it;
3ae1acf9
DT
1753 return;
1754
1755 add_it:
1756 DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1757 mutex_lock(&ls->ls_timeout_mutex);
1758 hold_lkb(lkb);
3ae1acf9
DT
1759 list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1760 mutex_unlock(&ls->ls_timeout_mutex);
1761}
1762
1763static void del_timeout(struct dlm_lkb *lkb)
1764{
1765 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1766
1767 mutex_lock(&ls->ls_timeout_mutex);
1768 if (!list_empty(&lkb->lkb_time_list)) {
1769 list_del_init(&lkb->lkb_time_list);
1770 unhold_lkb(lkb);
1771 }
1772 mutex_unlock(&ls->ls_timeout_mutex);
1773}
1774
1775/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1776 lkb_lksb_timeout without lock_rsb? Note: we can't lock timeout_mutex
1777 and then lock rsb because of lock ordering in add_timeout. We may need
1778 to specify some special timeout-related bits in the lkb that are just to
1779 be accessed under the timeout_mutex. */
1780
1781void dlm_scan_timeout(struct dlm_ls *ls)
1782{
1783 struct dlm_rsb *r;
1784 struct dlm_lkb *lkb = NULL, *iter;
1785 int do_cancel, do_warn;
1786 s64 wait_us;
1787
1788 for (;;) {
1789 if (dlm_locking_stopped(ls))
1790 break;
1791
1792 do_cancel = 0;
1793 do_warn = 0;
1794 mutex_lock(&ls->ls_timeout_mutex);
1795 list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
1796
1797 wait_us = ktime_to_us(ktime_sub(ktime_get(),
1798 iter->lkb_timestamp));
1799
1800 if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
1801 wait_us >= (iter->lkb_timeout_cs * 10000))
1802 do_cancel = 1;
1803
1804 if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1805 wait_us >= dlm_config.ci_timewarn_cs * 10000)
1806 do_warn = 1;
1807
1808 if (!do_cancel && !do_warn)
1809 continue;
1810 hold_lkb(iter);
1811 lkb = iter;
1812 break;
1813 }
1814 mutex_unlock(&ls->ls_timeout_mutex);
1815
1816 if (!lkb)
1817 break;
1818
1819 r = lkb->lkb_resource;
1820 hold_rsb(r);
1821 lock_rsb(r);
1822
1823 if (do_warn) {
1824 /* clear flag so we only warn once */
1825 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1826 if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1827 del_timeout(lkb);
1828 dlm_timeout_warn(lkb);
1829 }
1830
1831 if (do_cancel) {
1832 log_debug(ls, "timeout cancel %x node %d %s",
1833 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1834 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1835 lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1836 del_timeout(lkb);
1837 _cancel_lock(r, lkb);
1838 }
1839
1840 unlock_rsb(r);
1841 unhold_rsb(r);
1842 dlm_put_lkb(lkb);
1843 }
1844}
1845
1846/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1847 dlm_recoverd before checking/setting ls_recover_begin. */
1848
1849void dlm_adjust_timeouts(struct dlm_ls *ls)
1850{
1851 struct dlm_lkb *lkb;
1852 u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
1853
1854 ls->ls_recover_begin = 0;
1855 mutex_lock(&ls->ls_timeout_mutex);
1856 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1857 lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
1858 mutex_unlock(&ls->ls_timeout_mutex);
1859}
1860#else
1861static void add_timeout(struct dlm_lkb *lkb) { }
1862static void del_timeout(struct dlm_lkb *lkb) { }
1863#endif
1864
1865/* lkb is master or local copy */
1866
1867static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1868{
1869 int b, len = r->res_ls->ls_lvblen;
1870
1871 /* b=1 lvb returned to caller
1872 b=0 lvb written to rsb or invalidated
1873 b=-1 do nothing */
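	/* illustrative note (not original documentation): the +1 offsets
	   below exist because DLM_LOCK_IV is -1, so grmode/rqmode map onto
	   non-negative table indexes; e.g. a PR -> EX convert reads
	   dlm_lvb_operations[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1] */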
1874
1875 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1876
1877 if (b == 1) {
1878 if (!lkb->lkb_lvbptr)
1879 return;
1880
1881 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1882 return;
1883
1884 if (!r->res_lvbptr)
1885 return;
1886
1887 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1888 lkb->lkb_lvbseq = r->res_lvbseq;
1889
1890 } else if (b == 0) {
1891 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1892 rsb_set_flag(r, RSB_VALNOTVALID);
1893 return;
1894 }
1895
1896 if (!lkb->lkb_lvbptr)
1897 return;
1898
1899 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1900 return;
1901
1902 if (!r->res_lvbptr)
1903 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1904
1905 if (!r->res_lvbptr)
1906 return;
1907
1908 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1909 r->res_lvbseq++;
1910 lkb->lkb_lvbseq = r->res_lvbseq;
1911 rsb_clear_flag(r, RSB_VALNOTVALID);
1912 }
1913
1914 if (rsb_flag(r, RSB_VALNOTVALID))
1915 lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1916}
1917
1918static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1919{
1920 if (lkb->lkb_grmode < DLM_LOCK_PW)
1921 return;
1922
1923 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1924 rsb_set_flag(r, RSB_VALNOTVALID);
1925 return;
1926 }
1927
1928 if (!lkb->lkb_lvbptr)
1929 return;
1930
1931 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1932 return;
1933
1934 if (!r->res_lvbptr)
1935 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1936
1937 if (!r->res_lvbptr)
1938 return;
1939
1940 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
1941 r->res_lvbseq++;
1942 rsb_clear_flag(r, RSB_VALNOTVALID);
1943}
1944
1945/* lkb is process copy (pc) */
1946
1947static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1948 struct dlm_message *ms)
1949{
1950 int b;
1951
1952 if (!lkb->lkb_lvbptr)
1953 return;
1954
1955 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1956 return;
1957
1958 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1959 if (b == 1) {
1960 int len = receive_extralen(ms);
1961 if (len > r->res_ls->ls_lvblen)
1962 len = r->res_ls->ls_lvblen;
1963 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
1964 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
1965 }
1966}
1967
1968/* Manipulate lkb's on rsb's convert/granted/waiting queues
1969 remove_lock -- used for unlock, removes lkb from granted
1970 revert_lock -- used for cancel, moves lkb from convert to granted
1971 grant_lock -- used for request and convert, adds lkb to granted or
1972 moves lkb from convert or waiting to granted
1973
1974 Each of these is used for master or local copy lkb's. There is
1975 also a _pc() variation used to make the corresponding change on
1976 a process copy (pc) lkb. */
1977
1978static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1979{
1980 del_lkb(r, lkb);
1981 lkb->lkb_grmode = DLM_LOCK_IV;
1982 /* this unhold undoes the original ref from create_lkb()
1983 so this leads to the lkb being freed */
1984 unhold_lkb(lkb);
1985}
1986
1987static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1988{
1989 set_lvb_unlock(r, lkb);
1990 _remove_lock(r, lkb);
1991}
1992
1993static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1994{
1995 _remove_lock(r, lkb);
1996}
1997
1998/* returns: 0 did nothing
1999 1 moved lock to granted
2000 -1 removed lock */
2001
2002static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2003 {
2004 int rv = 0;
2005
2006 lkb->lkb_rqmode = DLM_LOCK_IV;
2007
2008 switch (lkb->lkb_status) {
2009 case DLM_LKSTS_GRANTED:
2010 break;
2011 case DLM_LKSTS_CONVERT:
2012 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2013 rv = 1;
2014 break;
2015 case DLM_LKSTS_WAITING:
2016 del_lkb(r, lkb);
2017 lkb->lkb_grmode = DLM_LOCK_IV;
2018 /* this unhold undoes the original ref from create_lkb()
2019 so this leads to the lkb being freed */
2020 unhold_lkb(lkb);
2021 rv = -1;
2022 break;
2023 default:
2024 log_print("invalid status for revert %d", lkb->lkb_status);
2025 }
2026 return rv;
2027}
2028
2029 static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2030 {
2031 return revert_lock(r, lkb);
2032}
2033
2034static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2035{
2036 if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2037 lkb->lkb_grmode = lkb->lkb_rqmode;
2038 if (lkb->lkb_status)
2039 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2040 else
2041 add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2042 }
2043
2044 lkb->lkb_rqmode = DLM_LOCK_IV;
2045 lkb->lkb_highbast = 0;
2046}
2047
2048static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2049{
2050 set_lvb_lock(r, lkb);
2051 _grant_lock(r, lkb);
2052}
2053
2054static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2055 struct dlm_message *ms)
2056{
2057 set_lvb_lock_pc(r, lkb, ms);
2058 _grant_lock(r, lkb);
2059}
2060
2061/* called by grant_pending_locks() which means an async grant message must
2062 be sent to the requesting node in addition to granting the lock if the
2063 lkb belongs to a remote node. */
2064
2065static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2066{
2067 grant_lock(r, lkb);
2068 if (is_master_copy(lkb))
2069 send_grant(r, lkb);
2070 else
2071 queue_cast(r, lkb, 0);
2072}
2073
2074/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2075 change the granted/requested modes. We're munging things accordingly in
2076 the process copy.
2077 CONVDEADLK: our grmode may have been forced down to NL to resolve a
2078 conversion deadlock
2079 ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2080 compatible with other granted locks */
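/* Hypothetical walk-through (illustrative assumption, not original
   documentation): a process copy that held PW and converted to EX may
   find the master resolved a conversion deadlock by forcing its granted
   mode to NL; the reply carries DLM_SBF_DEMOTED and munge_demoted()
   below applies the same demotion to the local copy so both nodes agree
   on the granted mode. */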
2081
2082 static void munge_demoted(struct dlm_lkb *lkb)
2083 {
2084 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2085 log_print("munge_demoted %x invalid modes gr %d rq %d",
2086 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2087 return;
2088 }
2089
2090 lkb->lkb_grmode = DLM_LOCK_NL;
2091}
2092
2093static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2094{
2095 if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) &&
2096 ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) {
2097 log_print("munge_altmode %x invalid reply type %d",
2098 lkb->lkb_id, le32_to_cpu(ms->m_type));
2099 return;
2100 }
2101
2102 if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2103 lkb->lkb_rqmode = DLM_LOCK_PR;
2104 else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2105 lkb->lkb_rqmode = DLM_LOCK_CW;
2106 else {
2107 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2108 dlm_print_lkb(lkb);
2109 }
2110}
2111
2112static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2113{
2114 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2115 lkb_statequeue);
2116 if (lkb->lkb_id == first->lkb_id)
2117 return 1;
2118
2119 return 0;
2120}
2121
2122/* Check if the given lkb conflicts with another lkb on the queue. */
2123
2124static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2125{
2126 struct dlm_lkb *this;
2127
2128 list_for_each_entry(this, head, lkb_statequeue) {
2129 if (this == lkb)
2130 continue;
2131 if (!modes_compat(this, lkb))
2132 return 1;
2133 }
2134 return 0;
2135}
2136
2137/*
2138 * "A conversion deadlock arises with a pair of lock requests in the converting
2139 * queue for one resource. The granted mode of each lock blocks the requested
2140 * mode of the other lock."
2141 *
2142 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2143 * convert queue from being granted, then deadlk/demote lkb.
2144 *
2145 * Example:
2146 * Granted Queue: empty
2147 * Convert Queue: NL->EX (first lock)
2148 * PR->EX (second lock)
2149 *
2150 * The first lock can't be granted because of the granted mode of the second
2151 * lock and the second lock can't be granted because it's not first in the
2152 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2153 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2154 * flag set and return DEMOTED in the lksb flags.
2155 *
2156 * Originally, this function detected conv-deadlk in a more limited scope:
2157 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2158 * - if lkb1 was the first entry in the queue (not just earlier), and was
2159 * blocked by the granted mode of lkb2, and there was nothing on the
2160 * granted queue preventing lkb1 from being granted immediately, i.e.
2161 * lkb2 was the only thing preventing lkb1 from being granted.
2162 *
2163 * That second condition meant we'd only say there was conv-deadlk if
2164 * resolving it (by demotion) would lead to the first lock on the convert
2165 * queue being granted right away. It allowed conversion deadlocks to exist
2166 * between locks on the convert queue while they couldn't be granted anyway.
2167 *
2168 * Now, we detect and take action on conversion deadlocks immediately when
2169 * they're created, even if they may not be immediately consequential. If
2170 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2171 * mode that would prevent lkb1's conversion from being granted, we do a
2172 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2173 * I think this means that the lkb_is_ahead condition below should always
2174 * be zero, i.e. there will never be conv-deadlk between two locks that are
2175 * both already on the convert queue.
2176 */
2177
2178 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2179 {
2180 struct dlm_lkb *lkb1;
2181 int lkb_is_ahead = 0;
2182
2183 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2184 if (lkb1 == lkb2) {
2185 lkb_is_ahead = 1;
2186 continue;
2187 }
2188
2189 if (!lkb_is_ahead) {
2190 if (!modes_compat(lkb2, lkb1))
2191 return 1;
2192 } else {
2193 if (!modes_compat(lkb2, lkb1) &&
2194 !modes_compat(lkb1, lkb2))
2195 return 1;
2196 }
2197 }
2198 return 0;
2199}
2200
2201/*
2202 * Return 1 if the lock can be granted, 0 otherwise.
2203 * Also detect and resolve conversion deadlocks.
2204 *
2205 * lkb is the lock to be granted
2206 *
2207 * now is 1 if the function is being called in the context of the
2208 * immediate request, it is 0 if called later, after the lock has been
2209 * queued.
2210 *
2211 * recover is 1 if dlm_recover_grant() is trying to grant conversions
2212 * after recovery.
2213 *
2214 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2215 */
2216
2217static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2218 int recover)
2219{
2220 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2221
2222 /*
2223 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2224 * a new request for a NL mode lock being blocked.
2225 *
2226 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2227 * request, then it would be granted. In essence, the use of this flag
2228 * tells the Lock Manager to expedite this request by not considering
2229 * what may be in the CONVERTING or WAITING queues... As of this
2230 * writing, the EXPEDITE flag can be used only with new requests for NL
2231 * mode locks. This flag is not valid for conversion requests.
2232 *
2233 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2234 * conversion or used with a non-NL requested mode. We also know an
2235 * EXPEDITE request is always granted immediately, so now must always
2236 * be 1. The full condition to grant an expedite request: (now &&
2237 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2238 * therefore be shortened to just checking the flag.
2239 */
2240
2241 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2242 return 1;
2243
2244 /*
2245 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2246 * added to the remaining conditions.
2247 */
2248
2249 if (queue_conflict(&r->res_grantqueue, lkb))
2250 return 0;
2251
2252 /*
2253 * 6-3: By default, a conversion request is immediately granted if the
2254 * requested mode is compatible with the modes of all other granted
2255 * locks
2256 */
2257
2258 if (queue_conflict(&r->res_convertqueue, lkb))
2259 return 0;
2260
2261 /*
2262 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2263 * locks for a recovered rsb, on which lkb's have been rebuilt.
2264 * The lkb's may have been rebuilt on the queues in a different
2265 * order than they were in on the previous master. So, granting
2266 * queued conversions in order after recovery doesn't make sense
2267 * since the order hasn't been preserved anyway. The new order
2268 * could also have created a new "in place" conversion deadlock.
2269 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2270 * After recovery, there would be no granted locks, and possibly
2271 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
2272 * recovery, grant conversions without considering order.
2273 */
2274
2275 if (conv && recover)
2276 return 1;
2277
2278 /*
2279 * 6-5: But the default algorithm for deciding whether to grant or
2280 * queue conversion requests does not by itself guarantee that such
2281 * requests are serviced on a "first come first serve" basis. This, in
2283 * turn, can lead to a phenomenon known as "indefinite postponement".
2283 *
2284 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2285 * the system service employed to request a lock conversion. This flag
2286 * forces certain conversion requests to be queued, even if they are
2287 * compatible with the granted modes of other locks on the same
2288 * resource. Thus, the use of this flag results in conversion requests
2289 * being ordered on a "first come first serve" basis.
2290 *
2291 * DCT: This condition is all about new conversions being able to occur
2292 * "in place" while the lock remains on the granted queue (assuming
2293 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2294 * doesn't _have_ to go onto the convert queue where it's processed in
2295 * order. The "now" variable is necessary to distinguish converts
2296 * being received and processed for the first time now, because once a
2297 * convert is moved to the conversion queue the condition below applies
2298 * requiring fifo granting.
2299 */
2300
2301 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2302 return 1;
2303
2304 /*
2305 * Even if the convert is compat with all granted locks,
2306 * QUECVT forces it behind other locks on the convert queue.
2307 */
2308
2309 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2310 if (list_empty(&r->res_convertqueue))
2311 return 1;
2312 else
2313 return 0;
2314 }
2315
2316 /*
2317 * The NOORDER flag is set to avoid the standard vms rules on grant
2318 * order.
2319 */
2320
2321 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2322 return 1;
2323
2324 /*
2325 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2326 * granted until all other conversion requests ahead of it are granted
2327 * and/or canceled.
2328 */
2329
2330 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2331 return 1;
2332
2333 /*
2334 * 6-4: By default, a new request is immediately granted only if all
2335 * three of the following conditions are satisfied when the request is
2336 * issued:
2337 * - The queue of ungranted conversion requests for the resource is
2338 * empty.
2339 * - The queue of ungranted new requests for the resource is empty.
2340 * - The mode of the new request is compatible with the most
2341 * restrictive mode of all granted locks on the resource.
2342 */
2343
2344 if (now && !conv && list_empty(&r->res_convertqueue) &&
2345 list_empty(&r->res_waitqueue))
2346 return 1;
2347
2348 /*
2349 * 6-4: Once a lock request is in the queue of ungranted new requests,
2350 * it cannot be granted until the queue of ungranted conversion
2351 * requests is empty, all ungranted new requests ahead of it are
2352 * granted and/or canceled, and it is compatible with the granted mode
2353 * of the most restrictive lock granted on the resource.
2354 */
2355
2356 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2357 first_in_list(lkb, &r->res_waitqueue))
2358 return 1;
2359
2360 return 0;
2361}
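/* Worked example (illustrative, not original documentation): with EX
   granted on the resource, a new CR request hits the queue_conflict()
   check on the grant queue and returns 0; the same caller using
   DLM_LKF_EXPEDITE with an NL request would have returned 1 at the top
   without consulting any queue. */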
2362
2363 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2364 int recover, int *err)
2365 {
2366 int rv;
2367 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2368 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2369
2370 if (err)
2371 *err = 0;
2372
2373 rv = _can_be_granted(r, lkb, now, recover);
2374 if (rv)
2375 goto out;
2376
2377 /*
2378 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2379 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2380 * cancels one of the locks.
2381 */
2382
2383 if (is_convert && can_be_queued(lkb) &&
2384 conversion_deadlock_detect(r, lkb)) {
2385 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2386 lkb->lkb_grmode = DLM_LOCK_NL;
2387 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2388 } else if (err) {
2389 *err = -EDEADLK;
2390 } else {
2391 log_print("can_be_granted deadlock %x now %d",
2392 lkb->lkb_id, now);
2393 dlm_dump_rsb(r);
2394 }
2395 goto out;
2396 }
2397
2398 /*
2399 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2400 * to grant a request in a mode other than the normal rqmode. It's a
2401 * simple way to provide a big optimization to applications that can
2402 * use them.
2403 */
2404
2405 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2406 alt = DLM_LOCK_PR;
2407 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2408 alt = DLM_LOCK_CW;
2409
2410 if (alt) {
2411 lkb->lkb_rqmode = alt;
2412 rv = _can_be_granted(r, lkb, now, 0);
2413 if (rv)
2414 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2415 else
2416 lkb->lkb_rqmode = rqmode;
2417 }
2418 out:
2419 return rv;
2420}
2421
2422/* Returns the highest requested mode of all blocked conversions; sets
2423 cw if there's a blocked conversion to DLM_LOCK_CW. */
2424
2425static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2426 unsigned int *count)
2427{
2428 struct dlm_lkb *lkb, *s;
2429 int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2430 int hi, demoted, quit, grant_restart, demote_restart;
2431 int deadlk;
2432
2433 quit = 0;
2434 restart:
2435 grant_restart = 0;
2436 demote_restart = 0;
2437 hi = DLM_LOCK_IV;
2438
2439 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2440 demoted = is_demoted(lkb);
2441 deadlk = 0;
2442
2443 if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2444 grant_lock_pending(r, lkb);
2445 grant_restart = 1;
2446 if (count)
2447 (*count)++;
2448 continue;
2449 }
2450
2451 if (!demoted && is_demoted(lkb)) {
2452 log_print("WARN: pending demoted %x node %d %s",
2453 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2454 demote_restart = 1;
2455 continue;
2456 }
2457
2458 if (deadlk) {
2459 /*
2460 * If the DLM_LKF_NODLCKWT flag is set and conversion
2461 * deadlock is detected, we request blocking AST and
2462 * down (or cancel) conversion.
2463 */
2464 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2465 if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2466 queue_bast(r, lkb, lkb->lkb_rqmode);
2467 lkb->lkb_highbast = lkb->lkb_rqmode;
2468 }
2469 } else {
2470 log_print("WARN: pending deadlock %x node %d %s",
2471 lkb->lkb_id, lkb->lkb_nodeid,
2472 r->res_name);
2473 dlm_dump_rsb(r);
2474 }
2475 continue;
2476 }
2477
2478 hi = max_t(int, lkb->lkb_rqmode, hi);
2479
2480 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2481 *cw = 1;
2482 }
2483
2484 if (grant_restart)
2485 goto restart;
2486 if (demote_restart && !quit) {
2487 quit = 1;
2488 goto restart;
2489 }
2490
2491 return max_t(int, high, hi);
2492}
2493
2494static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2495 unsigned int *count)
2496{
2497 struct dlm_lkb *lkb, *s;
2498
2499 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2500 if (can_be_granted(r, lkb, 0, 0, NULL)) {
2501 grant_lock_pending(r, lkb);
2502 if (count)
2503 (*count)++;
2504 } else {
2505 high = max_t(int, lkb->lkb_rqmode, high);
2506 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2507 *cw = 1;
2508 }
2509 }
2510
2511 return high;
2512}
2513
2514/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2515 on either the convert or waiting queue.
2516 high is the largest rqmode of all locks blocked on the convert or
2517 waiting queue. */
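/* A concrete reading (illustrative, not original documentation): a lock
   granted in PR while an EX request is blocked (high == DLM_LOCK_EX)
   needs a bast, since PR and EX are incompatible in __dlm_compat_matrix;
   a lock granted in NL needs none, as NL is compatible with any mode. */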
2518
2519static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2520{
2521 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2522 if (gr->lkb_highbast < DLM_LOCK_EX)
2523 return 1;
2524 return 0;
2525 }
2526
2527 if (gr->lkb_highbast < high &&
2528 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2529 return 1;
2530 return 0;
2531}
2532
2533 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2534{
2535 struct dlm_lkb *lkb, *s;
2536 int high = DLM_LOCK_IV;
2537 int cw = 0;
2538
2539 if (!is_master(r)) {
2540 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2541 dlm_dump_rsb(r);
2542 return;
2543 }
2544
2545 high = grant_pending_convert(r, high, &cw, count);
2546 high = grant_pending_wait(r, high, &cw, count);
2547
2548 if (high == DLM_LOCK_IV)
2549 return;
2550
2551 /*
2552 * If there are locks left on the wait/convert queue then send blocking
2553 * ASTs to granted locks based on the largest requested mode (high)
2554 * found above.
2555 */
2556
2557 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2558 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2559 if (cw && high == DLM_LOCK_PR &&
2560 lkb->lkb_grmode == DLM_LOCK_PR)
2561 queue_bast(r, lkb, DLM_LOCK_CW);
2562 else
2563 queue_bast(r, lkb, high);
2564 lkb->lkb_highbast = high;
2565 }
2566 }
2567}
2568
2569static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2570{
2571 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2572 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2573 if (gr->lkb_highbast < DLM_LOCK_EX)
2574 return 1;
2575 return 0;
2576 }
2577
2578 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2579 return 1;
2580 return 0;
2581}
2582
2583static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2584 struct dlm_lkb *lkb)
2585{
2586 struct dlm_lkb *gr;
2587
2588 list_for_each_entry(gr, head, lkb_statequeue) {
2589 /* skip self when sending basts to convertqueue */
2590 if (gr == lkb)
2591 continue;
2592 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2593 queue_bast(r, gr, lkb->lkb_rqmode);
2594 gr->lkb_highbast = lkb->lkb_rqmode;
2595 }
2596 }
2597}
2598
2599static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2600{
2601 send_bast_queue(r, &r->res_grantqueue, lkb);
2602}
2603
2604static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2605{
2606 send_bast_queue(r, &r->res_grantqueue, lkb);
2607 send_bast_queue(r, &r->res_convertqueue, lkb);
2608}
2609
2610/* set_master(r, lkb) -- set the master nodeid of a resource
2611
2612 The purpose of this function is to set the nodeid field in the given
2613 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2614 known, it can just be copied to the lkb and the function will return
2615 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2616 before it can be copied to the lkb.
2617
2618 When the rsb nodeid is being looked up remotely, the initial lkb
2619 causing the lookup is kept on the ls_waiters list waiting for the
2620 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2621 on the rsb's res_lookup list until the master is verified.
2622
2623 Return values:
2624 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2625 1: the rsb master is not available and the lkb has been placed on
2626 a wait queue
2627*/
2628
2629static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2630{
2631 int our_nodeid = dlm_our_nodeid();
2632
2633 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2634 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2635 r->res_first_lkid = lkb->lkb_id;
2636 lkb->lkb_nodeid = r->res_nodeid;
2637 return 0;
2638 }
2639
2640 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2641 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2642 return 1;
2643 }
2644
2645 if (r->res_master_nodeid == our_nodeid) {
2646 lkb->lkb_nodeid = 0;
2647 return 0;
2648 }
2649
c04fecb4
DT
2650 if (r->res_master_nodeid) {
2651 lkb->lkb_nodeid = r->res_master_nodeid;
2652 return 0;
2653 }
2654
2655 if (dlm_dir_nodeid(r) == our_nodeid) {
2656 /* This is a somewhat unusual case; find_rsb will usually
2657 have set res_master_nodeid when dir nodeid is local, but
2658 there are cases where we become the dir node after we've
2659 passed find_rsb and go through _request_lock again.
2660 confirm_master() or process_lookup_list() needs to be
2661 called after this. */
2662 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2663 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2664 r->res_name);
2665 r->res_master_nodeid = our_nodeid;
2666 r->res_nodeid = 0;
2667 lkb->lkb_nodeid = 0;
c04fecb4 2668 return 0;
2669 }
2670
2671 r->res_first_lkid = lkb->lkb_id;
2672 send_lookup(r, lkb);
2673 return 1;
2674}
2675
2676static void process_lookup_list(struct dlm_rsb *r)
2677{
2678 struct dlm_lkb *lkb, *safe;
2679
2680 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2681 list_del_init(&lkb->lkb_rsb_lookup);
2682 _request_lock(r, lkb);
2683 schedule();
2684 }
2685}
2686
2687/* confirm_master -- confirm (or deny) an rsb's master nodeid */
2688
2689static void confirm_master(struct dlm_rsb *r, int error)
2690{
2691 struct dlm_lkb *lkb;
2692
2693 if (!r->res_first_lkid)
2694 return;
2695
2696 switch (error) {
2697 case 0:
2698 case -EINPROGRESS:
2699 r->res_first_lkid = 0;
2700 process_lookup_list(r);
2701 break;
2702
2703 case -EAGAIN:
2704 case -EBADR:
2705 case -ENOTBLK:
2706 /* the remote request failed and won't be retried (it was
2707 a NOQUEUE, or has been canceled/unlocked); make a waiting
2708 lkb the first_lkid */
2709
2710 r->res_first_lkid = 0;
2711
2712 if (!list_empty(&r->res_lookup)) {
2713 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2714 lkb_rsb_lookup);
2715 list_del_init(&lkb->lkb_rsb_lookup);
2716 r->res_first_lkid = lkb->lkb_id;
2717 _request_lock(r, lkb);
2718 }
2719 break;
2720
2721 default:
2722 log_error(r->res_ls, "confirm_master unknown error %d", error);
2723 }
2724}
2725
2726#ifdef CONFIG_DLM_DEPRECATED_API
2727 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2728 int namelen, unsigned long timeout_cs,
2729 void (*ast) (void *astparam),
2730 void *astparam,
2731 void (*bast) (void *astparam, int mode),
2732 struct dlm_args *args)
2733#else
2734static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2735 int namelen, void (*ast)(void *astparam),
2736 void *astparam,
2737 void (*bast)(void *astparam, int mode),
2738 struct dlm_args *args)
2739#endif
2740{
2741 int rv = -EINVAL;
2742
2743 /* check for invalid arg usage */
2744
2745 if (mode < 0 || mode > DLM_LOCK_EX)
2746 goto out;
2747
2748 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2749 goto out;
2750
2751 if (flags & DLM_LKF_CANCEL)
2752 goto out;
2753
2754 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2755 goto out;
2756
2757 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2758 goto out;
2759
2760 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2761 goto out;
2762
2763 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2764 goto out;
2765
2766 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2767 goto out;
2768
2769 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2770 goto out;
2771
2772 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2773 goto out;
2774
2775 if (!ast || !lksb)
2776 goto out;
2777
2778 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2779 goto out;
2780
2781 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2782 goto out;
2783
2784 /* these args will be copied to the lkb in validate_lock_args,
2785 it cannot be done now because when converting locks, fields in
2786 an active lkb cannot be modified before locking the rsb */
2787
2788 args->flags = flags;
2789 args->astfn = ast;
2790 args->astparam = astparam;
2791 args->bastfn = bast;
2792#ifdef CONFIG_DLM_DEPRECATED_API
2793 args->timeout = timeout_cs;
2794#endif
2795 args->mode = mode;
2796 args->lksb = lksb;
2797 rv = 0;
2798 out:
2799 return rv;
2800}
2801
2802static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2803{
2804 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2805 DLM_LKF_FORCEUNLOCK))
2806 return -EINVAL;
2807
2808 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2809 return -EINVAL;
2810
2811 args->flags = flags;
2812 args->astparam = astarg;
2813 return 0;
2814}
2815
2816static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2817 struct dlm_args *args)
2818{
2819 int rv = -EBUSY;
2820
2821 if (args->flags & DLM_LKF_CONVERT) {
2822 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2823 goto out;
2824
2825 /* lock not allowed if there's any op in progress */
2826 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
2827 goto out;
2828
2829 if (is_overlap(lkb))
2830 goto out;
2831
2832 rv = -EINVAL;
2833 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2834 goto out;
2835
2836 if (args->flags & DLM_LKF_QUECVT &&
2837 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2838 goto out;
2839 }
2840
2841 lkb->lkb_exflags = args->flags;
2842 lkb->lkb_sbflags = 0;
2843 lkb->lkb_astfn = args->astfn;
2844 lkb->lkb_astparam = args->astparam;
2845 lkb->lkb_bastfn = args->bastfn;
2846 lkb->lkb_rqmode = args->mode;
2847 lkb->lkb_lksb = args->lksb;
2848 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2849 lkb->lkb_ownpid = (int) current->pid;
2850#ifdef CONFIG_DLM_DEPRECATED_API
2851 lkb->lkb_timeout_cs = args->timeout;
2852#endif
2853 rv = 0;
2854 out:
2855 switch (rv) {
2856 case 0:
2857 break;
2858 case -EINVAL:
2859 /* annoy the user because dlm usage is wrong */
2860 WARN_ON(1);
2861 log_error(ls, "%s %d %x %x %x %d %d %s", __func__,
2862 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2863 lkb->lkb_status, lkb->lkb_wait_type,
2864 lkb->lkb_resource->res_name);
2865 break;
2866 default:
2867 log_debug(ls, "%s %d %x %x %x %d %d %s", __func__,
2868 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2869 lkb->lkb_status, lkb->lkb_wait_type,
2870 lkb->lkb_resource->res_name);
2871 break;
2872 }
2873
2874 return rv;
2875}
2876
2877/* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2878 for success */
2879
2880/* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2881 because there may be a lookup in progress and it's valid to do
2882 cancel/unlockf on it */
2883
2884static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2885{
2886 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2887 int rv = -EBUSY;
2888
2889 /* normal unlock not allowed if there's any op in progress */
2890 if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) &&
2891 (lkb->lkb_wait_type || lkb->lkb_wait_count))
2892 goto out;
2893
2894 /* an lkb may be waiting for an rsb lookup to complete where the
2895 lookup was initiated by another lock */
2896
2897 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2898 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2899 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2900 list_del_init(&lkb->lkb_rsb_lookup);
2901 queue_cast(lkb->lkb_resource, lkb,
2902 args->flags & DLM_LKF_CANCEL ?
2903 -DLM_ECANCEL : -DLM_EUNLOCK);
2904 unhold_lkb(lkb); /* undoes create_lkb() */
2905 }
2906 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2907 goto out;
2908 }
2909
2910 rv = -EINVAL;
2911 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2912 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2913 dlm_print_lkb(lkb);
2914 goto out;
2915 }
2916
2917 /* an lkb may still exist even though the lock is EOL'ed due to a
2918 * cancel, unlock or failed noqueue request; an app can't use these
2919 * locks; return same error as if the lkid had not been found at all
2920 */
2921
2922 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2923 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2924 rv = -ENOENT;
2925 goto out;
2926 }
2927
2928 /* cancel not allowed with another cancel/unlock in progress */
2929
2930 if (args->flags & DLM_LKF_CANCEL) {
2931 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2932 goto out;
2933
2934 if (is_overlap(lkb))
2935 goto out;
2936
2937 /* don't let scand try to do a cancel */
2938 del_timeout(lkb);
2939
2940 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2941 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2942 rv = -EBUSY;
2943 goto out;
2944 }
2945
2946 /* there's nothing to cancel */
2947 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2948 !lkb->lkb_wait_type) {
2949 rv = -EBUSY;
2950 goto out;
2951 }
2952
2953 switch (lkb->lkb_wait_type) {
2954 case DLM_MSG_LOOKUP:
2955 case DLM_MSG_REQUEST:
2956 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2957 rv = -EBUSY;
2958 goto out;
2959 case DLM_MSG_UNLOCK:
2960 case DLM_MSG_CANCEL:
2961 goto out;
2962 }
2963 /* add_to_waiters() will set OVERLAP_CANCEL */
2964 goto out_ok;
2965 }
2966
2967 /* do we need to allow a force-unlock if there's a normal unlock
2968 already in progress? in what conditions could the normal unlock
2969 fail such that we'd want to send a force-unlock to be sure? */
2970
2971 if (args->flags & DLM_LKF_FORCEUNLOCK) {
2972 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2973 goto out;
2974
2975 if (is_overlap_unlock(lkb))
2976 goto out;
2977
2978 /* don't let scand try to do a cancel */
2979 del_timeout(lkb);
2980
2981 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2982 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2983 rv = -EBUSY;
2984 goto out;
2985 }
2986
2987 switch (lkb->lkb_wait_type) {
2988 case DLM_MSG_LOOKUP:
2989 case DLM_MSG_REQUEST:
2990 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2991 rv = -EBUSY;
2992 goto out;
2993 case DLM_MSG_UNLOCK:
2994 goto out;
2995 }
2996 /* add_to_waiters() will set OVERLAP_UNLOCK */
2997 }
2998
2999 out_ok:
3000 /* an overlapping op shouldn't blow away exflags from other op */
3001 lkb->lkb_exflags |= args->flags;
3002 lkb->lkb_sbflags = 0;
3003 lkb->lkb_astparam = args->astparam;
3004 rv = 0;
3005 out:
3006 switch (rv) {
3007 case 0:
3008 break;
3009 case -EINVAL:
3010 /* annoy the user because dlm usage is wrong */
3011 WARN_ON(1);
3012 log_error(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
3013 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3014 args->flags, lkb->lkb_wait_type,
3015 lkb->lkb_resource->res_name);
3016 break;
3017 default:
3018 log_debug(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
3019 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3020 args->flags, lkb->lkb_wait_type,
3021 lkb->lkb_resource->res_name);
3022 break;
3023 }
3024
3025 return rv;
3026}
3027
3028/*
3029 * Four stage 4 varieties:
3030 * do_request(), do_convert(), do_unlock(), do_cancel()
3031 * These are called on the master node for the given lock and
3032 * from the central locking logic.
3033 */
3034
3035static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3036{
3037 int error = 0;
3038
3039 if (can_be_granted(r, lkb, 1, 0, NULL)) {
3040 grant_lock(r, lkb);
3041 queue_cast(r, lkb, 0);
3042 goto out;
3043 }
3044
3045 if (can_be_queued(lkb)) {
3046 error = -EINPROGRESS;
3047 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3048 add_timeout(lkb);
3049 goto out;
3050 }
3051
3052 error = -EAGAIN;
3053 queue_cast(r, lkb, -EAGAIN);
3054 out:
3055 return error;
3056}
3057
3058static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3059 int error)
3060{
3061 switch (error) {
3062 case -EAGAIN:
3063 if (force_blocking_asts(lkb))
3064 send_blocking_asts_all(r, lkb);
3065 break;
3066 case -EINPROGRESS:
3067 send_blocking_asts(r, lkb);
3068 break;
3069 }
3070}
3071
3072static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3073{
3074 int error = 0;
3075 int deadlk = 0;
3076
3077 /* changing an existing lock may allow others to be granted */
3078
3079 if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3080 grant_lock(r, lkb);
3081 queue_cast(r, lkb, 0);
3082 goto out;
3083 }
3084
3085 /* can_be_granted() detected that this lock would block in a conversion
3086 deadlock, so we leave it on the granted queue and return EDEADLK in
3087 the ast for the convert. */
3088
3089 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
3090 /* it's left on the granted queue */
3091 revert_lock(r, lkb);
3092 queue_cast(r, lkb, -EDEADLK);
3093 error = -EDEADLK;
3094 goto out;
3095 }
3096
3097 /* is_demoted() means the can_be_granted() above set the grmode
3098 to NL, and left us on the granted queue. This auto-demotion
3099 (due to CONVDEADLK) might mean other locks, and/or this lock, are
3100 now grantable. We have to try to grant other converting locks
3101 before we try again to grant this one. */
3102
3103 if (is_demoted(lkb)) {
3104 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3105 if (_can_be_granted(r, lkb, 1, 0)) {
3106 grant_lock(r, lkb);
3107 queue_cast(r, lkb, 0);
3108 goto out;
3109 }
3110 /* else fall through and move to convert queue */
3111 }
3112
3113 if (can_be_queued(lkb)) {
3114 error = -EINPROGRESS;
3115 del_lkb(r, lkb);
3116 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3117 add_timeout(lkb);
3118 goto out;
3119 }
3120
3121 error = -EAGAIN;
3122 queue_cast(r, lkb, -EAGAIN);
3123 out:
3124 return error;
3125}
3126
3127static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3128 int error)
3129{
3130 switch (error) {
3131 case 0:
3132 grant_pending_locks(r, NULL);
3133 /* grant_pending_locks also sends basts */
3134 break;
3135 case -EAGAIN:
3136 if (force_blocking_asts(lkb))
3137 send_blocking_asts_all(r, lkb);
3138 break;
3139 case -EINPROGRESS:
3140 send_blocking_asts(r, lkb);
3141 break;
3142 }
3143}
3144
3145static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3146{
3147 remove_lock(r, lkb);
3148 queue_cast(r, lkb, -DLM_EUNLOCK);
3149 return -DLM_EUNLOCK;
3150}
3151
3152static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3153 int error)
3154{
3155 grant_pending_locks(r, NULL);
3156}
3157
3158 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3159
3160static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3161{
3162 int error;
3163
3164 error = revert_lock(r, lkb);
3165 if (error) {
3166 queue_cast(r, lkb, -DLM_ECANCEL);
3167 return -DLM_ECANCEL;
3168 }
3169 return 0;
3170}
3171
3172static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3173 int error)
3174{
3175 if (error)
3176 grant_pending_locks(r, NULL);
3177}
3178
3179/*
3180 * Four stage 3 varieties:
3181 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3182 */
3183
3184/* add a new lkb to a possibly new rsb, called by requesting process */
3185
3186static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3187{
3188 int error;
3189
3190 /* set_master: sets lkb nodeid from r */
3191
3192 error = set_master(r, lkb);
3193 if (error < 0)
3194 goto out;
3195 if (error) {
3196 error = 0;
3197 goto out;
3198 }
3199
3200 if (is_remote(r)) {
3201 /* receive_request() calls do_request() on remote node */
3202 error = send_request(r, lkb);
3203 } else {
3204 error = do_request(r, lkb);
3205 /* for remote locks the request_reply is sent
3206 between do_request and do_request_effects */
3207 do_request_effects(r, lkb, error);
3208 }
3209 out:
3210 return error;
3211}
3212
3213 /* change some property of an existing lkb, e.g. mode */
3214
3215static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3216{
3217 int error;
3218
3219 if (is_remote(r)) {
3220 /* receive_convert() calls do_convert() on remote node */
3221 error = send_convert(r, lkb);
3222 } else {
3223 error = do_convert(r, lkb);
3224 /* for remote locks the convert_reply is sent
3225 between do_convert and do_convert_effects */
3226 do_convert_effects(r, lkb, error);
3227 }
3228
3229 return error;
3230}
3231
3232/* remove an existing lkb from the granted queue */
3233
3234static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3235{
3236 int error;
3237
3238 if (is_remote(r)) {
3239 /* receive_unlock() calls do_unlock() on remote node */
3240 error = send_unlock(r, lkb);
3241 } else {
3242 error = do_unlock(r, lkb);
3243 /* for remote locks the unlock_reply is sent
3244 between do_unlock and do_unlock_effects */
3245 do_unlock_effects(r, lkb, error);
3246 }
3247
3248 return error;
3249}
3250
3251/* remove an existing lkb from the convert or wait queue */
3252
3253static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3254{
3255 int error;
3256
3257 if (is_remote(r)) {
3258 /* receive_cancel() calls do_cancel() on remote node */
3259 error = send_cancel(r, lkb);
3260 } else {
3261 error = do_cancel(r, lkb);
3262 /* for remote locks the cancel_reply is sent
3263 between do_cancel and do_cancel_effects */
3264 do_cancel_effects(r, lkb, error);
3265 }
3266
3267 return error;
3268}
3269
3270/*
3271 * Four stage 2 varieties:
3272 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3273 */
3274
3275static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3276 const void *name, int len,
3277 struct dlm_args *args)
3278{
3279 struct dlm_rsb *r;
3280 int error;
3281
3282 error = validate_lock_args(ls, lkb, args);
3283 if (error)
3284 return error;
3285
3286 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3287 if (error)
3288 return error;
3289
3290 lock_rsb(r);
3291
3292 attach_lkb(r, lkb);
3293 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3294
3295 error = _request_lock(r, lkb);
3296
3297 unlock_rsb(r);
3298 put_rsb(r);
3299 return error;
3300}
3301
3302static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3303 struct dlm_args *args)
3304{
3305 struct dlm_rsb *r;
3306 int error;
3307
3308 r = lkb->lkb_resource;
3309
3310 hold_rsb(r);
3311 lock_rsb(r);
3312
3313 error = validate_lock_args(ls, lkb, args);
3314 if (error)
3315 goto out;
3316
3317 error = _convert_lock(r, lkb);
3318 out:
3319 unlock_rsb(r);
3320 put_rsb(r);
3321 return error;
3322}
3323
3324static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3325 struct dlm_args *args)
3326{
3327 struct dlm_rsb *r;
3328 int error;
3329
3330 r = lkb->lkb_resource;
3331
3332 hold_rsb(r);
3333 lock_rsb(r);
3334
3335 error = validate_unlock_args(lkb, args);
3336 if (error)
3337 goto out;
3338
3339 error = _unlock_lock(r, lkb);
3340 out:
3341 unlock_rsb(r);
3342 put_rsb(r);
3343 return error;
3344}
3345
3346static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3347 struct dlm_args *args)
3348{
3349 struct dlm_rsb *r;
3350 int error;
3351
3352 r = lkb->lkb_resource;
3353
3354 hold_rsb(r);
3355 lock_rsb(r);
3356
3357 error = validate_unlock_args(lkb, args);
3358 if (error)
3359 goto out;
3360
3361 error = _cancel_lock(r, lkb);
3362 out:
3363 unlock_rsb(r);
3364 put_rsb(r);
3365 return error;
3366}
3367
3368/*
3369 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3370 */
3371
3372int dlm_lock(dlm_lockspace_t *lockspace,
3373 int mode,
3374 struct dlm_lksb *lksb,
3375 uint32_t flags,
3376 const void *name,
3377 unsigned int namelen,
3378 uint32_t parent_lkid,
3379 void (*ast) (void *astarg),
3380 void *astarg,
3381 void (*bast) (void *astarg, int mode))
3382{
3383 struct dlm_ls *ls;
3384 struct dlm_lkb *lkb;
3385 struct dlm_args args;
3386 int error, convert = flags & DLM_LKF_CONVERT;
3387
3388 ls = dlm_find_lockspace_local(lockspace);
3389 if (!ls)
3390 return -EINVAL;
3391
3392 dlm_lock_recovery(ls);
3393
3394 if (convert)
3395 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3396 else
3397 error = create_lkb(ls, &lkb);
3398
3399 if (error)
3400 goto out;
3401
3402 trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
3403
3404#ifdef CONFIG_DLM_DEPRECATED_API
3405 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3406 astarg, bast, &args);
3407#else
3408 error = set_lock_args(mode, lksb, flags, namelen, ast, astarg, bast,
3409 &args);
3410#endif
3411 if (error)
3412 goto out_put;
3413
3414 if (convert)
3415 error = convert_lock(ls, lkb, &args);
3416 else
3417 error = request_lock(ls, lkb, name, namelen, &args);
3418
3419 if (error == -EINPROGRESS)
3420 error = 0;
3421 out_put:
3422 trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true);
3423
3424 if (convert || error)
3425 __put_lkb(ls, lkb);
3426 if (error == -EAGAIN || error == -EDEADLK)
3427 error = 0;
3428 out:
3429 dlm_unlock_recovery(ls);
3430 dlm_put_lockspace(ls);
3431 return error;
3432}
3433
3434int dlm_unlock(dlm_lockspace_t *lockspace,
3435 uint32_t lkid,
3436 uint32_t flags,
3437 struct dlm_lksb *lksb,
3438 void *astarg)
3439{
3440 struct dlm_ls *ls;
3441 struct dlm_lkb *lkb;
3442 struct dlm_args args;
3443 int error;
3444
3445 ls = dlm_find_lockspace_local(lockspace);
3446 if (!ls)
3447 return -EINVAL;
3448
3449 dlm_lock_recovery(ls);
3450
3451 error = find_lkb(ls, lkid, &lkb);
3452 if (error)
3453 goto out;
3454
3455 trace_dlm_unlock_start(ls, lkb, flags);
3456
3457 error = set_unlock_args(flags, astarg, &args);
3458 if (error)
3459 goto out_put;
3460
3461 if (flags & DLM_LKF_CANCEL)
3462 error = cancel_lock(ls, lkb, &args);
3463 else
3464 error = unlock_lock(ls, lkb, &args);
3465
3466 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3467 error = 0;
3468 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3469 error = 0;
3470 out_put:
3471 trace_dlm_unlock_end(ls, lkb, flags, error);
3472
3473 dlm_put_lkb(lkb);
3474 out:
3475 dlm_unlock_recovery(ls);
3476 dlm_put_lockspace(ls);
3477 return error;
3478}
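/* A minimal, hypothetical caller sketch (not part of this file): it
   assumes a lockspace obtained elsewhere via dlm_new_lockspace() and
   exercises only the dlm_lock()/dlm_unlock() entry points above; the
   resource name and callbacks are invented for illustration. */
#if 0 /* example only */
static struct dlm_lksb example_lksb;

static void example_ast(void *astarg)
{
	/* completion ast: grant/convert/unlock status is in sb_status */
}

static void example_bast(void *astarg, int mode)
{
	/* blocking ast: another node wants a lock incompatible at "mode" */
}

static int example_cycle(dlm_lockspace_t *ls)
{
	int error;

	error = dlm_lock(ls, DLM_LOCK_EX, &example_lksb, 0,
			 "example-resource", 16, 0,
			 example_ast, NULL, example_bast);
	if (error)
		return error;

	/* ... wait for example_ast to fire, use the resource ... */

	return dlm_unlock(ls, example_lksb.sb_lkid, 0, &example_lksb, NULL);
}
#endif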
3479
3480/*
3481 * send/receive routines for remote operations and replies
3482 *
3483 * send_args
3484 * send_common
3485 * send_request receive_request
3486 * send_convert receive_convert
3487 * send_unlock receive_unlock
3488 * send_cancel receive_cancel
3489 * send_grant receive_grant
3490 * send_bast receive_bast
3491 * send_lookup receive_lookup
3492 * send_remove receive_remove
3493 *
3494 * send_common_reply
3495 * receive_request_reply send_request_reply
3496 * receive_convert_reply send_convert_reply
3497 * receive_unlock_reply send_unlock_reply
3498 * receive_cancel_reply send_cancel_reply
3499 * receive_lookup_reply send_lookup_reply
3500 */
3501
3502static int _create_message(struct dlm_ls *ls, int mb_len,
3503 int to_nodeid, int mstype,
3504 struct dlm_message **ms_ret,
3505 struct dlm_mhandle **mh_ret,
3506 gfp_t allocation)
3507{
3508 struct dlm_message *ms;
3509 struct dlm_mhandle *mh;
3510 char *mb;
3511
3512 /* get_buffer gives us a message handle (mh) that we need to
3513 pass into midcomms_commit and a message buffer (mb) that we
3514 write our data into */
3515
3516 mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, allocation, &mb);
3517 if (!mh)
3518 return -ENOBUFS;
3519
3520 ms = (struct dlm_message *) mb;
3521
3522 ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3523 ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
3524 ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
3525 ms->m_header.h_length = cpu_to_le16(mb_len);
3526 ms->m_header.h_cmd = DLM_MSG;
3527
3528 ms->m_type = cpu_to_le32(mstype);
3529
3530 *mh_ret = mh;
3531 *ms_ret = ms;
3532 return 0;
3533}
3534
3535static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3536 int to_nodeid, int mstype,
3537 struct dlm_message **ms_ret,
3538 struct dlm_mhandle **mh_ret,
3539 gfp_t allocation)
3540{
3541 int mb_len = sizeof(struct dlm_message);
3542
3543 switch (mstype) {
3544 case DLM_MSG_REQUEST:
3545 case DLM_MSG_LOOKUP:
3546 case DLM_MSG_REMOVE:
3547 mb_len += r->res_length;
3548 break;
3549 case DLM_MSG_CONVERT:
3550 case DLM_MSG_UNLOCK:
3551 case DLM_MSG_REQUEST_REPLY:
3552 case DLM_MSG_CONVERT_REPLY:
3553 case DLM_MSG_GRANT:
3554 if (lkb && lkb->lkb_lvbptr)
3555 mb_len += r->res_ls->ls_lvblen;
3556 break;
3557 }
3558
3559 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3560 ms_ret, mh_ret, allocation);
3561}
3562
3563/* further lowcomms enhancements or alternate implementations may make
3564 the return value from this function useful at some point */
3565
3566static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms,
3567 const void *name, int namelen)
3568 {
3569 dlm_midcomms_commit_mhandle(mh, name, namelen);
3570 return 0;
3571}
3572
3573static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3574 struct dlm_message *ms)
3575{
3576 ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid);
3577 ms->m_pid = cpu_to_le32(lkb->lkb_ownpid);
3578 ms->m_lkid = cpu_to_le32(lkb->lkb_id);
3579 ms->m_remid = cpu_to_le32(lkb->lkb_remid);
3580 ms->m_exflags = cpu_to_le32(lkb->lkb_exflags);
3581 ms->m_sbflags = cpu_to_le32(lkb->lkb_sbflags);
3582 ms->m_flags = cpu_to_le32(lkb->lkb_flags);
3583 ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
3584 ms->m_status = cpu_to_le32(lkb->lkb_status);
3585 ms->m_grmode = cpu_to_le32(lkb->lkb_grmode);
3586 ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode);
3587 ms->m_hash = cpu_to_le32(r->res_hash);
3588
3589 /* m_result and m_bastmode are set from function args,
3590 not from lkb fields */
3591
3592 if (lkb->lkb_bastfn)
3593 ms->m_asts |= cpu_to_le32(DLM_CB_BAST);
3594 if (lkb->lkb_astfn)
3595 ms->m_asts |= cpu_to_le32(DLM_CB_CAST);
3596
3597 /* compare with switch in create_message; send_remove() doesn't
3598 use send_args() */
e7fd4179 3599
da49f36f 3600 switch (ms->m_type) {
00e99ccd
AA
3601 case cpu_to_le32(DLM_MSG_REQUEST):
3602 case cpu_to_le32(DLM_MSG_LOOKUP):
da49f36f
DT
3603 memcpy(ms->m_extra, r->res_name, r->res_length);
3604 break;
00e99ccd
AA
3605 case cpu_to_le32(DLM_MSG_CONVERT):
3606 case cpu_to_le32(DLM_MSG_UNLOCK):
3607 case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
3608 case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
3609 case cpu_to_le32(DLM_MSG_GRANT):
7175e131 3610 if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK))
da49f36f 3611 break;
e7fd4179 3612 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
da49f36f
DT
3613 break;
3614 }
e7fd4179
DT
3615}
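
/* Note: m_extra carries the resource name for request/lookup messages and
 * the lvb for convert/unlock/grant and their replies, mirroring the sizing
 * switch in create_message(); send_remove() fills m_extra itself. */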

static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = r->res_nodeid;

	error = add_to_waiters(lkb, mstype, to_nodeid);
	if (error)
		return error;

	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
	if (error)
		goto fail;

	send_args(r, lkb, ms);

	error = send_message(mh, ms, r->res_name, r->res_length);
	if (error)
		goto fail;
	return 0;

 fail:
	remove_from_waiters(lkb, msg_reply_type(mstype));
	return error;
}

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_REQUEST);
}

static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	error = send_common(r, lkb, DLM_MSG_CONVERT);

	/* down conversions go without a reply from the master */
	if (!error && down_conversion(lkb)) {
		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
		r->res_ls->ls_stub_ms.m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
		r->res_ls->ls_stub_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
		r->res_ls->ls_stub_ms.m_result = 0;
		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
	}

	return error;
}
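
/* A down conversion (e.g. EX -> PR) can always be granted immediately, so
 * the master sends no reply; the stub message above fakes the successful
 * DLM_MSG_CONVERT_REPLY locally so the normal reply path still runs. */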

/* FIXME: if this lkb is the only lock we hold on the rsb, then set
   MASTER_UNCERTAIN to force the next request on the rsb to confirm
   that the master is still correct. */

static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_UNLOCK);
}

static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_CANCEL);
}

static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = lkb->lkb_nodeid;

	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh,
			       GFP_NOFS);
	if (error)
		goto out;

	send_args(r, lkb, ms);

	ms->m_result = 0;

	error = send_message(mh, ms, r->res_name, r->res_length);
 out:
	return error;
}

static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = lkb->lkb_nodeid;

	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh,
			       GFP_NOFS);
	if (error)
		goto out;

	send_args(r, lkb, ms);

	ms->m_bastmode = cpu_to_le32(mode);

	error = send_message(mh, ms, r->res_name, r->res_length);
 out:
	return error;
}

static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = dlm_dir_nodeid(r);

	error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
	if (error)
		return error;

	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh,
			       GFP_NOFS);
	if (error)
		goto fail;

	send_args(r, lkb, ms);

	error = send_message(mh, ms, r->res_name, r->res_length);
	if (error)
		goto fail;
	return 0;

 fail:
	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
	return error;
}

static int send_remove(struct dlm_rsb *r)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = dlm_dir_nodeid(r);

	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh,
			       GFP_ATOMIC);
	if (error)
		goto out;

	memcpy(ms->m_extra, r->res_name, r->res_length);
	ms->m_hash = cpu_to_le32(r->res_hash);

	error = send_message(mh, ms, r->res_name, r->res_length);
 out:
	return error;
}

static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
			     int mstype, int rv)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = lkb->lkb_nodeid;

	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
	if (error)
		goto out;

	send_args(r, lkb, ms);

	ms->m_result = cpu_to_le32(to_dlm_errno(rv));

	error = send_message(mh, ms, r->res_name, r->res_length);
 out:
	return error;
}

static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
}

static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
}

static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
}

static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
}

static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
			     int ret_nodeid, int rv)
{
	struct dlm_rsb *r = &ls->ls_stub_rsb;
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid);

	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh,
			       GFP_NOFS);
	if (error)
		goto out;

	ms->m_lkid = ms_in->m_lkid;
	ms->m_result = cpu_to_le32(to_dlm_errno(rv));
	ms->m_nodeid = cpu_to_le32(ret_nodeid);

	error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in));
 out:
	return error;
}

/* which args we save from a received message depends heavily on the type
   of message, unlike the send side where we can safely send everything about
   the lkb for any type of message */

static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	lkb->lkb_exflags = le32_to_cpu(ms->m_exflags);
	lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags);
	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
			 (le32_to_cpu(ms->m_flags) & 0x0000FFFF);
}

static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	if (ms->m_flags == cpu_to_le32(DLM_IFL_STUB_MS))
		return;

	lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags);
	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
			 (le32_to_cpu(ms->m_flags) & 0x0000FFFF);
}

static int receive_extralen(struct dlm_message *ms)
{
	return (le16_to_cpu(ms->m_header.h_length) -
		sizeof(struct dlm_message));
}
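
/* The extra length is whatever follows the fixed struct dlm_message in the
 * received buffer: a resource name or an lvb, depending on message type. */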

static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
		       struct dlm_message *ms)
{
	int len;

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		if (!lkb->lkb_lvbptr)
			lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
		len = receive_extralen(ms);
		if (len > ls->ls_lvblen)
			len = ls->ls_lvblen;
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
	}
	return 0;
}
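
/* Clamping len to ls_lvblen above keeps a mismatched or malformed remote
 * message from overrunning the locally allocated lvb. */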

static void fake_bastfn(void *astparam, int mode)
{
	log_print("fake_bastfn should not be called");
}

static void fake_astfn(void *astparam)
{
	log_print("fake_astfn should not be called");
}

static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				struct dlm_message *ms)
{
	lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
	lkb->lkb_ownpid = le32_to_cpu(ms->m_pid);
	lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
	lkb->lkb_grmode = DLM_LOCK_IV;
	lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);

	lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL;
	lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL;

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		/* lkb was just created so there won't be an lvb yet */
		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
	}

	return 0;
}

static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				struct dlm_message *ms)
{
	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
		return -EBUSY;

	if (receive_lvb(ls, lkb, ms))
		return -ENOMEM;

	lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
	lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);

	return 0;
}

static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
			       struct dlm_message *ms)
{
	if (receive_lvb(ls, lkb, ms))
		return -ENOMEM;
	return 0;
}

/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
   uses to send a reply and that the remote end uses to process the reply. */

static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
	lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
	lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
}

/* This is called after the rsb is locked so that we can safely inspect
   fields in the lkb. */

static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	int from = le32_to_cpu(ms->m_header.h_nodeid);
	int error = 0;

	/* mixing user and kernel locks is currently not supported */
	if (ms->m_flags & cpu_to_le32(DLM_IFL_USER) &&
	    ~lkb->lkb_flags & DLM_IFL_USER) {
		log_error(lkb->lkb_resource->res_ls,
			  "got user dlm message for a kernel lock");
		error = -EINVAL;
		goto out;
	}

	switch (ms->m_type) {
	case cpu_to_le32(DLM_MSG_CONVERT):
	case cpu_to_le32(DLM_MSG_UNLOCK):
	case cpu_to_le32(DLM_MSG_CANCEL):
		if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
			error = -EINVAL;
		break;

	case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
	case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
	case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
	case cpu_to_le32(DLM_MSG_GRANT):
	case cpu_to_le32(DLM_MSG_BAST):
		if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
			error = -EINVAL;
		break;

	case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
		if (!is_process_copy(lkb))
			error = -EINVAL;
		else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
			error = -EINVAL;
		break;

	default:
		error = -EINVAL;
	}

out:
	if (error)
		log_error(lkb->lkb_resource->res_ls,
			  "ignore invalid message %d from %d %x %x %x %d",
			  le32_to_cpu(ms->m_type), from, lkb->lkb_id,
			  lkb->lkb_remid, lkb->lkb_flags, lkb->lkb_nodeid);
	return error;
}
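
/* The asymmetry enforced above: a master copy (DLM_IFL_MSTCPY, created in
 * receive_request()) mirrors a lock owned by a remote node, so operations
 * on it must come from that owner; a process copy is the owner's own lkb,
 * so grants, basts and replies must come from the master. */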

static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int from_nodeid;
	int error, namelen = 0;

	from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);

	error = create_lkb(ls, &lkb);
	if (error)
		goto fail;

	receive_flags(lkb, ms);
	lkb->lkb_flags |= DLM_IFL_MSTCPY;
	error = receive_request_args(ls, lkb, ms);
	if (error) {
		__put_lkb(ls, lkb);
		goto fail;
	}

	/* The dir node is the authority on whether we are the master
	   for this rsb or not, so if the master sends us a request, we should
	   recreate the rsb if we've destroyed it.  This race happens when we
	   send a remove message to the dir node at the same time that the dir
	   node sends us a request for the rsb. */

	namelen = receive_extralen(ms);

	error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
			 R_RECEIVE_REQUEST, &r);
	if (error) {
		__put_lkb(ls, lkb);
		goto fail;
	}

	lock_rsb(r);

	if (r->res_master_nodeid != dlm_our_nodeid()) {
		error = validate_master_nodeid(ls, r, from_nodeid);
		if (error) {
			unlock_rsb(r);
			put_rsb(r);
			__put_lkb(ls, lkb);
			goto fail;
		}
	}

	attach_lkb(r, lkb);
	error = do_request(r, lkb);
	send_request_reply(r, lkb, error);
	do_request_effects(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);

	if (error == -EINPROGRESS)
		error = 0;
	if (error)
		dlm_put_lkb(lkb);
	return 0;

 fail:
	/* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
	   and do this receive_request again from process_lookup_list once
	   we get the lookup reply.  This would avoid many repeated
	   ENOTBLK request failures when the lookup reply designating us
	   as master is delayed. */

	if (error != -ENOTBLK) {
		log_limit(ls, "receive_request %x from %d %d",
			  le32_to_cpu(ms->m_lkid), from_nodeid, error);
	}

	setup_stub_lkb(ls, ms);
	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
	return error;
}

static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, reply = 1;

	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
	if (error)
		goto fail;

	if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
		log_error(ls, "receive_convert %x remid %x recover_seq %llu "
			  "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
			  (unsigned long long)lkb->lkb_recover_seq,
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid));
		error = -ENOENT;
		dlm_put_lkb(lkb);
		goto fail;
	}

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_message(lkb, ms);
	if (error)
		goto out;

	receive_flags(lkb, ms);

	error = receive_convert_args(ls, lkb, ms);
	if (error) {
		send_convert_reply(r, lkb, error);
		goto out;
	}

	reply = !down_conversion(lkb);

	error = do_convert(r, lkb);
	if (reply)
		send_convert_reply(r, lkb, error);
	do_convert_effects(r, lkb, error);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return 0;

 fail:
	setup_stub_lkb(ls, ms);
	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
	return error;
}

static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
	if (error)
		goto fail;

	if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
		log_error(ls, "receive_unlock %x remid %x remote %d %x",
			  lkb->lkb_id, lkb->lkb_remid,
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid));
		error = -ENOENT;
		dlm_put_lkb(lkb);
		goto fail;
	}

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_message(lkb, ms);
	if (error)
		goto out;

	receive_flags(lkb, ms);

	error = receive_unlock_args(ls, lkb, ms);
	if (error) {
		send_unlock_reply(r, lkb, error);
		goto out;
	}

	error = do_unlock(r, lkb);
	send_unlock_reply(r, lkb, error);
	do_unlock_effects(r, lkb, error);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return 0;

 fail:
	setup_stub_lkb(ls, ms);
	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
	return error;
}

static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
	if (error)
		goto fail;

	receive_flags(lkb, ms);

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_message(lkb, ms);
	if (error)
		goto out;

	error = do_cancel(r, lkb);
	send_cancel_reply(r, lkb, error);
	do_cancel_effects(r, lkb, error);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return 0;

 fail:
	setup_stub_lkb(ls, ms);
	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
	return error;
}

static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
	if (error)
		return error;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_message(lkb, ms);
	if (error)
		goto out;

	receive_flags_reply(lkb, ms);
	if (is_altmode(lkb))
		munge_altmode(lkb, ms);
	grant_lock_pc(r, lkb, ms);
	queue_cast(r, lkb, 0);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return 0;
}

static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
	if (error)
		return error;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_message(lkb, ms);
	if (error)
		goto out;

	queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode));
	lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return 0;
}

static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
{
	int len, error, ret_nodeid, from_nodeid, our_nodeid;

	from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
	our_nodeid = dlm_our_nodeid();

	len = receive_extralen(ms);

	error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
				  &ret_nodeid, NULL);

	/* Optimization: we're master so treat lookup as a request */
	if (!error && ret_nodeid == our_nodeid) {
		receive_request(ls, ms);
		return;
	}
	send_lookup_reply(ls, ms, ret_nodeid, error);
}

static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
{
	char name[DLM_RESNAME_MAXLEN+1];
	struct dlm_rsb *r;
	uint32_t hash, b;
	int rv, len, dir_nodeid, from_nodeid;

	from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);

	len = receive_extralen(ms);

	if (len > DLM_RESNAME_MAXLEN) {
		log_error(ls, "receive_remove from %d bad len %d",
			  from_nodeid, len);
		return;
	}

	dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash));
	if (dir_nodeid != dlm_our_nodeid()) {
		log_error(ls, "receive_remove from %d bad nodeid %d",
			  from_nodeid, dir_nodeid);
		return;
	}

	/* Look for name on rsbtbl.toss, if it's there, kill it.
	   If it's on rsbtbl.keep, it's being used, and we should ignore this
	   message.  This is an expected race between the dir node sending a
	   request to the master node at the same time as the master node sends
	   a remove to the dir node.  The resolution to that race is for the
	   dir node to ignore the remove message, and the master node to
	   recreate the master rsb when it gets a request from the dir node for
	   an rsb it doesn't have. */
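
	/* A sketch of that race (illustration only):
	 *
	 *   master node M                   dir node D
	 *   -------------                   ----------
	 *   last lkb freed, rsb tossed
	 *   send_remove(r)  ----------->
	 *                   <-----------    DLM_MSG_REQUEST for r
	 *                                   (already in flight)
	 *   receive_request() recreates     receive_remove() finds r on the
	 *   r as master via find_rsb()      keep list and ignores the remove
	 */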

	memset(name, 0, sizeof(name));
	memcpy(name, ms->m_extra, len);

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);

	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (rv) {
		/* verify the rsb is on keep list per comment above */
		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
		if (rv) {
			/* should not happen */
			log_error(ls, "receive_remove from %d not found %s",
				  from_nodeid, name);
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			return;
		}
		if (r->res_master_nodeid != from_nodeid) {
			/* should not happen */
			log_error(ls, "receive_remove keep from %d master %d",
				  from_nodeid, r->res_master_nodeid);
			dlm_print_rsb(r);
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			return;
		}

		log_debug(ls, "receive_remove from %d master %d first %x %s",
			  from_nodeid, r->res_master_nodeid, r->res_first_lkid,
			  name);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		return;
	}

	if (r->res_master_nodeid != from_nodeid) {
		log_error(ls, "receive_remove toss from %d master %d",
			  from_nodeid, r->res_master_nodeid);
		dlm_print_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		return;
	}

	if (kref_put(&r->res_ref, kill_rsb)) {
		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		dlm_free_rsb(r);
	} else {
		log_error(ls, "receive_remove from %d rsb ref error",
			  from_nodeid);
		dlm_print_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
	}
}

static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
{
	do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid));
}
4399
6d40c4a7 4400static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4401{
4402 struct dlm_lkb *lkb;
4403 struct dlm_rsb *r;
ef0c2bb0 4404 int error, mstype, result;
3428785a 4405 int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
e7fd4179 4406
00e99ccd 4407 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
6d40c4a7
DT
4408 if (error)
4409 return error;
e7fd4179 4410
e7fd4179
DT
4411 r = lkb->lkb_resource;
4412 hold_rsb(r);
4413 lock_rsb(r);
4414
c54e04b0
DT
4415 error = validate_message(lkb, ms);
4416 if (error)
4417 goto out;
4418
ef0c2bb0
DT
4419 mstype = lkb->lkb_wait_type;
4420 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4875647a
DT
4421 if (error) {
4422 log_error(ls, "receive_request_reply %x remote %d %x result %d",
00e99ccd
AA
4423 lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid),
4424 from_dlm_errno(le32_to_cpu(ms->m_result)));
4875647a 4425 dlm_dump_rsb(r);
ef0c2bb0 4426 goto out;
4875647a 4427 }
ef0c2bb0 4428
e7fd4179
DT
4429 /* Optimization: the dir node was also the master, so it took our
4430 lookup as a request and sent request reply instead of lookup reply */
4431 if (mstype == DLM_MSG_LOOKUP) {
c04fecb4
DT
4432 r->res_master_nodeid = from_nodeid;
4433 r->res_nodeid = from_nodeid;
4434 lkb->lkb_nodeid = from_nodeid;
e7fd4179
DT
4435 }
4436
ef0c2bb0 4437 /* this is the value returned from do_request() on the master */
00e99ccd 4438 result = from_dlm_errno(le32_to_cpu(ms->m_result));
ef0c2bb0
DT
4439
4440 switch (result) {
e7fd4179 4441 case -EAGAIN:
ef0c2bb0 4442 /* request would block (be queued) on remote master */
e7fd4179
DT
4443 queue_cast(r, lkb, -EAGAIN);
4444 confirm_master(r, -EAGAIN);
ef0c2bb0 4445 unhold_lkb(lkb); /* undoes create_lkb() */
e7fd4179
DT
4446 break;
4447
4448 case -EINPROGRESS:
4449 case 0:
4450 /* request was queued or granted on remote master */
4451 receive_flags_reply(lkb, ms);
00e99ccd 4452 lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
7d3c1feb
DT
4453 if (is_altmode(lkb))
4454 munge_altmode(lkb, ms);
3ae1acf9 4455 if (result) {
e7fd4179 4456 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3ae1acf9
DT
4457 add_timeout(lkb);
4458 } else {
e7fd4179
DT
4459 grant_lock_pc(r, lkb, ms);
4460 queue_cast(r, lkb, 0);
4461 }
ef0c2bb0 4462 confirm_master(r, result);
e7fd4179
DT
4463 break;
4464
597d0cae 4465 case -EBADR:
e7fd4179
DT
4466 case -ENOTBLK:
4467 /* find_rsb failed to find rsb or rsb wasn't master */
c04fecb4
DT
4468 log_limit(ls, "receive_request_reply %x from %d %d "
4469 "master %d dir %d first %x %s", lkb->lkb_id,
4470 from_nodeid, result, r->res_master_nodeid,
4471 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4472
4473 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4474 r->res_master_nodeid != dlm_our_nodeid()) {
4475 /* cause _request_lock->set_master->send_lookup */
4476 r->res_master_nodeid = 0;
4477 r->res_nodeid = -1;
4478 lkb->lkb_nodeid = -1;
4479 }
ef0c2bb0
DT
4480
4481 if (is_overlap(lkb)) {
4482 /* we'll ignore error in cancel/unlock reply */
4483 queue_cast_overlap(r, lkb);
aec64e1b 4484 confirm_master(r, result);
ef0c2bb0 4485 unhold_lkb(lkb); /* undoes create_lkb() */
c04fecb4 4486 } else {
ef0c2bb0 4487 _request_lock(r, lkb);
c04fecb4
DT
4488
4489 if (r->res_master_nodeid == dlm_our_nodeid())
4490 confirm_master(r, 0);
4491 }
e7fd4179
DT
4492 break;
4493
4494 default:
ef0c2bb0
DT
4495 log_error(ls, "receive_request_reply %x error %d",
4496 lkb->lkb_id, result);
e7fd4179
DT
4497 }
4498
ef0c2bb0
DT
4499 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4500 log_debug(ls, "receive_request_reply %x result %d unlock",
4501 lkb->lkb_id, result);
4502 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4503 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4504 send_unlock(r, lkb);
4505 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4506 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4507 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4508 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4509 send_cancel(r, lkb);
4510 } else {
4511 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4512 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4513 }
4514 out:
e7fd4179
DT
4515 unlock_rsb(r);
4516 put_rsb(r);
b3f58d8f 4517 dlm_put_lkb(lkb);
6d40c4a7 4518 return 0;
e7fd4179
DT
4519}

static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms)
{
	/* this is the value returned from do_convert() on the master */
	switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
	case -EAGAIN:
		/* convert would block (be queued) on remote master */
		queue_cast(r, lkb, -EAGAIN);
		break;

	case -EDEADLK:
		receive_flags_reply(lkb, ms);
		revert_lock_pc(r, lkb);
		queue_cast(r, lkb, -EDEADLK);
		break;

	case -EINPROGRESS:
		/* convert was queued on remote master */
		receive_flags_reply(lkb, ms);
		if (is_demoted(lkb))
			munge_demoted(lkb);
		del_lkb(r, lkb);
		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
		add_timeout(lkb);
		break;

	case 0:
		/* convert was granted on remote master */
		receive_flags_reply(lkb, ms);
		if (is_demoted(lkb))
			munge_demoted(lkb);
		grant_lock_pc(r, lkb, ms);
		queue_cast(r, lkb, 0);
		break;

	default:
		log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
			  lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid),
			  from_dlm_errno(le32_to_cpu(ms->m_result)));
		dlm_print_rsb(r);
		dlm_print_lkb(lkb);
	}
}

static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;
	int error;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_message(lkb, ms);
	if (error)
		goto out;

	/* stub reply can happen with waiters_mutex held */
	error = remove_from_waiters_ms(lkb, ms);
	if (error)
		goto out;

	__receive_convert_reply(r, lkb, ms);
 out:
	unlock_rsb(r);
	put_rsb(r);
}

static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
	if (error)
		return error;

	_receive_convert_reply(lkb, ms);
	dlm_put_lkb(lkb);
	return 0;
}

static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;
	int error;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_message(lkb, ms);
	if (error)
		goto out;

	/* stub reply can happen with waiters_mutex held */
	error = remove_from_waiters_ms(lkb, ms);
	if (error)
		goto out;

	/* this is the value returned from do_unlock() on the master */

	switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
	case -DLM_EUNLOCK:
		receive_flags_reply(lkb, ms);
		remove_lock_pc(r, lkb);
		queue_cast(r, lkb, -DLM_EUNLOCK);
		break;
	case -ENOENT:
		break;
	default:
		log_error(r->res_ls, "receive_unlock_reply %x error %d",
			  lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result)));
	}
 out:
	unlock_rsb(r);
	put_rsb(r);
}

static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
	if (error)
		return error;

	_receive_unlock_reply(lkb, ms);
	dlm_put_lkb(lkb);
	return 0;
}

static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;
	int error;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_message(lkb, ms);
	if (error)
		goto out;

	/* stub reply can happen with waiters_mutex held */
	error = remove_from_waiters_ms(lkb, ms);
	if (error)
		goto out;

	/* this is the value returned from do_cancel() on the master */

	switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
	case -DLM_ECANCEL:
		receive_flags_reply(lkb, ms);
		revert_lock_pc(r, lkb);
		queue_cast(r, lkb, -DLM_ECANCEL);
		break;
	case 0:
		break;
	default:
		log_error(r->res_ls, "receive_cancel_reply %x error %d",
			  lkb->lkb_id,
			  from_dlm_errno(le32_to_cpu(ms->m_result)));
	}
 out:
	unlock_rsb(r);
	put_rsb(r);
}

static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
	if (error)
		return error;

	_receive_cancel_reply(lkb, ms);
	dlm_put_lkb(lkb);
	return 0;
}

static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, ret_nodeid;
	int do_lookup_list = 0;

	error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb);
	if (error) {
		log_error(ls, "%s no lkid %x", __func__,
			  le32_to_cpu(ms->m_lkid));
		return;
	}

	/* ms->m_result is the value returned by dlm_master_lookup on dir node
	   FIXME: will a non-zero error ever be returned? */

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
	if (error)
		goto out;

	ret_nodeid = le32_to_cpu(ms->m_nodeid);

	/* We sometimes receive a request from the dir node for this
	   rsb before we've received the dir node's lookup_reply for it.
	   The request from the dir node implies we're the master, so we set
	   ourself as master in receive_request_reply, and verify here that
	   we are indeed the master. */

	if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
		/* This should never happen */
		log_error(ls, "receive_lookup_reply %x from %d ret %d "
			  "master %d dir %d our %d first %x %s",
			  lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
			  ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
			  dlm_our_nodeid(), r->res_first_lkid, r->res_name);
	}

	if (ret_nodeid == dlm_our_nodeid()) {
		r->res_master_nodeid = ret_nodeid;
		r->res_nodeid = 0;
		do_lookup_list = 1;
		r->res_first_lkid = 0;
	} else if (ret_nodeid == -1) {
		/* the remote node doesn't believe it's the dir node */
		log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
			  lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid));
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
		lkb->lkb_nodeid = -1;
	} else {
		/* set_master() will set lkb_nodeid from r */
		r->res_master_nodeid = ret_nodeid;
		r->res_nodeid = ret_nodeid;
	}

	if (is_overlap(lkb)) {
		log_debug(ls, "receive_lookup_reply %x unlock %x",
			  lkb->lkb_id, lkb->lkb_flags);
		queue_cast_overlap(r, lkb);
		unhold_lkb(lkb); /* undoes create_lkb() */
		goto out_list;
	}

	_request_lock(r, lkb);

 out_list:
	if (do_lookup_list)
		process_lookup_list(r);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
}

static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
			     uint32_t saved_seq)
{
	int error = 0, noent = 0;

	if (!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid))) {
		log_limit(ls, "receive %d from non-member %d %x %x %d",
			  le32_to_cpu(ms->m_type),
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
			  from_dlm_errno(le32_to_cpu(ms->m_result)));
		return;
	}

	switch (ms->m_type) {

	/* messages sent to a master node */

	case cpu_to_le32(DLM_MSG_REQUEST):
		error = receive_request(ls, ms);
		break;

	case cpu_to_le32(DLM_MSG_CONVERT):
		error = receive_convert(ls, ms);
		break;

	case cpu_to_le32(DLM_MSG_UNLOCK):
		error = receive_unlock(ls, ms);
		break;

	case cpu_to_le32(DLM_MSG_CANCEL):
		noent = 1;
		error = receive_cancel(ls, ms);
		break;

	/* messages sent from a master node (replies to above) */

	case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
		error = receive_request_reply(ls, ms);
		break;

	case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
		error = receive_convert_reply(ls, ms);
		break;

	case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
		error = receive_unlock_reply(ls, ms);
		break;

	case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
		error = receive_cancel_reply(ls, ms);
		break;

	/* messages sent from a master node (only two types of async msg) */

	case cpu_to_le32(DLM_MSG_GRANT):
		noent = 1;
		error = receive_grant(ls, ms);
		break;

	case cpu_to_le32(DLM_MSG_BAST):
		noent = 1;
		error = receive_bast(ls, ms);
		break;

	/* messages sent to a dir node */

	case cpu_to_le32(DLM_MSG_LOOKUP):
		receive_lookup(ls, ms);
		break;

	case cpu_to_le32(DLM_MSG_REMOVE):
		receive_remove(ls, ms);
		break;

	/* messages sent from a dir node (remove has no reply) */

	case cpu_to_le32(DLM_MSG_LOOKUP_REPLY):
		receive_lookup_reply(ls, ms);
		break;

	/* other messages */

	case cpu_to_le32(DLM_MSG_PURGE):
		receive_purge(ls, ms);
		break;

	default:
		log_error(ls, "unknown message type %d",
			  le32_to_cpu(ms->m_type));
	}

	/*
	 * When checking for ENOENT, we're checking the result of
	 * find_lkb(m_remid):
	 *
	 * The lock id referenced in the message wasn't found.  This may
	 * happen in normal usage for the async messages and cancel, so
	 * only use log_debug for them.
	 *
	 * Some errors are expected and normal.
	 */

	if (error == -ENOENT && noent) {
		log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
			  le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid), saved_seq);
	} else if (error == -ENOENT) {
		log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
			  le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid), saved_seq);

		if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT))
			dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash));
	}

	if (error == -EINVAL) {
		log_error(ls, "receive %d inval from %d lkid %x remid %x "
			  "saved_seq %u",
			  le32_to_cpu(ms->m_type),
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
			  saved_seq);
	}
}

/* If the lockspace is in recovery mode (locking stopped), then normal
   messages are saved on the requestqueue for processing after recovery is
   done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
   messages off the requestqueue before we process new ones.  This occurs
   right after recovery completes when we transition from saving all messages
   on requestqueue, to processing all the saved messages, to processing new
   messages as they arrive. */

static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
				int nodeid)
{
	if (dlm_locking_stopped(ls)) {
		/* If we were a member of this lockspace, left, and rejoined,
		   other nodes may still be sending us messages from the
		   lockspace generation before we left. */
		if (!ls->ls_generation) {
			log_limit(ls, "receive %d from %d ignore old gen",
				  le32_to_cpu(ms->m_type), nodeid);
			return;
		}

		dlm_add_requestqueue(ls, nodeid, ms);
	} else {
		dlm_wait_requestqueue(ls);
		_receive_message(ls, ms, 0);
	}
}

/* This is called by dlm_recoverd to process messages that were saved on
   the requestqueue. */

void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
			       uint32_t saved_seq)
{
	_receive_message(ls, ms, saved_seq);
}

/* This is called by the midcomms layer when something is received for
   the lockspace.  It could be either a MSG (normal message sent as part of
   standard locking activity) or an RCOM (recovery message sent as part of
   lockspace recovery). */

void dlm_receive_buffer(union dlm_packet *p, int nodeid)
{
	struct dlm_header *hd = &p->header;
	struct dlm_ls *ls;
	int type = 0;

	switch (hd->h_cmd) {
	case DLM_MSG:
		type = le32_to_cpu(p->message.m_type);
		break;
	case DLM_RCOM:
		type = le32_to_cpu(p->rcom.rc_type);
		break;
	default:
		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
		return;
	}

	if (le32_to_cpu(hd->h_nodeid) != nodeid) {
		log_print("invalid h_nodeid %d from %d lockspace %x",
			  le32_to_cpu(hd->h_nodeid), nodeid,
			  le32_to_cpu(hd->u.h_lockspace));
		return;
	}

	ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace));
	if (!ls) {
		if (dlm_config.ci_log_debug) {
			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
					   "%u from %d cmd %d type %d\n",
					   le32_to_cpu(hd->u.h_lockspace),
					   nodeid, hd->h_cmd, type);
		}

		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
			dlm_send_ls_not_ready(nodeid, &p->rcom);
		return;
	}

	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
	   be inactive (in this ls) before transitioning to recovery mode */

	down_read(&ls->ls_recv_active);
	if (hd->h_cmd == DLM_MSG)
		dlm_receive_message(ls, &p->message, nodeid);
	else if (hd->h_cmd == DLM_RCOM)
		dlm_receive_rcom(ls, &p->rcom, nodeid);
	else
		log_error(ls, "invalid h_cmd %d from %d lockspace %x",
			  hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
	up_read(&ls->ls_recv_active);

	dlm_put_lockspace(ls);
}

static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
				   struct dlm_message *ms_stub)
{
	if (middle_conversion(lkb)) {
		hold_lkb(lkb);
		memset(ms_stub, 0, sizeof(struct dlm_message));
		ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
		ms_stub->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
		ms_stub->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS));
		ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
		_receive_convert_reply(lkb, ms_stub);

		/* Same special case as in receive_rcom_lock_args() */
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
		unhold_lkb(lkb);

	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
		lkb->lkb_flags |= DLM_IFL_RESEND;
	}

	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
	   conversions are async; there's no reply from the remote master */
}
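
/* In plain terms: a middle conversion (between PR and CW) is faked back as
 * -EINPROGRESS and requeued, an up conversion is flagged for resend, and a
 * down conversion never waits for a reply, so it cannot be on the waiters
 * list in the first place. */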

/* A waiting lkb needs recovery if the master node has failed, or
   the master node is changing (only when no directory is used) */

static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
				 int dir_nodeid)
{
	if (dlm_no_directory(ls))
		return 1;

	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
		return 1;

	return 0;
}

/* Recovery for locks that are waiting for replies from nodes that are now
   gone.  We can just complete unlocks and cancels by faking a reply from the
   dead node.  Requests and up-conversions we flag to be resent after
   recovery.  Down-conversions can just be completed with a fake reply like
   unlocks.  Conversions between PR and CW need special attention. */
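
/* PR and CW are the only two modes that are not ordered relative to each
 * other (neither mode's access rights include the other's), so a PR<->CW
 * conversion is neither an up nor a down conversion; recover_convert_waiter()
 * handles it as the middle_conversion() case. */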

void dlm_recover_waiters_pre(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	struct dlm_message *ms_stub;
	int wait_type, stub_unlock_result, stub_cancel_result;
	int dir_nodeid;

	ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
	if (!ms_stub)
		return;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {

		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);

		/* exclude debug messages about unlocks because there can be so
		   many and they aren't very interesting */

		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
				  lkb->lkb_id,
				  lkb->lkb_remid,
				  lkb->lkb_wait_type,
				  lkb->lkb_resource->res_nodeid,
				  lkb->lkb_nodeid,
				  lkb->lkb_wait_nodeid,
				  dir_nodeid);
		}

		/* all outstanding lookups, regardless of destination will be
		   resent after recovery is done */

		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
			lkb->lkb_flags |= DLM_IFL_RESEND;
			continue;
		}

		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
			continue;

		wait_type = lkb->lkb_wait_type;
		stub_unlock_result = -DLM_EUNLOCK;
		stub_cancel_result = -DLM_ECANCEL;

		/* Main reply may have been received leaving a zero wait_type,
		   but a reply for the overlapping op may not have been
		   received.  In that case we need to fake the appropriate
		   reply for the overlap op. */

		if (!wait_type) {
			if (is_overlap_cancel(lkb)) {
				wait_type = DLM_MSG_CANCEL;
				if (lkb->lkb_grmode == DLM_LOCK_IV)
					stub_cancel_result = 0;
			}
			if (is_overlap_unlock(lkb)) {
				wait_type = DLM_MSG_UNLOCK;
				if (lkb->lkb_grmode == DLM_LOCK_IV)
					stub_unlock_result = -ENOENT;
			}

			log_debug(ls, "rwpre overlap %x %x %d %d %d",
				  lkb->lkb_id, lkb->lkb_flags, wait_type,
				  stub_cancel_result, stub_unlock_result);
		}

		switch (wait_type) {

		case DLM_MSG_REQUEST:
			lkb->lkb_flags |= DLM_IFL_RESEND;
			break;

		case DLM_MSG_CONVERT:
			recover_convert_waiter(ls, lkb, ms_stub);
			break;

		case DLM_MSG_UNLOCK:
			hold_lkb(lkb);
			memset(ms_stub, 0, sizeof(struct dlm_message));
			ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
			ms_stub->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY);
			ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_unlock_result));
			ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
			_receive_unlock_reply(lkb, ms_stub);
			dlm_put_lkb(lkb);
			break;

		case DLM_MSG_CANCEL:
			hold_lkb(lkb);
			memset(ms_stub, 0, sizeof(struct dlm_message));
			ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
			ms_stub->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY);
			ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_cancel_result));
			ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
			_receive_cancel_reply(lkb, ms_stub);
			dlm_put_lkb(lkb);
			break;

		default:
			log_error(ls, "invalid lkb wait_type %d %d",
				  lkb->lkb_wait_type, wait_type);
		}
		schedule();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(ms_stub);
}

static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb = NULL, *iter;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
		if (iter->lkb_flags & DLM_IFL_RESEND) {
			hold_lkb(iter);
			lkb = iter;
			break;
		}
	}
	mutex_unlock(&ls->ls_waiters_mutex);

	return lkb;
}

/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
   master or dir-node for r.  Processing the lkb may result in it being placed
   back on waiters. */

/* We do this after normal locking has been enabled and any saved messages
   (in requestqueue) have been processed.  We should be confident that at
   this point we won't get or process a reply to any of these waiting
   operations.  But, new ops may be coming in on the rsbs/locks here from
   userspace or remotely. */

/* there may have been an overlap unlock/cancel prior to recovery or after
   recovery.  if before, the lkb may still have a positive wait_count; if
   after, the overlap flag would just have been set and nothing new sent.
   we can be confident here that any replies to either the initial op or
   overlap ops prior to recovery have been received. */

int dlm_recover_waiters_post(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error = 0, mstype, err, oc, ou;

	while (1) {
		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "recover_waiters_post aborted");
			error = -EINTR;
			break;
		}

		lkb = find_resend_waiter(ls);
		if (!lkb)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		mstype = lkb->lkb_wait_type;
		oc = is_overlap_cancel(lkb);
		ou = is_overlap_unlock(lkb);
		err = 0;

		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
			  dlm_dir_nodeid(r), oc, ou);

		/* At this point we assume that we won't get a reply to any
		   previous op or overlap op on this lock.  First, do a big
		   remove_from_waiters() for all previous ops. */

		lkb->lkb_flags &= ~DLM_IFL_RESEND;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_type = 0;
		/* drop all wait_count references; we still hold a
		 * reference for this iteration.
		 */
		while (lkb->lkb_wait_count) {
			lkb->lkb_wait_count--;
			unhold_lkb(lkb);
		}
		mutex_lock(&ls->ls_waiters_mutex);
		list_del_init(&lkb->lkb_wait_reply);
		mutex_unlock(&ls->ls_waiters_mutex);

		if (oc || ou) {
			/* do an unlock or cancel instead of resending */
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
							-DLM_ECANCEL);
				unhold_lkb(lkb); /* undoes create_lkb() */
				break;
			case DLM_MSG_CONVERT:
				if (oc) {
					queue_cast(r, lkb, -DLM_ECANCEL);
				} else {
					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
					_unlock_lock(r, lkb);
				}
				break;
			default:
				err = 1;
			}
		} else {
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				_request_lock(r, lkb);
				if (is_master(r))
					confirm_master(r, 0);
				break;
			case DLM_MSG_CONVERT:
				_convert_lock(r, lkb);
				break;
			default:
				err = 1;
			}
		}

		if (err) {
			log_error(ls, "waiter %x msg %d r_nodeid %d "
				  "dir_nodeid %d overlap %d %d",
				  lkb->lkb_id, mstype, r->res_nodeid,
				  dlm_dir_nodeid(r), oc, ou);
		}
		unlock_rsb(r);
		put_rsb(r);
		dlm_put_lkb(lkb);
	}

	return error;
}
5298
4875647a
DT
5299static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5300 struct list_head *list)
e7fd4179 5301{
e7fd4179
DT
5302 struct dlm_lkb *lkb, *safe;
5303
4875647a
DT
5304 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5305 if (!is_master_copy(lkb))
5306 continue;
5307
5308 /* don't purge lkbs we've added in recover_master_copy for
5309 the current recovery seq */
5310
5311 if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5312 continue;
5313
5314 del_lkb(r, lkb);
5315
5316 /* this put should free the lkb */
5317 if (!dlm_put_lkb(lkb))
5318 log_error(ls, "purged mstcpy lkb not released");
e7fd4179
DT
5319 }
5320}
5321
4875647a 5322void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
e7fd4179 5323{
4875647a 5324 struct dlm_ls *ls = r->res_ls;
e7fd4179 5325
4875647a
DT
5326 purge_mstcpy_list(ls, r, &r->res_grantqueue);
5327 purge_mstcpy_list(ls, r, &r->res_convertqueue);
5328 purge_mstcpy_list(ls, r, &r->res_waitqueue);
e7fd4179
DT
5329}

static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
			    struct list_head *list,
			    int nodeid_gone, unsigned int *count)
{
	struct dlm_lkb *lkb, *safe;

	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
		if (!is_master_copy(lkb))
			continue;

		if ((lkb->lkb_nodeid == nodeid_gone) ||
		    dlm_is_removed(ls, lkb->lkb_nodeid)) {

			/* tell recover_lvb to invalidate the lvb
			   because a node holding EX/PW failed */
			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
			}

			del_lkb(r, lkb);

			/* this put should free the lkb */
			if (!dlm_put_lkb(lkb))
				log_error(ls, "purged dead lkb not released");

			rsb_set_flag(r, RSB_RECOVER_GRANT);

			(*count)++;
		}
	}
}

/* Get rid of locks held by nodes that are gone. */

void dlm_recover_purge(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_member *memb;
	int nodes_count = 0;
	int nodeid_gone = 0;
	unsigned int lkb_count = 0;

	/* cache one removed nodeid to optimize the common
	   case of a single node removed */

	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
		nodes_count++;
		nodeid_gone = memb->nodeid;
	}

	if (!nodes_count)
		return;

	down_write(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		hold_rsb(r);
		lock_rsb(r);
		if (is_master(r)) {
			purge_dead_list(ls, r, &r->res_grantqueue,
					nodeid_gone, &lkb_count);
			purge_dead_list(ls, r, &r->res_convertqueue,
					nodeid_gone, &lkb_count);
			purge_dead_list(ls, r, &r->res_waitqueue,
					nodeid_gone, &lkb_count);
		}
		unlock_rsb(r);
		unhold_rsb(r);
		cond_resched();
	}
	up_write(&ls->ls_root_sem);

	if (lkb_count)
		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
			  lkb_count, nodes_count);
}

static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
{
	struct rb_node *n;
	struct dlm_rsb *r;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		if (!rsb_flag(r, RSB_RECOVER_GRANT))
			continue;
		if (!is_master(r)) {
			rsb_clear_flag(r, RSB_RECOVER_GRANT);
			continue;
		}
		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
		return r;
	}
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
	return NULL;
}

/*
 * Attempt to grant locks on resources that we are the master of.
 * Locks may have become grantable during recovery because locks
 * from departed nodes have been purged (or not rebuilt), allowing
 * previously blocked locks to now be granted. The subset of rsb's
 * we are interested in are those with lkb's on either the convert or
 * waiting queues.
 *
 * Simplest would be to go through each master rsb and check for non-empty
 * convert or waiting queues, and attempt to grant on those rsbs.
 * Checking the queues requires lock_rsb, though, for which we'd need
 * to release the rsbtbl lock. This would make iterating through all
 * rsb's very inefficient. So, we rely on earlier recovery routines
 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
 * locks for.
 */

void dlm_recover_grant(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int bucket = 0;
	unsigned int count = 0;
	unsigned int rsb_count = 0;
	unsigned int lkb_count = 0;

	while (1) {
		r = find_grant_rsb(ls, bucket);
		if (!r) {
			if (bucket == ls->ls_rsbtbl_size - 1)
				break;
			bucket++;
			continue;
		}
		rsb_count++;
		count = 0;
		lock_rsb(r);
		/* the RECOVER_GRANT flag is checked in the grant path */
		grant_pending_locks(r, &count);
		rsb_clear_flag(r, RSB_RECOVER_GRANT);
		lkb_count += count;
		confirm_master(r, 0);
		unlock_rsb(r);
		put_rsb(r);
		cond_resched();
	}

	if (lkb_count)
		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
			  lkb_count, rsb_count);
}
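
/*
 * For orientation, a minimal sketch of how these two passes are expected to
 * be ordered during recovery (the real call sites live in the recovery
 * machinery, not in this file; treat this as an assumption, not a spec):
 *
 *	dlm_recover_purge(ls);	// drop master copies held by departed nodes
 *	...			// rebuild locks from surviving nodes
 *	dlm_recover_grant(ls);	// grant whatever the purge made grantable
 */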

static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
					 uint32_t remid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
			return lkb;
	}
	return NULL;
}

static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
				    uint32_t remid)
{
	struct dlm_lkb *lkb;

	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
	if (lkb)
		return lkb;
	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
	if (lkb)
		return lkb;
	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
	if (lkb)
		return lkb;
	return NULL;
}

/* needs at least dlm_rcom + rcom_lock */
static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				  struct dlm_rsb *r, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;

	lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
	lkb->lkb_flags |= DLM_IFL_MSTCPY;
	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
	lkb->lkb_rqmode = rl->rl_rqmode;
	lkb->lkb_grmode = rl->rl_grmode;
	/* don't set lkb_status because add_lkb wants to set it itself */

	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		int lvblen = le16_to_cpu(rc->rc_header.h_length) -
			sizeof(struct dlm_rcom) - sizeof(struct rcom_lock);
		if (lvblen > ls->ls_lvblen)
			return -EINVAL;
		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
	}

	/* Conversions between PR and CW (middle modes) need special handling.
	   The real granted mode of these converting locks cannot be determined
	   until all locks have been rebuilt on the rsb (recover_conversion) */
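
	/* Editorial example: a PR -> CW conversion may or may not have been
	   granted on the old master before it failed; PR and CW are mutually
	   incompatible, so the true granted mode cannot be known until every
	   lock on this rsb has been rebuilt. Setting grmode to IV below
	   defers that decision to the recover_conversion pass. */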

	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
	    middle_conversion(lkb)) {
		rl->rl_status = DLM_LKSTS_CONVERT;
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(r, RSB_RECOVER_CONVERT);
	}

	return 0;
}

/* This lkb may have been recovered in a previous aborted recovery so we need
   to check if the rsb already has an lkb with the given remote nodeid/lkid.
   If so we just send back a standard reply. If not, we create a new lkb with
   the given values and send back our lkid. We send back our lkid by sending
   back the rcom_lock struct we got but with the remid field filled in. */

/* needs at least dlm_rcom + rcom_lock */
int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	uint32_t remid = 0;
	int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
	int error;

	if (rl->rl_parent_lkid) {
		error = -EOPNOTSUPP;
		goto out;
	}

	remid = le32_to_cpu(rl->rl_lkid);

	/* In general we expect the rsb returned to be R_MASTER, but we don't
	   have to require it. Recovery of masters on one node can overlap
	   recovery of locks on another node, so one node can send us MSTCPY
	   locks before we've made ourselves master of this rsb. We can still
	   add new MSTCPY locks that we receive here without any harm; when
	   we make ourselves master, dlm_recover_masters() won't touch the
	   MSTCPY locks we've received early. */

	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
			 from_nodeid, R_RECEIVE_RECOVER, &r);
	if (error)
		goto out;

	lock_rsb(r);

	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
			  from_nodeid, remid);
		error = -EBADR;
		goto out_unlock;
	}

	lkb = search_remid(r, from_nodeid, remid);
	if (lkb) {
		error = -EEXIST;
		goto out_remid;
	}

	error = create_lkb(ls, &lkb);
	if (error)
		goto out_unlock;

	error = receive_rcom_lock_args(ls, lkb, r, rc);
	if (error) {
		__put_lkb(ls, lkb);
		goto out_unlock;
	}

	attach_lkb(r, lkb);
	add_lkb(r, lkb, rl->rl_status);
	ls->ls_recover_locks_in++;

	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);

 out_remid:
	/* this is the new value returned to the lock holder for
	   saving in its process-copy lkb */
	rl->rl_remid = cpu_to_le32(lkb->lkb_id);

	lkb->lkb_recover_seq = ls->ls_recover_seq;

 out_unlock:
	unlock_rsb(r);
	put_rsb(r);
 out:
	if (error && error != -EEXIST)
		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
			  from_nodeid, remid, error);
	rl->rl_result = cpu_to_le32(error);
	return error;
}
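
/* Editorial note: the rcom_lock filled in above (rl_remid, rl_result) is the
   reply that travels back to the node holding the process copy;
   dlm_recover_process_copy() below is the receiving end that saves rl_remid
   in lkb_remid. */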

/* needs at least dlm_rcom + rcom_lock */
int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	uint32_t lkid, remid;
	int error, result;

	lkid = le32_to_cpu(rl->rl_lkid);
	remid = le32_to_cpu(rl->rl_remid);
	result = le32_to_cpu(rl->rl_result);

	error = find_lkb(ls, lkid, &lkb);
	if (error) {
		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
			  lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
			  result);
		return error;
	}

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	if (!is_process_copy(lkb)) {
		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
			  lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
			  result);
		dlm_dump_rsb(r);
		unlock_rsb(r);
		put_rsb(r);
		dlm_put_lkb(lkb);
		return -EINVAL;
	}

	switch (result) {
	case -EBADR:
		/* There's a chance the new master received our lock before
		   dlm_recover_master_reply(); this wouldn't happen if we did
		   a barrier between recover_masters and recover_locks. */

		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
			  lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
			  result);

		dlm_send_rcom_lock(r, lkb);
		goto out;
	case -EEXIST:
	case 0:
		lkb->lkb_remid = remid;
		break;
	default:
		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
			  lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
			  result);
	}

	/* an ack for dlm_recover_locks() which waits for replies from
	   all the locks it sends to new masters */
	dlm_recovered_lock(r);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);

	return 0;
}

#ifdef CONFIG_DLM_DEPRECATED_API
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
		     int mode, uint32_t flags, void *name, unsigned int namelen,
		     unsigned long timeout_cs)
#else
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
		     int mode, uint32_t flags, void *name, unsigned int namelen)
#endif
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	bool do_put = true;
	int error;

	dlm_lock_recovery(ls);

	error = create_lkb(ls, &lkb);
	if (error) {
		kfree(ua);
		goto out;
	}

	trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);

	if (flags & DLM_LKF_VALBLK) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
		if (!ua->lksb.sb_lvbptr) {
			kfree(ua);
			error = -ENOMEM;
			goto out_put;
		}
	}
#ifdef CONFIG_DLM_DEPRECATED_API
	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
			      fake_astfn, ua, fake_bastfn, &args);
#else
	error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua,
			      fake_bastfn, &args);
#endif
	if (error) {
		kfree(ua->lksb.sb_lvbptr);
		ua->lksb.sb_lvbptr = NULL;
		kfree(ua);
		goto out_put;
	}

	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
	   lock and that lkb_astparam is the dlm_user_args structure. */
	lkb->lkb_flags |= DLM_IFL_USER;
	error = request_lock(ls, lkb, name, namelen, &args);

	switch (error) {
	case 0:
		break;
	case -EINPROGRESS:
		error = 0;
		break;
	case -EAGAIN:
		error = 0;
		fallthrough;
	default:
		goto out_put;
	}

	/* add this new lkb to the per-process list of locks */
	spin_lock(&ua->proc->locks_spin);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
	do_put = false;
 out_put:
	trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
	if (do_put)
		__put_lkb(ls, lkb);
 out:
	dlm_unlock_recovery(ls);
	return error;
}
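
/* Editorial note: dlm_user_request() and the dlm_user_* functions below are
   the kernel-side entry points for userspace locking; in the mainline tree
   they are presumed to be driven from the misc-device write path in
   fs/dlm/user.c (e.g. libdlm writing a lock request to the dlm device). */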

#ifdef CONFIG_DLM_DEPRECATED_API
int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
		     unsigned long timeout_cs)
#else
int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
#endif
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags);

	/* user can change the params on its lock when it converts it, or
	   add an lvb that didn't exist before */

	ua = lkb->lkb_ua;

	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
		if (!ua->lksb.sb_lvbptr) {
			error = -ENOMEM;
			goto out_put;
		}
	}
	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);

	ua->xid = ua_tmp->xid;
	ua->castparam = ua_tmp->castparam;
	ua->castaddr = ua_tmp->castaddr;
	ua->bastparam = ua_tmp->bastparam;
	ua->bastaddr = ua_tmp->bastaddr;
	ua->user_lksb = ua_tmp->user_lksb;

#ifdef CONFIG_DLM_DEPRECATED_API
	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
			      fake_astfn, ua, fake_bastfn, &args);
#else
	error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua,
			      fake_bastfn, &args);
#endif
	if (error)
		goto out_put;

	error = convert_lock(ls, lkb, &args);

	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
		error = 0;
 out_put:
	trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false);
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

/*
 * The caller asks for an orphan lock on a given resource with a given mode.
 * If a matching lock exists, it's moved to the owner's list of locks and
 * the lkid is returned.
 */

int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
			  int mode, uint32_t flags, void *name, unsigned int namelen,
			  uint32_t *lkid)
{
	struct dlm_lkb *lkb = NULL, *iter;
	struct dlm_user_args *ua;
	int found_other_mode = 0;
	int rv = 0;

	mutex_lock(&ls->ls_orphans_mutex);
	list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
		if (iter->lkb_resource->res_length != namelen)
			continue;
		if (memcmp(iter->lkb_resource->res_name, name, namelen))
			continue;
		if (iter->lkb_grmode != mode) {
			found_other_mode = 1;
			continue;
		}

		lkb = iter;
		list_del_init(&iter->lkb_ownqueue);
		iter->lkb_flags &= ~DLM_IFL_ORPHAN;
		*lkid = iter->lkb_id;
		break;
	}
	mutex_unlock(&ls->ls_orphans_mutex);

	if (!lkb && found_other_mode) {
		rv = -EAGAIN;
		goto out;
	}

	if (!lkb) {
		rv = -ENOENT;
		goto out;
	}

	lkb->lkb_exflags = flags;
	lkb->lkb_ownpid = (int) current->pid;

	ua = lkb->lkb_ua;

	ua->proc = ua_tmp->proc;
	ua->xid = ua_tmp->xid;
	ua->castparam = ua_tmp->castparam;
	ua->castaddr = ua_tmp->castaddr;
	ua->bastparam = ua_tmp->bastparam;
	ua->bastaddr = ua_tmp->bastaddr;
	ua->user_lksb = ua_tmp->user_lksb;

	/*
	 * The lkb reference from the ls_orphans list was not
	 * removed above, and is now considered the reference
	 * for the proc locks list.
	 */

	spin_lock(&ua->proc->locks_spin);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
 out:
	kfree(ua_tmp);
	return rv;
}
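
/* Return-value summary for dlm_user_adopt_orphan(), derived from the code
   above: 0 with *lkid set on success; -EAGAIN if an orphan exists for this
   resource but only with a different granted mode; -ENOENT if no orphan
   matches the name at all. */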

int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid, char *lvb_in)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	trace_dlm_unlock_start(ls, lkb, flags);

	ua = lkb->lkb_ua;

	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
	if (ua_tmp->castparam)
		ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = unlock_lock(ls, lkb, &args);

	if (error == -DLM_EUNLOCK)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
		error = 0;
	if (error)
		goto out_put;

	spin_lock(&ua->proc->locks_spin);
	/* dlm_user_add_cb() may have already taken lkb off the proc list */
	if (!list_empty(&lkb->lkb_ownqueue))
		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
	spin_unlock(&ua->proc->locks_spin);
 out_put:
	trace_dlm_unlock_end(ls, lkb, flags, error);
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	trace_dlm_unlock_start(ls, lkb, flags);

	ua = lkb->lkb_ua;
	if (ua_tmp->castparam)
		ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = cancel_lock(ls, lkb, &args);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	trace_dlm_unlock_end(ls, lkb, flags, error);
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	struct dlm_rsb *r;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	trace_dlm_unlock_start(ls, lkb, flags);

	ua = lkb->lkb_ua;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	error = validate_unlock_args(lkb, &args);
	if (error)
		goto out_r;
	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;

	error = _cancel_lock(r, lkb);
 out_r:
	unlock_rsb(r);
	put_rsb(r);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	trace_dlm_unlock_end(ls, lkb, flags, error);
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	return error;
}

/* lkb's that are removed from the waiters list by revert are just left on the
   orphans list with the granted orphan locks, to be freed by purge */

static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_args args;
	int error;

	hold_lkb(lkb); /* reference for the ls_orphans list */
	mutex_lock(&ls->ls_orphans_mutex);
	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
	mutex_unlock(&ls->ls_orphans_mutex);

	set_unlock_args(0, lkb->lkb_ua, &args);

	error = cancel_lock(ls, lkb, &args);
	if (error == -DLM_ECANCEL)
		error = 0;
	return error;
}

/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
   granted. Regardless of what rsb queue the lock is on, it's removed and
   freed. The IVVALBLK flag causes the lvb on the resource to be invalidated
   if our lock is PW/EX (it's ignored if our granted mode is smaller). */

static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_args args;
	int error;

	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
			lkb->lkb_ua, &args);

	error = unlock_lock(ls, lkb, &args);
	if (error == -DLM_EUNLOCK)
		error = 0;
	return error;
}

/* We have to release the clear_proc_locks lock before calling
   unlock_proc_lock() (which does lock_rsb) due to deadlock with receiving a
   message that does lock_rsb followed by dlm_user_add_cb() */

static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
				     struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb = NULL;

	spin_lock(&ls->ls_clear_proc_locks);
	if (list_empty(&proc->locks))
		goto out;

	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
	list_del_init(&lkb->lkb_ownqueue);

	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
		lkb->lkb_flags |= DLM_IFL_ORPHAN;
	else
		lkb->lkb_flags |= DLM_IFL_DEAD;
 out:
	spin_unlock(&ls->ls_clear_proc_locks);
	return lkb;
}

/* The ls_clear_proc_locks lock protects against dlm_user_add_cb(), which
   1) references lkb->ua, which we free here, and 2) adds lkbs to proc->asts,
   which we clear here. */

/* proc CLOSING flag is set so no more device_reads should look at proc->asts
   list, and no more device_writes should add lkb's to proc->locks list; so we
   shouldn't need to take asts_spin or locks_spin here. This assumes that
   device reads/writes/closes are serialized -- FIXME: we may need to serialize
   them ourselves. */

void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	dlm_lock_recovery(ls);

	while (1) {
		lkb = del_proc_lock(ls, proc);
		if (!lkb)
			break;
		del_timeout(lkb);
		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
			orphan_proc_lock(ls, lkb);
		else
			unlock_proc_lock(ls, lkb);

		/* this removes the reference for the proc->locks list
		   added by dlm_user_request, it may result in the lkb
		   being freed */

		dlm_put_lkb(lkb);
	}

	spin_lock(&ls->ls_clear_proc_locks);

	/* in-progress unlocks */
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}

	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
		dlm_purge_lkb_callbacks(lkb);
		list_del_init(&lkb->lkb_cb_list);
		dlm_put_lkb(lkb);
	}

	spin_unlock(&ls->ls_clear_proc_locks);
	dlm_unlock_recovery(ls);
}
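
/* Editorial note: the loop above mirrors the two exit paths for a closing
   process -- DLM_LKF_PERSISTENT locks are parked on ls_orphans (and may later
   be re-attached via dlm_user_adopt_orphan()), while everything else is
   force-unlocked through unlock_proc_lock(). */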

static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	while (1) {
		lkb = NULL;
		spin_lock(&proc->locks_spin);
		if (!list_empty(&proc->locks)) {
			lkb = list_entry(proc->locks.next, struct dlm_lkb,
					 lkb_ownqueue);
			list_del_init(&lkb->lkb_ownqueue);
		}
		spin_unlock(&proc->locks_spin);

		if (!lkb)
			break;

		lkb->lkb_flags |= DLM_IFL_DEAD;
		unlock_proc_lock(ls, lkb);
		dlm_put_lkb(lkb); /* ref from proc->locks list */
	}

	spin_lock(&proc->locks_spin);
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->locks_spin);

	spin_lock(&proc->asts_spin);
	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
		dlm_purge_lkb_callbacks(lkb);
		list_del_init(&lkb->lkb_cb_list);
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->asts_spin);
}

/* pid of 0 means purge all orphans */

static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_lkb *lkb, *safe;

	mutex_lock(&ls->ls_orphans_mutex);
	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
		if (pid && lkb->lkb_ownpid != pid)
			continue;
		unlock_proc_lock(ls, lkb);
		list_del_init(&lkb->lkb_ownqueue);
		dlm_put_lkb(lkb);
	}
	mutex_unlock(&ls->ls_orphans_mutex);
}

static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int error;

	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
				DLM_MSG_PURGE, &ms, &mh, GFP_NOFS);
	if (error)
		return error;
	ms->m_nodeid = cpu_to_le32(nodeid);
	ms->m_pid = cpu_to_le32(pid);

	return send_message(mh, ms, NULL, 0);
}

int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
		   int nodeid, int pid)
{
	int error = 0;

	if (nodeid && (nodeid != dlm_our_nodeid())) {
		error = send_purge(ls, nodeid, pid);
	} else {
		dlm_lock_recovery(ls);
		if (pid == current->pid)
			purge_proc_locks(ls, proc);
		else
			do_purge(ls, nodeid, pid);
		dlm_unlock_recovery(ls);
	}
	return error;
}
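
/* Editorial note: a purge aimed at another node is forwarded as a
   DLM_MSG_PURGE message via send_purge(); the remote receive path is
   expected to land in do_purge() (see its forward declaration near the top
   of this file). */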

/* debug functionality */
int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len,
		      int lkb_nodeid, unsigned int lkb_flags, int lkb_status)
{
	struct dlm_lksb *lksb;
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	/* we currently can't set a valid user lock */
	if (lkb_flags & DLM_IFL_USER)
		return -EOPNOTSUPP;

	lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
	if (!lksb)
		return -ENOMEM;

	error = _create_lkb(ls, &lkb, lkb_id, lkb_id + 1);
	if (error) {
		kfree(lksb);
		return error;
	}

	lkb->lkb_flags = lkb_flags;
	lkb->lkb_nodeid = lkb_nodeid;
	lkb->lkb_lksb = lksb;
	/* user specific pointer, just don't have it NULL for kernel locks */
	if (~lkb_flags & DLM_IFL_USER)
		lkb->lkb_astparam = (void *)0xDEADBEEF;

	error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
	if (error) {
		kfree(lksb);
		__put_lkb(ls, lkb);
		return error;
	}

	lock_rsb(r);
	attach_lkb(r, lkb);
	add_lkb(r, lkb, lkb_status);
	unlock_rsb(r);
	put_rsb(r);

	return 0;
}
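
/* Editorial note: the two dlm_debug_* helpers here fabricate lkb state for
   testing; they are assumed to be driven from dlm's debugfs interface rather
   than from normal locking paths. */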

int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id,
				 int mstype, int to_nodeid)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, lkb_id, &lkb);
	if (error)
		return error;

	error = add_to_waiters(lkb, mstype, to_nodeid);
	dlm_put_lkb(lkb);
	return error;
}
