fs: dlm: const void resource name parameter
fs/dlm/lock.c
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/* Central locking logic has four stages:

   dlm_lock()
   dlm_unlock()

   request_lock(ls, lkb)
   convert_lock(ls, lkb)
   unlock_lock(ls, lkb)
   cancel_lock(ls, lkb)

   _request_lock(r, lkb)
   _convert_lock(r, lkb)
   _unlock_lock(r, lkb)
   _cancel_lock(r, lkb)

   do_request(r, lkb)
   do_convert(r, lkb)
   do_unlock(r, lkb)
   do_cancel(r, lkb)

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

   dlm_lock          = request_lock
   dlm_lock+CONVERT  = convert_lock
   dlm_unlock        = unlock_lock
   dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
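
/*
 * Illustrative sketch (not part of this file): how a kernel caller might
 * drive the stages above through the public dlm_lock()/dlm_unlock() API.
 * It assumes a lockspace "ls" created elsewhere (e.g. by dlm_new_lockspace())
 * and omits error handling and completion re-initialisation.
 *
 *	static struct dlm_lksb lksb;
 *	static DECLARE_COMPLETION(done);
 *
 *	static void example_ast(void *arg)
 *	{
 *		complete(&done);	// result is in lksb.sb_status
 *	}
 *
 *	// dlm_lock() -> request_lock() -> _request_lock() -> do_request()
 *	dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "example", 7, 0,
 *		 example_ast, NULL, NULL);
 *	wait_for_completion(&done);
 *
 *	// dlm_lock()+CONVERT -> convert_lock() -> ... -> do_convert()
 *	dlm_lock(ls, DLM_LOCK_PR, &lksb, DLM_LKF_CONVERT, "example", 7, 0,
 *		 example_ast, NULL, NULL);
 *	wait_for_completion(&done);
 *
 *	// dlm_unlock() -> unlock_lock() -> ... -> do_unlock()
 *	dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, NULL);
 */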
#include <trace/events/dlm.h>

#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "midcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);

/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};

/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD*/
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
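
/*
 * Worked example of reading the table above (illustration only): a holder
 * converting PR -> EX looks up
 * dlm_lvb_operations[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1], which is 1, so the
 * resource's LVB is copied back to the caller; a holder converting EX -> NL
 * looks up dlm_lvb_operations[DLM_LOCK_EX + 1][DLM_LOCK_NL + 1], which is 0,
 * so the caller's LVB is written into the resource.
 */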

#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
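
/*
 * Example (illustration only, not called anywhere): two PR (protected read)
 * locks may be granted together, while PR and EX may not:
 *
 *	dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR);	returns 1
 *	dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX);	returns 0
 *
 * The grant logic below applies the same table to queued lkb's via the
 * modes_compat() macro.
 */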

/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};

void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
	       (unsigned long long)lkb->lkb_recover_seq);
}

static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
	       "rlc %d name %s\n",
	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
	       r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}

/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}

static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}

static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}

static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

#ifdef CONFIG_DLM_DEPRECATED_API
	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}
#endif

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}

/*
 * Basic operations on rsb's and lkb's
 */

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;
	int rv;

	rv = kref_put_lock(&r->res_ref, toss_rsb,
			   &ls->ls_rsbtbl[bucket].lock);
	if (rv)
		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}

static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}

/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */

static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count,
			  (const char *)name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	/* Convert the empty list_head to a NULL rb_node for tree usage: */
	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}

static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
	char maxname[DLM_RESNAME_MAXLEN];

	memset(maxname, 0, DLM_RESNAME_MAXLEN);
	memcpy(maxname, name, nlen);
	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}

int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len,
			struct dlm_rsb **r_ret)
{
	struct rb_node *node = tree->rb_node;
	struct dlm_rsb *r;
	int rc;

	while (node) {
		r = rb_entry(node, struct dlm_rsb, res_hashnode);
		rc = rsb_cmp(r, name, len);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	*r_ret = r;
	return 0;
}

static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
	struct rb_node **newn = &tree->rb_node;
	struct rb_node *parent = NULL;
	int rc;

	while (*newn) {
		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
					       res_hashnode);

		parent = *newn;
		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
		if (rc < 0)
			newn = &parent->rb_left;
		else if (rc > 0)
			newn = &parent->rb_right;
		else {
			log_print("rsb_insert match");
			dlm_dump_rsb(rsb);
			dlm_dump_rsb(cur);
			return -EEXIST;
		}
	}

	rb_link_node(&rsb->res_hashnode, parent, newn);
	rb_insert_color(&rsb->res_hashnode, tree);
	return 0;
}

/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name to master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */

static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
			uint32_t hash, uint32_t b,
			int dir_nodeid, int from_nodeid,
			unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int from_local = 0;
	int from_other = 0;
	int from_dir = 0;
	int create = 0;
	int error;

	if (flags & R_RECEIVE_REQUEST) {
		if (from_nodeid == dir_nodeid)
			from_dir = 1;
		else
			from_other = 1;
	} else if (flags & R_REQUEST) {
		from_local = 1;
	}

	/*
	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
	 * we're the new master.  Our local recovery may not have set
	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
	 * create the rsb; dlm_recover_process_copy() will handle EBADR
	 * by resending.
	 *
	 * If someone sends us a request, we are the dir node, and we do
	 * not find the rsb anywhere, then recreate it.  This happens if
	 * someone sends us a request after we have removed/freed an rsb
	 * from our toss list.  (They sent a request instead of lookup
	 * because they are using an rsb from their toss list.)
	 */

	if (from_local || from_dir ||
	    (from_other && (dir_nodeid == our_nodeid))) {
		create = 1;
	}

 retry:
	if (create) {
		error = pre_rsb_struct(ls);
		if (error < 0)
			goto out;
	}

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive (master_nodeid may be out of date unless
	 * we are the dir_nodeid or were the master)  No other thread
	 * is using this rsb because it's on the toss list, so we can
	 * look at or update res_master_nodeid without lock_rsb.
	 */

	if ((r->res_master_nodeid != our_nodeid) && from_other) {
		/* our rsb was not master, and another node (not the dir node)
		   has sent us a request */
		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
			  from_nodeid, r->res_master_nodeid, dir_nodeid,
			  r->res_name);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
		/* don't think this should ever happen */
		log_error(ls, "find_rsb toss from_dir %d master %d",
			  from_nodeid, r->res_master_nodeid);
		dlm_print_rsb(r);
		/* fix it and go on */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	if (from_local && (r->res_master_nodeid != our_nodeid)) {
		/* Because we have held no locks on this rsb,
		   res_master_nodeid could have become stale. */
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	if (error == -EBADR && !create)
		goto out_unlock;

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	kref_init(&r->res_ref);

	if (from_dir) {
		/* want to see how often this happens */
		log_debug(ls, "find_rsb new from_dir %d recreate %s",
			  from_nodeid, r->res_name);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		goto out_add;
	}

	if (from_other && (dir_nodeid != our_nodeid)) {
		/* should never happen */
		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
		dlm_free_rsb(r);
		r = NULL;
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (from_other) {
		log_debug(ls, "find_rsb new from_other %d dir %d %s",
			  from_nodeid, dir_nodeid, r->res_name);
	}

	if (dir_nodeid == our_nodeid) {
		/* When we are the dir nodeid, we can set the master
		   node immediately */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	} else {
		/* set_master will send_lookup to dir_nodeid */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
	}

 out_add:
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourself master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
			  uint32_t hash, uint32_t b,
			  int dir_nodeid, int from_nodeid,
			  unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int recover = (flags & R_RECEIVE_RECOVER);
	int error;

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive.  No other thread is using this rsb because
	 * it's on the toss list, so we can look at or update
	 * res_master_nodeid without lock_rsb.
	 */

	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
		/* our rsb is not master, and another node has sent us a
		   request; this should never happen */
		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (!recover && (r->res_master_nodeid != our_nodeid) &&
	    (dir_nodeid == our_nodeid)) {
		/* our rsb is not master, and we are dir; may as well fix it;
		   this should never happen */
		log_error(ls, "find_rsb toss our %d master %d dir %d",
			  our_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
	kref_init(&r->res_ref);

	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

static int find_rsb(struct dlm_ls *ls, const void *name, int len,
		    int from_nodeid, unsigned int flags,
		    struct dlm_rsb **r_ret)
{
	uint32_t hash, b;
	int dir_nodeid;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);

	if (dlm_no_directory(ls))
		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
				      from_nodeid, flags, r_ret);
	else
		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
				    from_nodeid, flags, r_ret);
}

/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourself the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
				  int from_nodeid)
{
	if (dlm_no_directory(ls)) {
		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid,
			  r->res_dir_nodeid);
		dlm_print_rsb(r);
		return -ENOTBLK;
	}

	if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request.  this is much more common when our
		   master_nodeid is zero, so limit debug to non-zero.  */

		if (r->res_master_nodeid) {
			log_debug(ls, "validate master from_other %d master %d "
				  "dir %d first %x %s", from_nodeid,
				  r->res_master_nodeid, r->res_dir_nodeid,
				  r->res_first_lkid, r->res_name);
		}
		return -ENOTBLK;
	} else {
		/* our rsb is not master, but the dir nodeid has sent us a
		   request; this could happen with master 0 / res_nodeid -1 */

		if (r->res_master_nodeid) {
			log_error(ls, "validate master from_dir %d master %d "
				  "first %x %s",
				  from_nodeid, r->res_master_nodeid,
				  r->res_first_lkid, r->res_name);
		}

		r->res_master_nodeid = dlm_our_nodeid();
		r->res_nodeid = 0;
		return 0;
	}
}

static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_nodeid,
				int from_nodeid, bool toss_list, unsigned int flags,
				int *r_nodeid, int *result)
{
	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
	int from_master = (flags & DLM_LU_RECOVER_DIR);

	if (r->res_dir_nodeid != our_nodeid) {
		/* should not happen, but may as well fix it and carry on */
		log_error(ls, "%s res_dir %d our %d %s", __func__,
			  r->res_dir_nodeid, our_nodeid, r->res_name);
		r->res_dir_nodeid = our_nodeid;
	}

	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		 * the previous master failed.  Setting NEW_MASTER will
		 * force dlm_recover_masters to call recover_master on this
		 * rsb even though the res_nodeid is no longer removed.
		 */

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);

		if (toss_list) {
			/* I don't think we should ever find it on toss list. */
			log_error(ls, "%s fix_master on toss", __func__);
			dlm_dump_rsb(r);
		}
	}

	if (from_master && (r->res_master_nodeid != from_nodeid)) {
		/* this will happen if from_nodeid became master during
		 * a previous recovery cycle, and we aborted the previous
		 * cycle before recovering this master value
		 */

		log_limit(ls, "%s from_master %d master_nodeid %d res_nodeid %d first %x %s",
			  __func__, from_nodeid, r->res_master_nodeid,
			  r->res_nodeid, r->res_first_lkid, r->res_name);

		if (r->res_master_nodeid == our_nodeid) {
			log_error(ls, "from_master %d our_master", from_nodeid);
			dlm_dump_rsb(r);
			goto ret_assign;
		}

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);
	}

	if (!r->res_master_nodeid) {
		/* this will happen if recovery happens while we're looking
		 * up the master for this rsb
		 */

		log_debug(ls, "%s master 0 to %d first %x %s", __func__,
			  from_nodeid, r->res_first_lkid, r->res_name);
		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
	}

	if (!from_master && !fix_master &&
	    (r->res_master_nodeid == from_nodeid)) {
		/* this can happen when the master sends remove, the dir node
		 * finds the rsb on the keep list and ignores the remove,
		 * and the former master sends a lookup
		 */

		log_limit(ls, "%s from master %d flags %x first %x %s",
			  __func__, from_nodeid, flags, r->res_first_lkid,
			  r->res_name);
	}

 ret_assign:
	*r_nodeid = r->res_master_nodeid;
	if (result)
		*result = DLM_LU_MATCH;
}

/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . _request_lock
 * . set_master
 * . send_lookup
 * . receive_lookup
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . recover_master
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */

int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
		      unsigned int flags, int *r_nodeid, int *result)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int our_nodeid = dlm_our_nodeid();
	int dir_nodeid, error;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	if (from_nodeid == our_nodeid) {
		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
			  our_nodeid, flags);
		return -EINVAL;
	}

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
			  from_nodeid, dir_nodeid, our_nodeid, hash,
			  ls->ls_num_nodes);
		*r_nodeid = -1;
		return -EINVAL;
	}

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		return error;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		 * checking/changing res_master_nodeid
		 */

		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		lock_rsb(r);

		__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
				    flags, r_nodeid, result);

		/* the rsb was active */
		unlock_rsb(r);
		put_rsb(r);

		return 0;
	}

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto not_found;

	/* because the rsb is inactive (on toss list), it's not refcounted
	 * and lock_rsb is not used, but is protected by the rsbtbl lock
	 */

	__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
			    r_nodeid, result);

	r->res_toss_time = jiffies;
	/* the rsb was inactive (on toss list) */
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	return 0;

 not_found:
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = our_nodeid;
	r->res_master_nodeid = from_nodeid;
	r->res_nodeid = from_nodeid;
	kref_init(&r->res_ref);
	r->res_toss_time = jiffies;

	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
	if (error) {
		/* should never happen */
		dlm_free_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}

	if (result)
		*result = DLM_LU_ADD;
	*r_nodeid = from_nodeid;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}

static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (r->res_hash == hash)
				dlm_dump_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}

void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int error;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error)
		goto out_dump;

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto out;
 out_dump:
	dlm_dump_rsb(r);
 out:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
}

static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}

/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}

/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}

static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
		       int start, int end)
{
	struct dlm_lkb *lkb;
	int rv;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
#ifdef CONFIG_DLM_DEPRECATED_API
	INIT_LIST_HEAD(&lkb->lkb_time_list);
#endif
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	mutex_init(&lkb->lkb_cb_mutex);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
	if (rv >= 0)
		lkb->lkb_id = rv;
	spin_unlock(&ls->ls_lkbidr_spin);
	idr_preload_end();

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		dlm_free_lkb(lkb);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}

static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	return _create_lkb(ls, lkb_ret, 1, 0);
}

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}

static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;
	int rv;

	rv = kref_put_lock(&lkb->lkb_ref, kill_lkb,
			   &ls->ls_lkbidr_spin);
	if (rv) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
	}

	return rv;
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

static void unhold_lkb_assert(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	DLM_ASSERT(false, dlm_print_lkb(lkb););
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	kref_put(&lkb->lkb_ref, unhold_lkb_assert);
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL, *iter;

	list_for_each_entry(iter, head, lkb_statequeue)
		if (iter->lkb_rqmode < mode) {
			lkb = iter;
			list_add_tail(new, &iter->lkb_statequeue);
			break;
		}

	if (!lkb)
		list_add_tail(new, head);
}

/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}

static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}

/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "addwait error %x %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
				struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* Cancel state was preemptively cleared by a successful convert,
	   see next comment, nothing to do. */

	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
		log_debug(ls, "remwait %x cancel_reply wait_type %d",
			  lkb->lkb_id, lkb->lkb_wait_type);
		return -1;
	}

	/* Remove for the convert reply, and preemptively remove for the
	   cancel reply.  A convert has been granted while there's still
	   an outstanding cancel on it (the cancel is moot and the result
	   in the cancel reply should be 0).  We preempt the cancel reply
	   because the app gets the convert result and then can follow up
	   with another op, like convert.  This subsequent op would see the
	   lingering state of the cancel and fail with -EBUSY. */

	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
			  lkb->lkb_id);
		lkb->lkb_wait_type = 0;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_count--;
		unhold_lkb(lkb);
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
		  lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0,
		  lkb->lkb_remid, mstype, lkb->lkb_flags);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't recvd a reply
	   to the op that was in progress prior to the unlock/cancel; we
	   give up on any reply to the earlier op.  FIXME: not sure when/how
	   this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		unhold_lkb(lkb);
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}

static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype, NULL);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS))
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
	if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS))
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* If there's an rsb for the same resource being removed, ensure
 * that the remove message is sent before the new lookup message.
 */

#define DLM_WAIT_PENDING_COND(ls, r)		\
	(ls->ls_remove_len &&			\
	 !rsb_cmp(r, ls->ls_remove_name,	\
		  ls->ls_remove_len))

static void wait_pending_remove(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
 restart:
	spin_lock(&ls->ls_remove_spin);
	if (DLM_WAIT_PENDING_COND(ls, r)) {
		log_debug(ls, "delay lookup for remove dir %d %s",
			  r->res_dir_nodeid, r->res_name);
		spin_unlock(&ls->ls_remove_spin);
		wait_event(ls->ls_remove_wait, !DLM_WAIT_PENDING_COND(ls, r));
		goto restart;
	}
	spin_unlock(&ls->ls_remove_spin);
}

/*
 * ls_remove_spin protects ls_remove_name and ls_remove_len which are
 * read by other threads in wait_pending_remove.  ls_remove_names
 * and ls_remove_lens are only used by the scan thread, so they do
 * not need protection.
 */

static void shrink_bucket(struct dlm_ls *ls, int b)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	char *name;
	int our_nodeid = dlm_our_nodeid();
	int remote_count = 0;
	int need_shrink = 0;
	int i, len, rv;

	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);

	spin_lock(&ls->ls_rsbtbl[b].lock);

	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		return;
	}

	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
		next = rb_next(n);
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		/* If we're the directory record for this rsb, and
		   we're not the master of it, then we need to wait
		   for the master node to send us a dir remove
		   before removing the dir record. */

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid != our_nodeid) &&
		    (dlm_dir_nodeid(r) == our_nodeid)) {
			continue;
		}

		need_shrink = 1;

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			continue;
		}

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid == our_nodeid) &&
		    (dlm_dir_nodeid(r) != our_nodeid)) {

			/* We're the master of this rsb but we're not
			   the directory record, so we need to tell the
			   dir node to remove the dir record. */

			ls->ls_remove_lens[remote_count] = r->res_length;
			memcpy(ls->ls_remove_names[remote_count], r->res_name,
			       DLM_RESNAME_MAXLEN);
			remote_count++;

			if (remote_count >= DLM_REMOVE_NAMES_MAX)
				break;
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			log_error(ls, "tossed rsb in use %s", r->res_name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		dlm_free_rsb(r);
	}

	if (need_shrink)
		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
	else
		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	/*
	 * While searching for rsb's to free, we found some that require
	 * remote removal.  We leave them in place and find them again here
	 * so there is a very small gap between removing them from the toss
	 * list and sending the removal.  Keeping this gap small is
	 * important to keep us (the master node) from being out of sync
	 * with the remote dir node for very long.
	 *
	 * From the time the rsb is removed from toss until just after
	 * send_remove, the rsb name is saved in ls_remove_name.  A new
	 * lookup checks this to ensure that a new lookup message for the
	 * same resource name is not sent just before the remove message.
	 */

	for (i = 0; i < remote_count; i++) {
		name = ls->ls_remove_names[i];
		len = ls->ls_remove_lens[i];

		spin_lock(&ls->ls_rsbtbl[b].lock);
		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
		if (rv) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name not toss %s", name);
			continue;
		}

		if (r->res_master_nodeid != our_nodeid) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name master %d dir %d our %d %s",
				  r->res_master_nodeid, r->res_dir_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (r->res_dir_nodeid == our_nodeid) {
			/* should never happen */
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name dir %d master %d our %d %s",
				  r->res_dir_nodeid, r->res_master_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name toss_time %lu now %lu %s",
				  r->res_toss_time, jiffies, name);
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name in use %s", name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);

		/* block lookup of same name until we've sent remove */
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = len;
		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
		spin_unlock(&ls->ls_remove_spin);
		spin_unlock(&ls->ls_rsbtbl[b].lock);

		send_remove(r);

		/* allow lookup of name again */
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = 0;
		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
		spin_unlock(&ls->ls_remove_spin);
		wake_up(&ls->ls_remove_wait);

		dlm_free_rsb(r);
	}
}

1774void dlm_scan_rsbs(struct dlm_ls *ls)
1775{
1776 int i;
1777
e7fd4179
DT
1778 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1779 shrink_bucket(ls, i);
85e86edf
DT
1780 if (dlm_locking_stopped(ls))
1781 break;
e7fd4179
DT
1782 cond_resched();
1783 }
1784}
1785
6b0afc0c 1786#ifdef CONFIG_DLM_DEPRECATED_API
3ae1acf9
DT
1787static void add_timeout(struct dlm_lkb *lkb)
1788{
1789 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1790
eeda418d 1791 if (is_master_copy(lkb))
3ae1acf9 1792 return;
3ae1acf9
DT
1793
1794 if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1795 !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1796 lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1797 goto add_it;
1798 }
84d8cd69
DT
1799 if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1800 goto add_it;
3ae1acf9
DT
1801 return;
1802
1803 add_it:
1804 DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1805 mutex_lock(&ls->ls_timeout_mutex);
1806 hold_lkb(lkb);
3ae1acf9
DT
1807 list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1808 mutex_unlock(&ls->ls_timeout_mutex);
1809}
1810
1811static void del_timeout(struct dlm_lkb *lkb)
1812{
1813 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1814
1815 mutex_lock(&ls->ls_timeout_mutex);
1816 if (!list_empty(&lkb->lkb_time_list)) {
1817 list_del_init(&lkb->lkb_time_list);
1818 unhold_lkb(lkb);
1819 }
1820 mutex_unlock(&ls->ls_timeout_mutex);
1821}
1822
1823/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1824 lkb_lksb_timeout without lock_rsb? Note: we can't lock timeout_mutex
1825 and then lock rsb because of lock ordering in add_timeout. We may need
1826 to specify some special timeout-related bits in the lkb that are just to
1827 be accessed under the timeout_mutex. */
1828
1829void dlm_scan_timeout(struct dlm_ls *ls)
1830{
1831 struct dlm_rsb *r;
dc1acd5c 1832 struct dlm_lkb *lkb = NULL, *iter;
3ae1acf9 1833 int do_cancel, do_warn;
eeda418d 1834 s64 wait_us;
3ae1acf9
DT
1835
1836 for (;;) {
1837 if (dlm_locking_stopped(ls))
1838 break;
1839
1840 do_cancel = 0;
1841 do_warn = 0;
1842 mutex_lock(&ls->ls_timeout_mutex);
dc1acd5c 1843 list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
3ae1acf9 1844
eeda418d 1845 wait_us = ktime_to_us(ktime_sub(ktime_get(),
dc1acd5c 1846 iter->lkb_timestamp));
eeda418d 1847
dc1acd5c
JK
1848 if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
1849 wait_us >= (iter->lkb_timeout_cs * 10000))
3ae1acf9
DT
1850 do_cancel = 1;
1851
dc1acd5c 1852 if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
eeda418d 1853 wait_us >= dlm_config.ci_timewarn_cs * 10000)
3ae1acf9
DT
1854 do_warn = 1;
1855
1856 if (!do_cancel && !do_warn)
1857 continue;
dc1acd5c
JK
1858 hold_lkb(iter);
1859 lkb = iter;
3ae1acf9
DT
1860 break;
1861 }
1862 mutex_unlock(&ls->ls_timeout_mutex);
1863
dc1acd5c 1864 if (!lkb)
3ae1acf9
DT
1865 break;
1866
1867 r = lkb->lkb_resource;
1868 hold_rsb(r);
1869 lock_rsb(r);
1870
1871 if (do_warn) {
1872 /* clear flag so we only warn once */
1873 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1874 if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1875 del_timeout(lkb);
1876 dlm_timeout_warn(lkb);
1877 }
1878
1879 if (do_cancel) {
b3cab7b9 1880 log_debug(ls, "timeout cancel %x node %d %s",
639aca41 1881 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
3ae1acf9
DT
1882 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1883 lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1884 del_timeout(lkb);
1885 _cancel_lock(r, lkb);
1886 }
1887
1888 unlock_rsb(r);
1889 unhold_rsb(r);
1890 dlm_put_lkb(lkb);
1891 }
1892}
1893
1894/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1895 dlm_recoverd before checking/setting ls_recover_begin. */
1896
1897void dlm_adjust_timeouts(struct dlm_ls *ls)
1898{
1899 struct dlm_lkb *lkb;
eeda418d 1900 u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
3ae1acf9
DT
1901
1902 ls->ls_recover_begin = 0;
1903 mutex_lock(&ls->ls_timeout_mutex);
1904 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
eeda418d 1905 lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
3ae1acf9
DT
1906 mutex_unlock(&ls->ls_timeout_mutex);
1907}
6b0afc0c
AA
1908#else
1909static void add_timeout(struct dlm_lkb *lkb) { }
1910static void del_timeout(struct dlm_lkb *lkb) { }
1911#endif
3ae1acf9 1912
e7fd4179
DT
1913/* lkb is master or local copy */
1914
1915static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1916{
1917 int b, len = r->res_ls->ls_lvblen;
1918
1919 /* b=1 lvb returned to caller
1920 b=0 lvb written to rsb or invalidated
1921 b=-1 do nothing */
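	/* Illustrative examples, inferred from the copy directions below
	 * (editor's note): a new grant such as IV->PR copies the rsb's lvb
	 * back to the caller (b == 1), while a grant that converts down,
	 * e.g. EX->NL, writes the caller's lvb into the rsb (b == 0). */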
1922
1923 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1924
1925 if (b == 1) {
1926 if (!lkb->lkb_lvbptr)
1927 return;
1928
1929 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1930 return;
1931
1932 if (!r->res_lvbptr)
1933 return;
1934
1935 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1936 lkb->lkb_lvbseq = r->res_lvbseq;
1937
1938 } else if (b == 0) {
1939 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1940 rsb_set_flag(r, RSB_VALNOTVALID);
1941 return;
1942 }
1943
1944 if (!lkb->lkb_lvbptr)
1945 return;
1946
1947 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1948 return;
1949
1950 if (!r->res_lvbptr)
52bda2b5 1951 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
e7fd4179
DT
1952
1953 if (!r->res_lvbptr)
1954 return;
1955
1956 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1957 r->res_lvbseq++;
1958 lkb->lkb_lvbseq = r->res_lvbseq;
1959 rsb_clear_flag(r, RSB_VALNOTVALID);
1960 }
1961
1962 if (rsb_flag(r, RSB_VALNOTVALID))
1963 lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1964}
1965
1966static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1967{
1968 if (lkb->lkb_grmode < DLM_LOCK_PW)
1969 return;
1970
1971 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1972 rsb_set_flag(r, RSB_VALNOTVALID);
1973 return;
1974 }
1975
1976 if (!lkb->lkb_lvbptr)
1977 return;
1978
1979 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1980 return;
1981
1982 if (!r->res_lvbptr)
52bda2b5 1983 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
e7fd4179
DT
1984
1985 if (!r->res_lvbptr)
1986 return;
1987
1988 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
1989 r->res_lvbseq++;
1990 rsb_clear_flag(r, RSB_VALNOTVALID);
1991}
1992
1993/* lkb is process copy (pc) */
1994
1995static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1996 struct dlm_message *ms)
1997{
1998 int b;
1999
2000 if (!lkb->lkb_lvbptr)
2001 return;
2002
2003 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
2004 return;
2005
597d0cae 2006 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
e7fd4179
DT
2007 if (b == 1) {
2008 int len = receive_extralen(ms);
cfa805f6
BVA
2009 if (len > r->res_ls->ls_lvblen)
2010 len = r->res_ls->ls_lvblen;
e7fd4179 2011 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
00e99ccd 2012 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
e7fd4179
DT
2013 }
2014}
2015
2016/* Manipulate lkb's on rsb's convert/granted/waiting queues
2017 remove_lock -- used for unlock, removes lkb from granted
2018 revert_lock -- used for cancel, moves lkb from convert to granted
2019 grant_lock -- used for request and convert, adds lkb to granted or
2020 moves lkb from convert or waiting to granted
2021
2022 Each of these is used for master or local copy lkb's. There is
2023 also a _pc() variation used to make the corresponding change on
2024 a process copy (pc) lkb. */
2025
2026static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2027{
2028 del_lkb(r, lkb);
2029 lkb->lkb_grmode = DLM_LOCK_IV;
2030 /* this unhold undoes the original ref from create_lkb()
2031 so this leads to the lkb being freed */
2032 unhold_lkb(lkb);
2033}
2034
2035static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2036{
2037 set_lvb_unlock(r, lkb);
2038 _remove_lock(r, lkb);
2039}
2040
2041static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2042{
2043 _remove_lock(r, lkb);
2044}
2045
ef0c2bb0
DT
2046/* returns: 0 did nothing
2047 1 moved lock to granted
2048 -1 removed lock */
2049
2050static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
e7fd4179 2051{
ef0c2bb0
DT
2052 int rv = 0;
2053
e7fd4179
DT
2054 lkb->lkb_rqmode = DLM_LOCK_IV;
2055
2056 switch (lkb->lkb_status) {
597d0cae
DT
2057 case DLM_LKSTS_GRANTED:
2058 break;
e7fd4179
DT
2059 case DLM_LKSTS_CONVERT:
2060 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
ef0c2bb0 2061 rv = 1;
e7fd4179
DT
2062 break;
2063 case DLM_LKSTS_WAITING:
2064 del_lkb(r, lkb);
2065 lkb->lkb_grmode = DLM_LOCK_IV;
2066 /* this unhold undoes the original ref from create_lkb()
2067 so this leads to the lkb being freed */
2068 unhold_lkb(lkb);
ef0c2bb0 2069 rv = -1;
e7fd4179
DT
2070 break;
2071 default:
2072 log_print("invalid status for revert %d", lkb->lkb_status);
2073 }
ef0c2bb0 2074 return rv;
e7fd4179
DT
2075}
2076
ef0c2bb0 2077static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
e7fd4179 2078{
ef0c2bb0 2079 return revert_lock(r, lkb);
e7fd4179
DT
2080}
2081
2082static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2083{
2084 if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2085 lkb->lkb_grmode = lkb->lkb_rqmode;
2086 if (lkb->lkb_status)
2087 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2088 else
2089 add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2090 }
2091
2092 lkb->lkb_rqmode = DLM_LOCK_IV;
4875647a 2093 lkb->lkb_highbast = 0;
e7fd4179
DT
2094}
2095
2096static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2097{
2098 set_lvb_lock(r, lkb);
2099 _grant_lock(r, lkb);
e7fd4179
DT
2100}
2101
2102static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2103 struct dlm_message *ms)
2104{
2105 set_lvb_lock_pc(r, lkb, ms);
2106 _grant_lock(r, lkb);
2107}
2108
2109/* called by grant_pending_locks() which means an async grant message must
2110 be sent to the requesting node in addition to granting the lock if the
2111 lkb belongs to a remote node. */
2112
2113static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2114{
2115 grant_lock(r, lkb);
2116 if (is_master_copy(lkb))
2117 send_grant(r, lkb);
2118 else
2119 queue_cast(r, lkb, 0);
2120}
2121
7d3c1feb
DT
2122/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2123 change the granted/requested modes. We're munging things accordingly in
2124 the process copy.
2125 CONVDEADLK: our grmode may have been forced down to NL to resolve a
2126 conversion deadlock
2127 ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2128 compatible with other granted locks */
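/* Editor's illustration (not part of the original comment): a PR->EX
   conversion submitted with DLM_LKF_CONVDEADLK that deadlocks against
   another converter may have its granted mode forced down to NL, with
   DLM_SBF_DEMOTED reported back in the lksb, while the conversion itself
   keeps waiting. */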
2129
2a7ce0ed 2130static void munge_demoted(struct dlm_lkb *lkb)
7d3c1feb 2131{
7d3c1feb
DT
2132 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2133 log_print("munge_demoted %x invalid modes gr %d rq %d",
2134 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2135 return;
2136 }
2137
2138 lkb->lkb_grmode = DLM_LOCK_NL;
2139}
2140
2141static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2142{
00e99ccd
AA
2143 if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) &&
2144 ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) {
7d3c1feb 2145 log_print("munge_altmode %x invalid reply type %d",
00e99ccd 2146 lkb->lkb_id, le32_to_cpu(ms->m_type));
7d3c1feb
DT
2147 return;
2148 }
2149
2150 if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2151 lkb->lkb_rqmode = DLM_LOCK_PR;
2152 else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2153 lkb->lkb_rqmode = DLM_LOCK_CW;
2154 else {
2155 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2156 dlm_print_lkb(lkb);
2157 }
2158}
2159
e7fd4179
DT
2160static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2161{
2162 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2163 lkb_statequeue);
2164 if (lkb->lkb_id == first->lkb_id)
90135925 2165 return 1;
e7fd4179 2166
90135925 2167 return 0;
e7fd4179
DT
2168}
2169
e7fd4179
DT
2170/* Check if the given lkb conflicts with another lkb on the queue. */
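/* e.g. (editor's note): an EX request conflicts with any lock whose
   granted mode is anything but NL, while a CR request conflicts only
   with a lock granted in EX. */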
2171
2172static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2173{
2174 struct dlm_lkb *this;
2175
2176 list_for_each_entry(this, head, lkb_statequeue) {
2177 if (this == lkb)
2178 continue;
3bcd3687 2179 if (!modes_compat(this, lkb))
90135925 2180 return 1;
e7fd4179 2181 }
90135925 2182 return 0;
e7fd4179
DT
2183}
2184
2185/*
2186 * "A conversion deadlock arises with a pair of lock requests in the converting
2187 * queue for one resource. The granted mode of each lock blocks the requested
2188 * mode of the other lock."
2189 *
c85d65e9
DT
2190 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2191 * convert queue from being granted, then deadlk/demote lkb.
e7fd4179
DT
2192 *
2193 * Example:
2194 * Granted Queue: empty
2195 * Convert Queue: NL->EX (first lock)
2196 * PR->EX (second lock)
2197 *
2198 * The first lock can't be granted because of the granted mode of the second
2199 * lock and the second lock can't be granted because it's not first in the
c85d65e9
DT
2200 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2201 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2202 * flag set and return DEMOTED in the lksb flags.
e7fd4179 2203 *
c85d65e9
DT
2204 * Originally, this function detected conv-deadlk in a more limited scope:
2205 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2206 * - if lkb1 was the first entry in the queue (not just earlier), and was
2207 * blocked by the granted mode of lkb2, and there was nothing on the
2208 * granted queue preventing lkb1 from being granted immediately, i.e.
2209 * lkb2 was the only thing preventing lkb1 from being granted.
2210 *
2211 * That second condition meant we'd only say there was conv-deadlk if
2212 * resolving it (by demotion) would lead to the first lock on the convert
2213 * queue being granted right away. It allowed conversion deadlocks to exist
2214 * between locks on the convert queue while they couldn't be granted anyway.
2215 *
2216 * Now, we detect and take action on conversion deadlocks immediately when
2217 * they're created, even if they may not be immediately consequential. If
2218 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2219 * mode that would prevent lkb1's conversion from being granted, we do a
2220 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2221 * I think this means that the lkb_is_ahead condition below should always
2222 * be zero, i.e. there will never be conv-deadlk between two locks that are
2223 * both already on the convert queue.
e7fd4179
DT
2224 */
2225
c85d65e9 2226static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
e7fd4179 2227{
c85d65e9
DT
2228 struct dlm_lkb *lkb1;
2229 int lkb_is_ahead = 0;
e7fd4179 2230
c85d65e9
DT
2231 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2232 if (lkb1 == lkb2) {
2233 lkb_is_ahead = 1;
e7fd4179
DT
2234 continue;
2235 }
2236
c85d65e9
DT
2237 if (!lkb_is_ahead) {
2238 if (!modes_compat(lkb2, lkb1))
2239 return 1;
2240 } else {
2241 if (!modes_compat(lkb2, lkb1) &&
2242 !modes_compat(lkb1, lkb2))
2243 return 1;
2244 }
e7fd4179 2245 }
90135925 2246 return 0;
e7fd4179
DT
2247}
2248
2249/*
2250 * Return 1 if the lock can be granted, 0 otherwise.
2251 * Also detect and resolve conversion deadlocks.
2252 *
2253 * lkb is the lock to be granted
2254 *
2255 * now is 1 if the function is being called in the context of the
 2256 * immediate request, and 0 if called later, after the lock has been
2257 * queued.
2258 *
c503a621
DT
2259 * recover is 1 if dlm_recover_grant() is trying to grant conversions
2260 * after recovery.
2261 *
e7fd4179
DT
2262 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2263 */
2264
c503a621
DT
2265static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2266 int recover)
e7fd4179
DT
2267{
2268 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2269
2270 /*
2271 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2272 * a new request for a NL mode lock being blocked.
2273 *
2274 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2275 * request, then it would be granted. In essence, the use of this flag
 2276 * tells the Lock Manager to expedite this request by not considering
2277 * what may be in the CONVERTING or WAITING queues... As of this
2278 * writing, the EXPEDITE flag can be used only with new requests for NL
2279 * mode locks. This flag is not valid for conversion requests.
2280 *
2281 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2282 * conversion or used with a non-NL requested mode. We also know an
2283 * EXPEDITE request is always granted immediately, so now must always
2284 * be 1. The full condition to grant an expedite request: (now &&
2285 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2286 * therefore be shortened to just checking the flag.
2287 */
2288
2289 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
90135925 2290 return 1;
e7fd4179
DT
2291
2292 /*
2293 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2294 * added to the remaining conditions.
2295 */
2296
2297 if (queue_conflict(&r->res_grantqueue, lkb))
c503a621 2298 return 0;
e7fd4179
DT
2299
2300 /*
2301 * 6-3: By default, a conversion request is immediately granted if the
2302 * requested mode is compatible with the modes of all other granted
2303 * locks
2304 */
2305
2306 if (queue_conflict(&r->res_convertqueue, lkb))
c503a621
DT
2307 return 0;
2308
2309 /*
2310 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2311 * locks for a recovered rsb, on which lkb's have been rebuilt.
2312 * The lkb's may have been rebuilt on the queues in a different
2313 * order than they were in on the previous master. So, granting
2314 * queued conversions in order after recovery doesn't make sense
2315 * since the order hasn't been preserved anyway. The new order
2316 * could also have created a new "in place" conversion deadlock.
2317 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2318 * After recovery, there would be no granted locks, and possibly
2319 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
2320 * recovery, grant conversions without considering order.
2321 */
2322
2323 if (conv && recover)
2324 return 1;
e7fd4179
DT
2325
2326 /*
2327 * 6-5: But the default algorithm for deciding whether to grant or
2328 * queue conversion requests does not by itself guarantee that such
2329 * requests are serviced on a "first come first serve" basis. This, in
 2330 * turn, can lead to a phenomenon known as "indefinite postponement".
2331 *
2332 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2333 * the system service employed to request a lock conversion. This flag
2334 * forces certain conversion requests to be queued, even if they are
2335 * compatible with the granted modes of other locks on the same
2336 * resource. Thus, the use of this flag results in conversion requests
 2337 * being ordered on a "first come first serve" basis.
2338 *
2339 * DCT: This condition is all about new conversions being able to occur
2340 * "in place" while the lock remains on the granted queue (assuming
2341 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2342 * doesn't _have_ to go onto the convert queue where it's processed in
2343 * order. The "now" variable is necessary to distinguish converts
2344 * being received and processed for the first time now, because once a
2345 * convert is moved to the conversion queue the condition below applies
2346 * requiring fifo granting.
2347 */
2348
2349 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
90135925 2350 return 1;
e7fd4179 2351
53ad1c98
DT
2352 /*
2353 * Even if the convert is compat with all granted locks,
2354 * QUECVT forces it behind other locks on the convert queue.
2355 */
2356
2357 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2358 if (list_empty(&r->res_convertqueue))
2359 return 1;
2360 else
c503a621 2361 return 0;
53ad1c98
DT
2362 }
2363
e7fd4179 2364 /*
3bcd3687
DT
2365 * The NOORDER flag is set to avoid the standard vms rules on grant
2366 * order.
e7fd4179
DT
2367 */
2368
2369 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
90135925 2370 return 1;
e7fd4179
DT
2371
2372 /*
2373 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2374 * granted until all other conversion requests ahead of it are granted
2375 * and/or canceled.
2376 */
2377
2378 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
90135925 2379 return 1;
e7fd4179
DT
2380
2381 /*
2382 * 6-4: By default, a new request is immediately granted only if all
2383 * three of the following conditions are satisfied when the request is
2384 * issued:
2385 * - The queue of ungranted conversion requests for the resource is
2386 * empty.
2387 * - The queue of ungranted new requests for the resource is empty.
2388 * - The mode of the new request is compatible with the most
2389 * restrictive mode of all granted locks on the resource.
2390 */
2391
2392 if (now && !conv && list_empty(&r->res_convertqueue) &&
2393 list_empty(&r->res_waitqueue))
90135925 2394 return 1;
e7fd4179
DT
2395
2396 /*
2397 * 6-4: Once a lock request is in the queue of ungranted new requests,
2398 * it cannot be granted until the queue of ungranted conversion
2399 * requests is empty, all ungranted new requests ahead of it are
2400 * granted and/or canceled, and it is compatible with the granted mode
2401 * of the most restrictive lock granted on the resource.
2402 */
2403
2404 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2405 first_in_list(lkb, &r->res_waitqueue))
90135925 2406 return 1;
c503a621 2407
90135925 2408 return 0;
e7fd4179
DT
2409}
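/* Editor's worked examples of the rules above (illustrative only):
 * - a new NL request with DLM_LKF_EXPEDITE is granted no matter what is
 *   on the convert or wait queues;
 * - a new CR request is granted immediately when the grant queue holds
 *   only PR locks and both the convert and wait queues are empty (6-4);
 * - a new EX request while any PR lock is granted conflicts with the
 *   grant queue, so it is not granted here; do_request() then queues it
 *   or fails it with -EAGAIN if it cannot be queued.
 */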
2410
c85d65e9 2411static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
c503a621 2412 int recover, int *err)
e7fd4179 2413{
e7fd4179
DT
2414 int rv;
2415 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
c85d65e9
DT
2416 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2417
2418 if (err)
2419 *err = 0;
e7fd4179 2420
c503a621 2421 rv = _can_be_granted(r, lkb, now, recover);
e7fd4179
DT
2422 if (rv)
2423 goto out;
2424
c85d65e9
DT
2425 /*
2426 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2427 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2428 * cancels one of the locks.
2429 */
2430
2431 if (is_convert && can_be_queued(lkb) &&
2432 conversion_deadlock_detect(r, lkb)) {
2433 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2434 lkb->lkb_grmode = DLM_LOCK_NL;
2435 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
294e7e45 2436 } else if (err) {
2437 *err = -EDEADLK;
2438 } else {
2439 log_print("can_be_granted deadlock %x now %d",
2440 lkb->lkb_id, now);
2441 dlm_dump_rsb(r);
c85d65e9 2442 }
e7fd4179 2443 goto out;
c85d65e9 2444 }
e7fd4179 2445
c85d65e9
DT
2446 /*
2447 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2448 * to grant a request in a mode other than the normal rqmode. It's a
2449 * simple way to provide a big optimization to applications that can
2450 * use them.
2451 */
2452
2453 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
e7fd4179 2454 alt = DLM_LOCK_PR;
c85d65e9 2455 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
e7fd4179
DT
2456 alt = DLM_LOCK_CW;
2457
2458 if (alt) {
2459 lkb->lkb_rqmode = alt;
c503a621 2460 rv = _can_be_granted(r, lkb, now, 0);
e7fd4179
DT
2461 if (rv)
2462 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2463 else
2464 lkb->lkb_rqmode = rqmode;
2465 }
2466 out:
2467 return rv;
2468}
2469
36509258
DT
2470/* Returns the highest requested mode of all blocked conversions; sets
2471 cw if there's a blocked conversion to DLM_LOCK_CW. */
c85d65e9 2472
4875647a
DT
2473static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2474 unsigned int *count)
e7fd4179
DT
2475{
2476 struct dlm_lkb *lkb, *s;
c503a621 2477 int recover = rsb_flag(r, RSB_RECOVER_GRANT);
e7fd4179 2478 int hi, demoted, quit, grant_restart, demote_restart;
c85d65e9 2479 int deadlk;
e7fd4179
DT
2480
2481 quit = 0;
2482 restart:
2483 grant_restart = 0;
2484 demote_restart = 0;
2485 hi = DLM_LOCK_IV;
2486
2487 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2488 demoted = is_demoted(lkb);
c85d65e9
DT
2489 deadlk = 0;
2490
c503a621 2491 if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
e7fd4179
DT
2492 grant_lock_pending(r, lkb);
2493 grant_restart = 1;
4875647a
DT
2494 if (count)
2495 (*count)++;
c85d65e9 2496 continue;
e7fd4179 2497 }
c85d65e9
DT
2498
2499 if (!demoted && is_demoted(lkb)) {
2500 log_print("WARN: pending demoted %x node %d %s",
2501 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2502 demote_restart = 1;
2503 continue;
2504 }
2505
2506 if (deadlk) {
294e7e45 2507 /*
 2508 * If the DLM_LKF_NODLCKWT flag is set and a conversion
 2509 * deadlock is detected, we queue a blocking AST so the
 2510 * lock holder can down-convert (or cancel) the conversion.
2511 */
2512 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2513 if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2514 queue_bast(r, lkb, lkb->lkb_rqmode);
2515 lkb->lkb_highbast = lkb->lkb_rqmode;
2516 }
2517 } else {
2518 log_print("WARN: pending deadlock %x node %d %s",
2519 lkb->lkb_id, lkb->lkb_nodeid,
2520 r->res_name);
2521 dlm_dump_rsb(r);
2522 }
c85d65e9
DT
2523 continue;
2524 }
2525
2526 hi = max_t(int, lkb->lkb_rqmode, hi);
36509258
DT
2527
2528 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2529 *cw = 1;
e7fd4179
DT
2530 }
2531
2532 if (grant_restart)
2533 goto restart;
2534 if (demote_restart && !quit) {
2535 quit = 1;
2536 goto restart;
2537 }
2538
2539 return max_t(int, high, hi);
2540}
2541
4875647a
DT
2542static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2543 unsigned int *count)
e7fd4179
DT
2544{
2545 struct dlm_lkb *lkb, *s;
2546
2547 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
c503a621 2548 if (can_be_granted(r, lkb, 0, 0, NULL)) {
e7fd4179 2549 grant_lock_pending(r, lkb);
4875647a
DT
2550 if (count)
2551 (*count)++;
2552 } else {
e7fd4179 2553 high = max_t(int, lkb->lkb_rqmode, high);
36509258
DT
2554 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2555 *cw = 1;
2556 }
e7fd4179
DT
2557 }
2558
2559 return high;
2560}
2561
36509258
DT
2562/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2563 on either the convert or waiting queue.
2564 high is the largest rqmode of all locks blocked on the convert or
2565 waiting queue. */
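/* Editor's note (illustrative): if both a PR and a CW request are blocked,
   high works out to PR, which is compatible with a granted PR lock, so the
   generic highbast check alone would send that holder no blocking AST even
   though it is what blocks the CW request; the cw flag covers this case
   and a bast for DLM_LOCK_CW is sent instead. */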
2566
2567static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2568{
2569 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2570 if (gr->lkb_highbast < DLM_LOCK_EX)
2571 return 1;
2572 return 0;
2573 }
2574
2575 if (gr->lkb_highbast < high &&
2576 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
2577 return 1;
2578 return 0;
2579}
2580
4875647a 2581static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
e7fd4179
DT
2582{
2583 struct dlm_lkb *lkb, *s;
2584 int high = DLM_LOCK_IV;
36509258 2585 int cw = 0;
e7fd4179 2586
4875647a
DT
2587 if (!is_master(r)) {
2588 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2589 dlm_dump_rsb(r);
2590 return;
2591 }
e7fd4179 2592
4875647a
DT
2593 high = grant_pending_convert(r, high, &cw, count);
2594 high = grant_pending_wait(r, high, &cw, count);
e7fd4179
DT
2595
2596 if (high == DLM_LOCK_IV)
2597 return;
2598
2599 /*
2600 * If there are locks left on the wait/convert queue then send blocking
2601 * ASTs to granted locks based on the largest requested mode (high)
36509258 2602 * found above.
e7fd4179
DT
2603 */
2604
2605 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
e5dae548 2606 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
329fc4c3
DT
2607 if (cw && high == DLM_LOCK_PR &&
2608 lkb->lkb_grmode == DLM_LOCK_PR)
36509258
DT
2609 queue_bast(r, lkb, DLM_LOCK_CW);
2610 else
2611 queue_bast(r, lkb, high);
e7fd4179
DT
2612 lkb->lkb_highbast = high;
2613 }
2614 }
2615}
2616
36509258
DT
2617static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2618{
2619 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2620 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2621 if (gr->lkb_highbast < DLM_LOCK_EX)
2622 return 1;
2623 return 0;
2624 }
2625
2626 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2627 return 1;
2628 return 0;
2629}
2630
e7fd4179
DT
2631static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2632 struct dlm_lkb *lkb)
2633{
2634 struct dlm_lkb *gr;
2635
2636 list_for_each_entry(gr, head, lkb_statequeue) {
314dd2a0
SW
2637 /* skip self when sending basts to convertqueue */
2638 if (gr == lkb)
2639 continue;
e5dae548 2640 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
e7fd4179
DT
2641 queue_bast(r, gr, lkb->lkb_rqmode);
2642 gr->lkb_highbast = lkb->lkb_rqmode;
2643 }
2644 }
2645}
2646
2647static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2648{
2649 send_bast_queue(r, &r->res_grantqueue, lkb);
2650}
2651
2652static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2653{
2654 send_bast_queue(r, &r->res_grantqueue, lkb);
2655 send_bast_queue(r, &r->res_convertqueue, lkb);
2656}
2657
2658/* set_master(r, lkb) -- set the master nodeid of a resource
2659
2660 The purpose of this function is to set the nodeid field in the given
2661 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2662 known, it can just be copied to the lkb and the function will return
2663 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2664 before it can be copied to the lkb.
2665
2666 When the rsb nodeid is being looked up remotely, the initial lkb
2667 causing the lookup is kept on the ls_waiters list waiting for the
2668 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2669 on the rsb's res_lookup list until the master is verified.
2670
2671 Return values:
2672 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2673 1: the rsb master is not available and the lkb has been placed on
2674 a wait queue
2675*/
2676
2677static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2678{
c04fecb4 2679 int our_nodeid = dlm_our_nodeid();
e7fd4179
DT
2680
2681 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2682 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2683 r->res_first_lkid = lkb->lkb_id;
2684 lkb->lkb_nodeid = r->res_nodeid;
2685 return 0;
2686 }
2687
2688 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2689 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2690 return 1;
2691 }
2692
c04fecb4 2693 if (r->res_master_nodeid == our_nodeid) {
e7fd4179
DT
2694 lkb->lkb_nodeid = 0;
2695 return 0;
2696 }
2697
c04fecb4
DT
2698 if (r->res_master_nodeid) {
2699 lkb->lkb_nodeid = r->res_master_nodeid;
e7fd4179
DT
2700 return 0;
2701 }
2702
c04fecb4
DT
2703 if (dlm_dir_nodeid(r) == our_nodeid) {
2704 /* This is a somewhat unusual case; find_rsb will usually
2705 have set res_master_nodeid when dir nodeid is local, but
2706 there are cases where we become the dir node after we've
 2707 passed find_rsb and go through _request_lock again.
2708 confirm_master() or process_lookup_list() needs to be
2709 called after this. */
2710 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2711 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2712 r->res_name);
2713 r->res_master_nodeid = our_nodeid;
e7fd4179
DT
2714 r->res_nodeid = 0;
2715 lkb->lkb_nodeid = 0;
c04fecb4 2716 return 0;
e7fd4179 2717 }
c04fecb4 2718
05c32f47
DT
2719 wait_pending_remove(r);
2720
c04fecb4
DT
2721 r->res_first_lkid = lkb->lkb_id;
2722 send_lookup(r, lkb);
2723 return 1;
e7fd4179
DT
2724}
2725
2726static void process_lookup_list(struct dlm_rsb *r)
2727{
2728 struct dlm_lkb *lkb, *safe;
2729
2730 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
ef0c2bb0 2731 list_del_init(&lkb->lkb_rsb_lookup);
e7fd4179
DT
2732 _request_lock(r, lkb);
2733 schedule();
2734 }
2735}
2736
2737/* confirm_master -- confirm (or deny) an rsb's master nodeid */
2738
2739static void confirm_master(struct dlm_rsb *r, int error)
2740{
2741 struct dlm_lkb *lkb;
2742
2743 if (!r->res_first_lkid)
2744 return;
2745
2746 switch (error) {
2747 case 0:
2748 case -EINPROGRESS:
2749 r->res_first_lkid = 0;
2750 process_lookup_list(r);
2751 break;
2752
2753 case -EAGAIN:
aec64e1b
DT
2754 case -EBADR:
2755 case -ENOTBLK:
2756 /* the remote request failed and won't be retried (it was
2757 a NOQUEUE, or has been canceled/unlocked); make a waiting
2758 lkb the first_lkid */
e7fd4179
DT
2759
2760 r->res_first_lkid = 0;
2761
2762 if (!list_empty(&r->res_lookup)) {
2763 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2764 lkb_rsb_lookup);
ef0c2bb0 2765 list_del_init(&lkb->lkb_rsb_lookup);
e7fd4179
DT
2766 r->res_first_lkid = lkb->lkb_id;
2767 _request_lock(r, lkb);
761b9d3f 2768 }
e7fd4179
DT
2769 break;
2770
2771 default:
2772 log_error(r->res_ls, "confirm_master unknown error %d", error);
2773 }
2774}
2775
6b0afc0c 2776#ifdef CONFIG_DLM_DEPRECATED_API
e7fd4179 2777static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
e5dae548
DT
2778 int namelen, unsigned long timeout_cs,
2779 void (*ast) (void *astparam),
2780 void *astparam,
2781 void (*bast) (void *astparam, int mode),
2782 struct dlm_args *args)
6b0afc0c
AA
2783#else
2784static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2785 int namelen, void (*ast)(void *astparam),
2786 void *astparam,
2787 void (*bast)(void *astparam, int mode),
2788 struct dlm_args *args)
2789#endif
e7fd4179
DT
2790{
2791 int rv = -EINVAL;
2792
2793 /* check for invalid arg usage */
2794
2795 if (mode < 0 || mode > DLM_LOCK_EX)
2796 goto out;
2797
2798 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2799 goto out;
2800
2801 if (flags & DLM_LKF_CANCEL)
2802 goto out;
2803
2804 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2805 goto out;
2806
2807 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2808 goto out;
2809
2810 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2811 goto out;
2812
2813 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2814 goto out;
2815
2816 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2817 goto out;
2818
2819 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2820 goto out;
2821
2822 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2823 goto out;
2824
2825 if (!ast || !lksb)
2826 goto out;
2827
2828 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2829 goto out;
2830
e7fd4179
DT
2831 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2832 goto out;
2833
 2834 /* these args will be copied to the lkb in validate_lock_args;
 2835 it cannot be done now because, when converting locks, fields in
2836 an active lkb cannot be modified before locking the rsb */
2837
2838 args->flags = flags;
e5dae548
DT
2839 args->astfn = ast;
2840 args->astparam = astparam;
2841 args->bastfn = bast;
6b0afc0c 2842#ifdef CONFIG_DLM_DEPRECATED_API
d7db923e 2843 args->timeout = timeout_cs;
6b0afc0c 2844#endif
e7fd4179
DT
2845 args->mode = mode;
2846 args->lksb = lksb;
e7fd4179
DT
2847 rv = 0;
2848 out:
2849 return rv;
2850}
2851
2852static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2853{
2854 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2855 DLM_LKF_FORCEUNLOCK))
2856 return -EINVAL;
2857
ef0c2bb0
DT
2858 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2859 return -EINVAL;
2860
e7fd4179 2861 args->flags = flags;
e5dae548 2862 args->astparam = astarg;
e7fd4179
DT
2863 return 0;
2864}
2865
2866static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2867 struct dlm_args *args)
2868{
44637ca4 2869 int rv = -EBUSY;
e7fd4179
DT
2870
2871 if (args->flags & DLM_LKF_CONVERT) {
e7fd4179
DT
2872 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2873 goto out;
2874
67e4d8c5
AA
2875 /* lock not allowed if there's any op in progress */
2876 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
e7fd4179 2877 goto out;
ef0c2bb0
DT
2878
2879 if (is_overlap(lkb))
2880 goto out;
44637ca4
AA
2881
2882 rv = -EINVAL;
2883 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2884 goto out;
2885
2886 if (args->flags & DLM_LKF_QUECVT &&
2887 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2888 goto out;
e7fd4179
DT
2889 }
2890
2891 lkb->lkb_exflags = args->flags;
2892 lkb->lkb_sbflags = 0;
e5dae548 2893 lkb->lkb_astfn = args->astfn;
e7fd4179 2894 lkb->lkb_astparam = args->astparam;
e5dae548 2895 lkb->lkb_bastfn = args->bastfn;
e7fd4179
DT
2896 lkb->lkb_rqmode = args->mode;
2897 lkb->lkb_lksb = args->lksb;
2898 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2899 lkb->lkb_ownpid = (int) current->pid;
6b0afc0c 2900#ifdef CONFIG_DLM_DEPRECATED_API
d7db923e 2901 lkb->lkb_timeout_cs = args->timeout;
6b0afc0c 2902#endif
e7fd4179
DT
2903 rv = 0;
2904 out:
9ac8ba46
AA
2905 switch (rv) {
2906 case 0:
2907 break;
2908 case -EINVAL:
2909 /* annoy the user because dlm usage is wrong */
2910 WARN_ON(1);
2911 log_error(ls, "%s %d %x %x %x %d %d %s", __func__,
2912 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2913 lkb->lkb_status, lkb->lkb_wait_type,
2914 lkb->lkb_resource->res_name);
2915 break;
2916 default:
c2d76a62 2917 log_debug(ls, "%s %d %x %x %x %d %d %s", __func__,
43279e53
DT
2918 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2919 lkb->lkb_status, lkb->lkb_wait_type,
2920 lkb->lkb_resource->res_name);
9ac8ba46
AA
2921 break;
2922 }
2923
e7fd4179
DT
2924 return rv;
2925}
2926
ef0c2bb0
DT
2927/* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2928 for success */
2929
2930/* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2931 because there may be a lookup in progress and it's valid to do
2932 cancel/unlockf on it */
2933
e7fd4179
DT
2934static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2935{
ef0c2bb0 2936 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
420ba3cd 2937 int rv = -EBUSY;
e7fd4179 2938
420ba3cd
AA
2939 /* normal unlock not allowed if there's any op in progress */
2940 if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) &&
2941 (lkb->lkb_wait_type || lkb->lkb_wait_count))
e7fd4179
DT
2942 goto out;
2943
ef0c2bb0
DT
2944 /* an lkb may be waiting for an rsb lookup to complete where the
2945 lookup was initiated by another lock */
2946
42dc1601
DT
2947 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2948 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
ef0c2bb0
DT
2949 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2950 list_del_init(&lkb->lkb_rsb_lookup);
2951 queue_cast(lkb->lkb_resource, lkb,
2952 args->flags & DLM_LKF_CANCEL ?
2953 -DLM_ECANCEL : -DLM_EUNLOCK);
2954 unhold_lkb(lkb); /* undoes create_lkb() */
ef0c2bb0 2955 }
42dc1601 2956 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
420ba3cd
AA
2957 goto out;
2958 }
2959
2960 rv = -EINVAL;
2961 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2962 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2963 dlm_print_lkb(lkb);
2964 goto out;
2965 }
2966
2967 /* an lkb may still exist even though the lock is EOL'ed due to a
2968 * cancel, unlock or failed noqueue request; an app can't use these
2969 * locks; return same error as if the lkid had not been found at all
2970 */
2971
2972 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2973 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2974 rv = -ENOENT;
42dc1601 2975 goto out;
ef0c2bb0
DT
2976 }
2977
2978 /* cancel not allowed with another cancel/unlock in progress */
2979
2980 if (args->flags & DLM_LKF_CANCEL) {
2981 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2982 goto out;
2983
2984 if (is_overlap(lkb))
2985 goto out;
2986
3ae1acf9
DT
2987 /* don't let scand try to do a cancel */
2988 del_timeout(lkb);
2989
ef0c2bb0
DT
2990 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2991 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2992 rv = -EBUSY;
2993 goto out;
2994 }
2995
a536e381
DT
2996 /* there's nothing to cancel */
2997 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2998 !lkb->lkb_wait_type) {
2999 rv = -EBUSY;
3000 goto out;
3001 }
3002
ef0c2bb0
DT
3003 switch (lkb->lkb_wait_type) {
3004 case DLM_MSG_LOOKUP:
3005 case DLM_MSG_REQUEST:
3006 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3007 rv = -EBUSY;
3008 goto out;
3009 case DLM_MSG_UNLOCK:
3010 case DLM_MSG_CANCEL:
3011 goto out;
3012 }
3013 /* add_to_waiters() will set OVERLAP_CANCEL */
3014 goto out_ok;
3015 }
3016
3017 /* do we need to allow a force-unlock if there's a normal unlock
3018 already in progress? in what conditions could the normal unlock
3019 fail such that we'd want to send a force-unlock to be sure? */
3020
3021 if (args->flags & DLM_LKF_FORCEUNLOCK) {
3022 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3023 goto out;
3024
3025 if (is_overlap_unlock(lkb))
3026 goto out;
e7fd4179 3027
3ae1acf9
DT
3028 /* don't let scand try to do a cancel */
3029 del_timeout(lkb);
3030
ef0c2bb0
DT
3031 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3032 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3033 rv = -EBUSY;
3034 goto out;
3035 }
3036
3037 switch (lkb->lkb_wait_type) {
3038 case DLM_MSG_LOOKUP:
3039 case DLM_MSG_REQUEST:
3040 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3041 rv = -EBUSY;
3042 goto out;
3043 case DLM_MSG_UNLOCK:
3044 goto out;
3045 }
3046 /* add_to_waiters() will set OVERLAP_UNLOCK */
ef0c2bb0
DT
3047 }
3048
e7fd4179 3049 out_ok:
ef0c2bb0
DT
3050 /* an overlapping op shouldn't blow away exflags from other op */
3051 lkb->lkb_exflags |= args->flags;
e7fd4179
DT
3052 lkb->lkb_sbflags = 0;
3053 lkb->lkb_astparam = args->astparam;
e7fd4179
DT
3054 rv = 0;
3055 out:
9ac8ba46
AA
3056 switch (rv) {
3057 case 0:
3058 break;
3059 case -EINVAL:
3060 /* annoy the user because dlm usage is wrong */
3061 WARN_ON(1);
3062 log_error(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
3063 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3064 args->flags, lkb->lkb_wait_type,
3065 lkb->lkb_resource->res_name);
3066 break;
3067 default:
c2d76a62 3068 log_debug(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
ef0c2bb0
DT
3069 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3070 args->flags, lkb->lkb_wait_type,
3071 lkb->lkb_resource->res_name);
9ac8ba46
AA
3072 break;
3073 }
3074
e7fd4179
DT
3075 return rv;
3076}
3077
3078/*
3079 * Four stage 4 varieties:
3080 * do_request(), do_convert(), do_unlock(), do_cancel()
3081 * These are called on the master node for the given lock and
3082 * from the central locking logic.
3083 */
3084
3085static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3086{
3087 int error = 0;
3088
c503a621 3089 if (can_be_granted(r, lkb, 1, 0, NULL)) {
e7fd4179
DT
3090 grant_lock(r, lkb);
3091 queue_cast(r, lkb, 0);
3092 goto out;
3093 }
3094
3095 if (can_be_queued(lkb)) {
3096 error = -EINPROGRESS;
3097 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3ae1acf9 3098 add_timeout(lkb);
e7fd4179
DT
3099 goto out;
3100 }
3101
3102 error = -EAGAIN;
e7fd4179 3103 queue_cast(r, lkb, -EAGAIN);
e7fd4179
DT
3104 out:
3105 return error;
3106}
3107
cf6620ac
DT
3108static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3109 int error)
3110{
3111 switch (error) {
3112 case -EAGAIN:
3113 if (force_blocking_asts(lkb))
3114 send_blocking_asts_all(r, lkb);
3115 break;
3116 case -EINPROGRESS:
3117 send_blocking_asts(r, lkb);
3118 break;
3119 }
3120}
3121
e7fd4179
DT
3122static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3123{
3124 int error = 0;
c85d65e9 3125 int deadlk = 0;
e7fd4179
DT
3126
3127 /* changing an existing lock may allow others to be granted */
3128
c503a621 3129 if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
e7fd4179
DT
3130 grant_lock(r, lkb);
3131 queue_cast(r, lkb, 0);
e7fd4179
DT
3132 goto out;
3133 }
3134
c85d65e9
DT
3135 /* can_be_granted() detected that this lock would block in a conversion
3136 deadlock, so we leave it on the granted queue and return EDEADLK in
3137 the ast for the convert. */
3138
294e7e45 3139 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
c85d65e9 3140 /* it's left on the granted queue */
c85d65e9
DT
3141 revert_lock(r, lkb);
3142 queue_cast(r, lkb, -EDEADLK);
3143 error = -EDEADLK;
3144 goto out;
3145 }
3146
7d3c1feb
DT
3147 /* is_demoted() means the can_be_granted() above set the grmode
3148 to NL, and left us on the granted queue. This auto-demotion
3149 (due to CONVDEADLK) might mean other locks, and/or this lock, are
3150 now grantable. We have to try to grant other converting locks
3151 before we try again to grant this one. */
3152
3153 if (is_demoted(lkb)) {
4875647a 3154 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
c503a621 3155 if (_can_be_granted(r, lkb, 1, 0)) {
7d3c1feb
DT
3156 grant_lock(r, lkb);
3157 queue_cast(r, lkb, 0);
7d3c1feb
DT
3158 goto out;
3159 }
3160 /* else fall through and move to convert queue */
3161 }
3162
3163 if (can_be_queued(lkb)) {
e7fd4179
DT
3164 error = -EINPROGRESS;
3165 del_lkb(r, lkb);
3166 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3ae1acf9 3167 add_timeout(lkb);
e7fd4179
DT
3168 goto out;
3169 }
3170
3171 error = -EAGAIN;
e7fd4179 3172 queue_cast(r, lkb, -EAGAIN);
e7fd4179
DT
3173 out:
3174 return error;
3175}
3176
cf6620ac
DT
3177static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3178 int error)
3179{
3180 switch (error) {
3181 case 0:
4875647a 3182 grant_pending_locks(r, NULL);
cf6620ac
DT
3183 /* grant_pending_locks also sends basts */
3184 break;
3185 case -EAGAIN:
3186 if (force_blocking_asts(lkb))
3187 send_blocking_asts_all(r, lkb);
3188 break;
3189 case -EINPROGRESS:
3190 send_blocking_asts(r, lkb);
3191 break;
3192 }
3193}
3194
e7fd4179
DT
3195static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3196{
3197 remove_lock(r, lkb);
3198 queue_cast(r, lkb, -DLM_EUNLOCK);
e7fd4179
DT
3199 return -DLM_EUNLOCK;
3200}
3201
cf6620ac
DT
3202static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3203 int error)
3204{
4875647a 3205 grant_pending_locks(r, NULL);
cf6620ac
DT
3206}
3207
ef0c2bb0 3208/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
c04fecb4 3209
e7fd4179
DT
3210static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3211{
ef0c2bb0
DT
3212 int error;
3213
3214 error = revert_lock(r, lkb);
3215 if (error) {
3216 queue_cast(r, lkb, -DLM_ECANCEL);
ef0c2bb0
DT
3217 return -DLM_ECANCEL;
3218 }
3219 return 0;
e7fd4179
DT
3220}
3221
cf6620ac
DT
3222static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3223 int error)
3224{
3225 if (error)
4875647a 3226 grant_pending_locks(r, NULL);
cf6620ac
DT
3227}
3228
e7fd4179
DT
3229/*
3230 * Four stage 3 varieties:
3231 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3232 */
3233
3234/* add a new lkb to a possibly new rsb, called by requesting process */
3235
3236static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3237{
3238 int error;
3239
3240 /* set_master: sets lkb nodeid from r */
3241
3242 error = set_master(r, lkb);
3243 if (error < 0)
3244 goto out;
3245 if (error) {
3246 error = 0;
3247 goto out;
3248 }
3249
cf6620ac 3250 if (is_remote(r)) {
e7fd4179
DT
3251 /* receive_request() calls do_request() on remote node */
3252 error = send_request(r, lkb);
cf6620ac 3253 } else {
e7fd4179 3254 error = do_request(r, lkb);
cf6620ac
DT
3255 /* for remote locks the request_reply is sent
3256 between do_request and do_request_effects */
3257 do_request_effects(r, lkb, error);
3258 }
e7fd4179
DT
3259 out:
3260 return error;
3261}
3262
3bcd3687 3263/* change some property of an existing lkb, e.g. mode */
e7fd4179
DT
3264
3265static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3266{
3267 int error;
3268
cf6620ac 3269 if (is_remote(r)) {
e7fd4179
DT
3270 /* receive_convert() calls do_convert() on remote node */
3271 error = send_convert(r, lkb);
cf6620ac 3272 } else {
e7fd4179 3273 error = do_convert(r, lkb);
cf6620ac
DT
3274 /* for remote locks the convert_reply is sent
3275 between do_convert and do_convert_effects */
3276 do_convert_effects(r, lkb, error);
3277 }
e7fd4179
DT
3278
3279 return error;
3280}
3281
3282/* remove an existing lkb from the granted queue */
3283
3284static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3285{
3286 int error;
3287
cf6620ac 3288 if (is_remote(r)) {
e7fd4179
DT
3289 /* receive_unlock() calls do_unlock() on remote node */
3290 error = send_unlock(r, lkb);
cf6620ac 3291 } else {
e7fd4179 3292 error = do_unlock(r, lkb);
cf6620ac
DT
3293 /* for remote locks the unlock_reply is sent
3294 between do_unlock and do_unlock_effects */
3295 do_unlock_effects(r, lkb, error);
3296 }
e7fd4179
DT
3297
3298 return error;
3299}
3300
3301/* remove an existing lkb from the convert or wait queue */
3302
3303static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3304{
3305 int error;
3306
cf6620ac 3307 if (is_remote(r)) {
e7fd4179
DT
3308 /* receive_cancel() calls do_cancel() on remote node */
3309 error = send_cancel(r, lkb);
cf6620ac 3310 } else {
e7fd4179 3311 error = do_cancel(r, lkb);
cf6620ac
DT
3312 /* for remote locks the cancel_reply is sent
3313 between do_cancel and do_cancel_effects */
3314 do_cancel_effects(r, lkb, error);
3315 }
e7fd4179
DT
3316
3317 return error;
3318}
3319
3320/*
3321 * Four stage 2 varieties:
3322 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3323 */
3324
56171e0d
AA
3325static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3326 const void *name, int len,
3327 struct dlm_args *args)
e7fd4179
DT
3328{
3329 struct dlm_rsb *r;
3330 int error;
3331
3332 error = validate_lock_args(ls, lkb, args);
3333 if (error)
c04fecb4 3334 return error;
e7fd4179 3335
c04fecb4 3336 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
e7fd4179 3337 if (error)
c04fecb4 3338 return error;
e7fd4179
DT
3339
3340 lock_rsb(r);
3341
3342 attach_lkb(r, lkb);
3343 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3344
3345 error = _request_lock(r, lkb);
3346
3347 unlock_rsb(r);
3348 put_rsb(r);
e7fd4179
DT
3349 return error;
3350}
3351
3352static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3353 struct dlm_args *args)
3354{
3355 struct dlm_rsb *r;
3356 int error;
3357
3358 r = lkb->lkb_resource;
3359
3360 hold_rsb(r);
3361 lock_rsb(r);
3362
3363 error = validate_lock_args(ls, lkb, args);
3364 if (error)
3365 goto out;
3366
3367 error = _convert_lock(r, lkb);
3368 out:
3369 unlock_rsb(r);
3370 put_rsb(r);
3371 return error;
3372}
3373
3374static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3375 struct dlm_args *args)
3376{
3377 struct dlm_rsb *r;
3378 int error;
3379
3380 r = lkb->lkb_resource;
3381
3382 hold_rsb(r);
3383 lock_rsb(r);
3384
3385 error = validate_unlock_args(lkb, args);
3386 if (error)
3387 goto out;
3388
3389 error = _unlock_lock(r, lkb);
3390 out:
3391 unlock_rsb(r);
3392 put_rsb(r);
3393 return error;
3394}
3395
3396static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3397 struct dlm_args *args)
3398{
3399 struct dlm_rsb *r;
3400 int error;
3401
3402 r = lkb->lkb_resource;
3403
3404 hold_rsb(r);
3405 lock_rsb(r);
3406
3407 error = validate_unlock_args(lkb, args);
3408 if (error)
3409 goto out;
3410
3411 error = _cancel_lock(r, lkb);
3412 out:
3413 unlock_rsb(r);
3414 put_rsb(r);
3415 return error;
3416}
3417
3418/*
3419 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3420 */
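/* Editor's illustrative caller-side sketch (assumes a lockspace handle
 * "ls" returned by dlm_new_lockspace() and caller-defined my_ast() and
 * my_bast() callbacks; this example is not part of the original file):
 *
 *	struct dlm_lksb lksb = {};
 *	int error;
 *
 *	error = dlm_lock(ls, DLM_LOCK_EX, &lksb, DLM_LKF_NOQUEUE,
 *			 "my_resource", strlen("my_resource"), 0,
 *			 my_ast, &lksb, my_bast);
 *
 * A zero return only means the request was accepted; the final result
 * (0, -EAGAIN, ...) arrives in lksb.sb_status when my_ast() runs.  The
 * lock is released later with:
 *
 *	error = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, &lksb);
 *
 * whose completion is likewise reported through my_ast(), with
 * lksb.sb_status set to -DLM_EUNLOCK.
 */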
3421
3422int dlm_lock(dlm_lockspace_t *lockspace,
3423 int mode,
3424 struct dlm_lksb *lksb,
3425 uint32_t flags,
56171e0d 3426 const void *name,
e7fd4179
DT
3427 unsigned int namelen,
3428 uint32_t parent_lkid,
3429 void (*ast) (void *astarg),
3430 void *astarg,
3bcd3687 3431 void (*bast) (void *astarg, int mode))
e7fd4179
DT
3432{
3433 struct dlm_ls *ls;
3434 struct dlm_lkb *lkb;
3435 struct dlm_args args;
3436 int error, convert = flags & DLM_LKF_CONVERT;
3437
3438 ls = dlm_find_lockspace_local(lockspace);
3439 if (!ls)
3440 return -EINVAL;
3441
85e86edf 3442 dlm_lock_recovery(ls);
e7fd4179
DT
3443
3444 if (convert)
3445 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3446 else
3447 error = create_lkb(ls, &lkb);
3448
3449 if (error)
3450 goto out;
3451
5d92a30e 3452 trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
f1d3b8f9 3453
6b0afc0c 3454#ifdef CONFIG_DLM_DEPRECATED_API
d7db923e 3455 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3bcd3687 3456 astarg, bast, &args);
6b0afc0c
AA
3457#else
3458 error = set_lock_args(mode, lksb, flags, namelen, ast, astarg, bast,
3459 &args);
3460#endif
e7fd4179
DT
3461 if (error)
3462 goto out_put;
3463
3464 if (convert)
3465 error = convert_lock(ls, lkb, &args);
3466 else
3467 error = request_lock(ls, lkb, name, namelen, &args);
3468
3469 if (error == -EINPROGRESS)
3470 error = 0;
3471 out_put:
7a3de732 3472 trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true);
f1d3b8f9 3473
e7fd4179 3474 if (convert || error)
b3f58d8f 3475 __put_lkb(ls, lkb);
c85d65e9 3476 if (error == -EAGAIN || error == -EDEADLK)
e7fd4179
DT
3477 error = 0;
3478 out:
85e86edf 3479 dlm_unlock_recovery(ls);
e7fd4179
DT
3480 dlm_put_lockspace(ls);
3481 return error;
3482}
3483
3484int dlm_unlock(dlm_lockspace_t *lockspace,
3485 uint32_t lkid,
3486 uint32_t flags,
3487 struct dlm_lksb *lksb,
3488 void *astarg)
3489{
3490 struct dlm_ls *ls;
3491 struct dlm_lkb *lkb;
3492 struct dlm_args args;
3493 int error;
3494
3495 ls = dlm_find_lockspace_local(lockspace);
3496 if (!ls)
3497 return -EINVAL;
3498
85e86edf 3499 dlm_lock_recovery(ls);
e7fd4179
DT
3500
3501 error = find_lkb(ls, lkid, &lkb);
3502 if (error)
3503 goto out;
3504
f1d3b8f9
AA
3505 trace_dlm_unlock_start(ls, lkb, flags);
3506
e7fd4179
DT
3507 error = set_unlock_args(flags, astarg, &args);
3508 if (error)
3509 goto out_put;
3510
3511 if (flags & DLM_LKF_CANCEL)
3512 error = cancel_lock(ls, lkb, &args);
3513 else
3514 error = unlock_lock(ls, lkb, &args);
3515
3516 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3517 error = 0;
ef0c2bb0
DT
3518 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3519 error = 0;
e7fd4179 3520 out_put:
f1d3b8f9
AA
3521 trace_dlm_unlock_end(ls, lkb, flags, error);
3522
b3f58d8f 3523 dlm_put_lkb(lkb);
e7fd4179 3524 out:
85e86edf 3525 dlm_unlock_recovery(ls);
e7fd4179
DT
3526 dlm_put_lockspace(ls);
3527 return error;
3528}
3529
3530/*
3531 * send/receive routines for remote operations and replies
3532 *
3533 * send_args
3534 * send_common
3535 * send_request receive_request
3536 * send_convert receive_convert
3537 * send_unlock receive_unlock
3538 * send_cancel receive_cancel
3539 * send_grant receive_grant
3540 * send_bast receive_bast
3541 * send_lookup receive_lookup
3542 * send_remove receive_remove
3543 *
3544 * send_common_reply
3545 * receive_request_reply send_request_reply
3546 * receive_convert_reply send_convert_reply
3547 * receive_unlock_reply send_unlock_reply
3548 * receive_cancel_reply send_cancel_reply
3549 * receive_lookup_reply send_lookup_reply
3550 */
3551
7e4dac33
DT
3552static int _create_message(struct dlm_ls *ls, int mb_len,
3553 int to_nodeid, int mstype,
3554 struct dlm_message **ms_ret,
3555 struct dlm_mhandle **mh_ret)
e7fd4179
DT
3556{
3557 struct dlm_message *ms;
3558 struct dlm_mhandle *mh;
3559 char *mb;
e7fd4179
DT
3560
3561 /* get_buffer gives us a message handle (mh) that we need to
a070a91c 3562 pass into midcomms_commit and a message buffer (mb) that we
e7fd4179
DT
3563 write our data into */
3564
a070a91c 3565 mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb);
e7fd4179
DT
3566 if (!mh)
3567 return -ENOBUFS;
3568
e7fd4179
DT
3569 ms = (struct dlm_message *) mb;
3570
3428785a
AA
3571 ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3572 ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
3573 ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
3574 ms->m_header.h_length = cpu_to_le16(mb_len);
e7fd4179
DT
3575 ms->m_header.h_cmd = DLM_MSG;
3576
00e99ccd 3577 ms->m_type = cpu_to_le32(mstype);
e7fd4179
DT
3578
3579 *mh_ret = mh;
3580 *ms_ret = ms;
3581 return 0;
3582}
3583
7e4dac33
DT
3584static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3585 int to_nodeid, int mstype,
3586 struct dlm_message **ms_ret,
3587 struct dlm_mhandle **mh_ret)
3588{
3589 int mb_len = sizeof(struct dlm_message);
3590
3591 switch (mstype) {
3592 case DLM_MSG_REQUEST:
3593 case DLM_MSG_LOOKUP:
3594 case DLM_MSG_REMOVE:
3595 mb_len += r->res_length;
3596 break;
3597 case DLM_MSG_CONVERT:
3598 case DLM_MSG_UNLOCK:
3599 case DLM_MSG_REQUEST_REPLY:
3600 case DLM_MSG_CONVERT_REPLY:
3601 case DLM_MSG_GRANT:
3602 if (lkb && lkb->lkb_lvbptr)
3603 mb_len += r->res_ls->ls_lvblen;
3604 break;
3605 }
3606
3607 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3608 ms_ret, mh_ret);
3609}
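
create_message() sizes the buffer from the message type: name-carrying messages (request, lookup, remove) append the resource name to the fixed struct, while LVB-carrying messages append up to ls_lvblen bytes. A small sketch of that sizing rule, using stand-in numbers rather than the real struct size:

#include <stdio.h>
#include <stddef.h>

#define EXAMPLE_MSG_FIXED 104   /* stand-in for sizeof(struct dlm_message) */
#define EXAMPLE_LVB_LEN    32   /* stand-in for ls->ls_lvblen */

static size_t msg_len_for_request(size_t res_name_len)
{
        return EXAMPLE_MSG_FIXED + res_name_len;        /* name goes in m_extra */
}

static size_t msg_len_for_convert(int has_lvb)
{
        return EXAMPLE_MSG_FIXED + (has_lvb ? EXAMPLE_LVB_LEN : 0);
}

int main(void)
{
        printf("request with 16-byte name: %zu bytes\n", msg_len_for_request(16));
        printf("convert with LVB: %zu bytes\n", msg_len_for_convert(1));
        return 0;
}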
3610
e7fd4179
DT
3611/* further lowcomms enhancements or alternate implementations may make
3612 the return value from this function useful at some point */
3613
3614static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3615{
a070a91c 3616 dlm_midcomms_commit_mhandle(mh);
e7fd4179
DT
3617 return 0;
3618}
3619
3620static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3621 struct dlm_message *ms)
3622{
00e99ccd
AA
3623 ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid);
3624 ms->m_pid = cpu_to_le32(lkb->lkb_ownpid);
3625 ms->m_lkid = cpu_to_le32(lkb->lkb_id);
3626 ms->m_remid = cpu_to_le32(lkb->lkb_remid);
3627 ms->m_exflags = cpu_to_le32(lkb->lkb_exflags);
3628 ms->m_sbflags = cpu_to_le32(lkb->lkb_sbflags);
3629 ms->m_flags = cpu_to_le32(lkb->lkb_flags);
3630 ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
3631 ms->m_status = cpu_to_le32(lkb->lkb_status);
3632 ms->m_grmode = cpu_to_le32(lkb->lkb_grmode);
3633 ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode);
3634 ms->m_hash = cpu_to_le32(r->res_hash);
e7fd4179
DT
3635
3636 /* m_result and m_bastmode are set from function args,
3637 not from lkb fields */
3638
e5dae548 3639 if (lkb->lkb_bastfn)
00e99ccd 3640 ms->m_asts |= cpu_to_le32(DLM_CB_BAST);
e5dae548 3641 if (lkb->lkb_astfn)
00e99ccd 3642 ms->m_asts |= cpu_to_le32(DLM_CB_CAST);
e7fd4179 3643
da49f36f
DT
3644 /* compare with switch in create_message; send_remove() doesn't
3645 use send_args() */
e7fd4179 3646
da49f36f 3647 switch (ms->m_type) {
00e99ccd
AA
3648 case cpu_to_le32(DLM_MSG_REQUEST):
3649 case cpu_to_le32(DLM_MSG_LOOKUP):
da49f36f
DT
3650 memcpy(ms->m_extra, r->res_name, r->res_length);
3651 break;
00e99ccd
AA
3652 case cpu_to_le32(DLM_MSG_CONVERT):
3653 case cpu_to_le32(DLM_MSG_UNLOCK):
3654 case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
3655 case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
3656 case cpu_to_le32(DLM_MSG_GRANT):
7175e131 3657 if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK))
da49f36f 3658 break;
e7fd4179 3659 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
da49f36f
DT
3660 break;
3661 }
e7fd4179
DT
3662}
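
Note how send_args() never sends callback pointers: m_asts only records which callbacks exist (DLM_CB_CAST, DLM_CB_BAST), and receive_request_args() later substitutes fake_astfn/fake_bastfn on the master copy. A sketch of that encoding, with illustrative bit values rather than the real constants:

#include <stdint.h>
#include <stdio.h>

#define CB_CAST 0x1     /* completion ast requested */
#define CB_BAST 0x2     /* blocking ast requested */

int main(void)
{
        int have_astfn = 1, have_bastfn = 0;
        uint32_t m_asts = 0;

        if (have_astfn)
                m_asts |= CB_CAST;
        if (have_bastfn)
                m_asts |= CB_BAST;

        /* receiving side keeps only a yes/no, never a remote pointer */
        printf("cast=%d bast=%d\n", !!(m_asts & CB_CAST), !!(m_asts & CB_BAST));
        return 0;
}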
3663
3664static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3665{
3666 struct dlm_message *ms;
3667 struct dlm_mhandle *mh;
3668 int to_nodeid, error;
3669
c6ff669b
DT
3670 to_nodeid = r->res_nodeid;
3671
3672 error = add_to_waiters(lkb, mstype, to_nodeid);
ef0c2bb0
DT
3673 if (error)
3674 return error;
e7fd4179 3675
e7fd4179
DT
3676 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3677 if (error)
3678 goto fail;
3679
3680 send_args(r, lkb, ms);
3681
3682 error = send_message(mh, ms);
3683 if (error)
3684 goto fail;
3685 return 0;
3686
3687 fail:
ef0c2bb0 3688 remove_from_waiters(lkb, msg_reply_type(mstype));
e7fd4179
DT
3689 return error;
3690}
3691
3692static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3693{
3694 return send_common(r, lkb, DLM_MSG_REQUEST);
3695}
3696
3697static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3698{
3699 int error;
3700
3701 error = send_common(r, lkb, DLM_MSG_CONVERT);
3702
3703 /* down conversions go without a reply from the master */
3704 if (!error && down_conversion(lkb)) {
ef0c2bb0 3705 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
00e99ccd
AA
3706 r->res_ls->ls_stub_ms.m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
3707 r->res_ls->ls_stub_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
e7fd4179
DT
3708 r->res_ls->ls_stub_ms.m_result = 0;
3709 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3710 }
3711
3712 return error;
3713}
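
The stub reply above works because a down-conversion can be assumed to succeed: DLM modes are ordered by severity (NL, CR, CW, PR, PW, EX), and moving to a less severe mode never adds a new conflict, so the local node completes the conversion without waiting for the master. The one wrinkle is the PR/CW pair, which are not strictly ordered against each other and get special handling during recovery (see dlm_recover_waiters_pre later). A rough illustration, not the kernel's down_conversion() helper:

#include <stdio.h>

enum { LOCK_NL, LOCK_CR, LOCK_CW, LOCK_PR, LOCK_PW, LOCK_EX };

/* Illustrative check only; the real code also excludes the PR<->CW
   "middle conversion" case, which is neither up nor down. */
static int is_down_conversion(int grmode, int rqmode)
{
        return rqmode < grmode;         /* e.g. EX -> PR, PW -> NL */
}

int main(void)
{
        printf("EX->PR down? %d\n", is_down_conversion(LOCK_EX, LOCK_PR));
        printf("CR->PW down? %d\n", is_down_conversion(LOCK_CR, LOCK_PW));
        return 0;
}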
3714
3715/* FIXME: if this lkb is the only lock we hold on the rsb, then set
3716 MASTER_UNCERTAIN to force the next request on the rsb to confirm
3717 that the master is still correct. */
3718
3719static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3720{
3721 return send_common(r, lkb, DLM_MSG_UNLOCK);
3722}
3723
3724static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3725{
3726 return send_common(r, lkb, DLM_MSG_CANCEL);
3727}
3728
3729static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3730{
3731 struct dlm_message *ms;
3732 struct dlm_mhandle *mh;
3733 int to_nodeid, error;
3734
3735 to_nodeid = lkb->lkb_nodeid;
3736
3737 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3738 if (error)
3739 goto out;
3740
3741 send_args(r, lkb, ms);
3742
3743 ms->m_result = 0;
3744
3745 error = send_message(mh, ms);
3746 out:
3747 return error;
3748}
3749
3750static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3751{
3752 struct dlm_message *ms;
3753 struct dlm_mhandle *mh;
3754 int to_nodeid, error;
3755
3756 to_nodeid = lkb->lkb_nodeid;
3757
3758 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3759 if (error)
3760 goto out;
3761
3762 send_args(r, lkb, ms);
3763
00e99ccd 3764 ms->m_bastmode = cpu_to_le32(mode);
e7fd4179
DT
3765
3766 error = send_message(mh, ms);
3767 out:
3768 return error;
3769}
3770
3771static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3772{
3773 struct dlm_message *ms;
3774 struct dlm_mhandle *mh;
3775 int to_nodeid, error;
3776
c6ff669b
DT
3777 to_nodeid = dlm_dir_nodeid(r);
3778
3779 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
ef0c2bb0
DT
3780 if (error)
3781 return error;
e7fd4179 3782
e7fd4179
DT
3783 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3784 if (error)
3785 goto fail;
3786
3787 send_args(r, lkb, ms);
3788
3789 error = send_message(mh, ms);
3790 if (error)
3791 goto fail;
3792 return 0;
3793
3794 fail:
ef0c2bb0 3795 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
e7fd4179
DT
3796 return error;
3797}
3798
3799static int send_remove(struct dlm_rsb *r)
3800{
3801 struct dlm_message *ms;
3802 struct dlm_mhandle *mh;
3803 int to_nodeid, error;
3804
3805 to_nodeid = dlm_dir_nodeid(r);
3806
3807 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3808 if (error)
3809 goto out;
3810
3811 memcpy(ms->m_extra, r->res_name, r->res_length);
00e99ccd 3812 ms->m_hash = cpu_to_le32(r->res_hash);
e7fd4179
DT
3813
3814 error = send_message(mh, ms);
3815 out:
3816 return error;
3817}
3818
3819static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3820 int mstype, int rv)
3821{
3822 struct dlm_message *ms;
3823 struct dlm_mhandle *mh;
3824 int to_nodeid, error;
3825
3826 to_nodeid = lkb->lkb_nodeid;
3827
3828 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3829 if (error)
3830 goto out;
3831
3832 send_args(r, lkb, ms);
3833
00e99ccd 3834 ms->m_result = cpu_to_le32(to_dlm_errno(rv));
e7fd4179
DT
3835
3836 error = send_message(mh, ms);
3837 out:
3838 return error;
3839}
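
The to_dlm_errno()/from_dlm_errno() pair used here (defined elsewhere in fs/dlm) presumably maps local errno values to fixed wire codes and back, so the numeric result carried in m_result does not depend on the sender's errno numbering; errno values such as EDEADLK differ between architectures. A stand-in sketch of that round trip, with an invented wire value rather than the real mapping:

#include <errno.h>
#include <stdio.h>

#define WIRE_EDEADLK 1001       /* illustrative wire value only */

static int to_wire_errno(int err)
{
        return (err == -EDEADLK) ? -WIRE_EDEADLK : err;
}

static int from_wire_errno(int err)
{
        return (err == -WIRE_EDEADLK) ? -EDEADLK : err;
}

int main(void)
{
        int rv = -EDEADLK;

        printf("local %d -> wire %d -> local %d\n",
               rv, to_wire_errno(rv), from_wire_errno(to_wire_errno(rv)));
        return 0;
}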
3840
3841static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3842{
3843 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3844}
3845
3846static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3847{
3848 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3849}
3850
3851static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3852{
3853 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3854}
3855
3856static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3857{
3858 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3859}
3860
3861static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3862 int ret_nodeid, int rv)
3863{
3864 struct dlm_rsb *r = &ls->ls_stub_rsb;
3865 struct dlm_message *ms;
3866 struct dlm_mhandle *mh;
3428785a 3867 int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid);
e7fd4179
DT
3868
3869 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3870 if (error)
3871 goto out;
3872
3873 ms->m_lkid = ms_in->m_lkid;
00e99ccd
AA
3874 ms->m_result = cpu_to_le32(to_dlm_errno(rv));
3875 ms->m_nodeid = cpu_to_le32(ret_nodeid);
e7fd4179
DT
3876
3877 error = send_message(mh, ms);
3878 out:
3879 return error;
3880}
3881
3882/* which args we save from a received message depends heavily on the type
3883 of message, unlike the send side where we can safely send everything about
3884 the lkb for any type of message */
3885
3886static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3887{
00e99ccd
AA
3888 lkb->lkb_exflags = le32_to_cpu(ms->m_exflags);
3889 lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags);
e7fd4179 3890 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
00e99ccd 3891 (le32_to_cpu(ms->m_flags) & 0x0000FFFF);
e7fd4179
DT
3892}
3893
3894static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3895{
00e99ccd 3896 if (ms->m_flags == cpu_to_le32(DLM_IFL_STUB_MS))
2a7ce0ed
DT
3897 return;
3898
00e99ccd 3899 lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags);
e7fd4179 3900 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
00e99ccd 3901 (le32_to_cpu(ms->m_flags) & 0x0000FFFF);
e7fd4179
DT
3902}
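
Both receive_flags() and receive_flags_reply() merge flags with the same mask: only the low 16 bits of lkb_flags travel in m_flags, while the high 16 bits are node-local state (internal DLM_IFL_* bits such as MSTCPY and RESEND) and must survive the update. A quick demonstration of the merge:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lkb_flags  = 0x00030000 | 0x0005;  /* local bits | old wire bits */
        uint32_t wire_flags = 0x0009;               /* bits received in m_flags */

        lkb_flags = (lkb_flags & 0xFFFF0000) | (wire_flags & 0x0000FFFF);
        printf("lkb_flags = 0x%08x\n", (unsigned)lkb_flags);   /* 0x00030009 */
        return 0;
}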
3903
3904static int receive_extralen(struct dlm_message *ms)
3905{
3428785a
AA
3906 return (le16_to_cpu(ms->m_header.h_length) -
3907 sizeof(struct dlm_message));
e7fd4179
DT
3908}
3909
e7fd4179
DT
3910static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3911 struct dlm_message *ms)
3912{
3913 int len;
3914
3915 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3916 if (!lkb->lkb_lvbptr)
52bda2b5 3917 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
e7fd4179
DT
3918 if (!lkb->lkb_lvbptr)
3919 return -ENOMEM;
3920 len = receive_extralen(ms);
cfa805f6
BVA
3921 if (len > ls->ls_lvblen)
3922 len = ls->ls_lvblen;
e7fd4179
DT
3923 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3924 }
3925 return 0;
3926}
3927
e5dae548
DT
3928static void fake_bastfn(void *astparam, int mode)
3929{
3930 log_print("fake_bastfn should not be called");
3931}
3932
3933static void fake_astfn(void *astparam)
3934{
3935 log_print("fake_astfn should not be called");
3936}
3937
e7fd4179
DT
3938static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3939 struct dlm_message *ms)
3940{
3428785a 3941 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
00e99ccd
AA
3942 lkb->lkb_ownpid = le32_to_cpu(ms->m_pid);
3943 lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
e7fd4179 3944 lkb->lkb_grmode = DLM_LOCK_IV;
00e99ccd 3945 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
e5dae548 3946
00e99ccd
AA
3947 lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL;
3948 lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL;
e7fd4179 3949
8d07fd50
DT
3950 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3951 /* lkb was just created so there won't be an lvb yet */
52bda2b5 3952 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
8d07fd50
DT
3953 if (!lkb->lkb_lvbptr)
3954 return -ENOMEM;
3955 }
e7fd4179
DT
3956
3957 return 0;
3958}
3959
3960static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3961 struct dlm_message *ms)
3962{
e7fd4179
DT
3963 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3964 return -EBUSY;
3965
e7fd4179
DT
3966 if (receive_lvb(ls, lkb, ms))
3967 return -ENOMEM;
3968
00e99ccd
AA
3969 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
3970 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
e7fd4179
DT
3971
3972 return 0;
3973}
3974
3975static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3976 struct dlm_message *ms)
3977{
e7fd4179
DT
3978 if (receive_lvb(ls, lkb, ms))
3979 return -ENOMEM;
3980 return 0;
3981}
3982
3983/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3984 uses to send a reply and that the remote end uses to process the reply. */
3985
3986static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3987{
3988 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3428785a 3989 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
00e99ccd 3990 lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
e7fd4179
DT
3991}
3992
c54e04b0
DT
3993/* This is called after the rsb is locked so that we can safely inspect
3994 fields in the lkb. */
3995
3996static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3997{
3428785a 3998 int from = le32_to_cpu(ms->m_header.h_nodeid);
c54e04b0
DT
3999 int error = 0;
4000
6c2e3bf6 4001	/* currently mixing of user/kernel locks is not supported */

00e99ccd
AA
4002 if (ms->m_flags & cpu_to_le32(DLM_IFL_USER) &&
4003 ~lkb->lkb_flags & DLM_IFL_USER) {
6c2e3bf6
AA
4004 log_error(lkb->lkb_resource->res_ls,
4005 "got user dlm message for a kernel lock");
4006 error = -EINVAL;
4007 goto out;
4008 }
4009
c54e04b0 4010 switch (ms->m_type) {
00e99ccd
AA
4011 case cpu_to_le32(DLM_MSG_CONVERT):
4012 case cpu_to_le32(DLM_MSG_UNLOCK):
4013 case cpu_to_le32(DLM_MSG_CANCEL):
c54e04b0
DT
4014 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
4015 error = -EINVAL;
4016 break;
4017
00e99ccd
AA
4018 case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
4019 case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
4020 case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
4021 case cpu_to_le32(DLM_MSG_GRANT):
4022 case cpu_to_le32(DLM_MSG_BAST):
c54e04b0
DT
4023 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
4024 error = -EINVAL;
4025 break;
4026
00e99ccd 4027 case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
c54e04b0
DT
4028 if (!is_process_copy(lkb))
4029 error = -EINVAL;
4030 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4031 error = -EINVAL;
4032 break;
4033
4034 default:
4035 error = -EINVAL;
4036 }
4037
6c2e3bf6 4038out:
c54e04b0
DT
4039 if (error)
4040 log_error(lkb->lkb_resource->res_ls,
4041 "ignore invalid message %d from %d %x %x %x %d",
00e99ccd
AA
4042 le32_to_cpu(ms->m_type), from, lkb->lkb_id,
4043 lkb->lkb_remid, lkb->lkb_flags, lkb->lkb_nodeid);
c54e04b0
DT
4044 return error;
4045}
4046
96006ea6
DT
4047static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4048{
4049 char name[DLM_RESNAME_MAXLEN + 1];
4050 struct dlm_message *ms;
4051 struct dlm_mhandle *mh;
4052 struct dlm_rsb *r;
4053 uint32_t hash, b;
4054 int rv, dir_nodeid;
4055
4056 memset(name, 0, sizeof(name));
4057 memcpy(name, ms_name, len);
4058
4059 hash = jhash(name, len, 0);
4060 b = hash & (ls->ls_rsbtbl_size - 1);
4061
4062 dir_nodeid = dlm_hash2nodeid(ls, hash);
4063
4064 log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4065
4066 spin_lock(&ls->ls_rsbtbl[b].lock);
4067 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4068 if (!rv) {
4069 spin_unlock(&ls->ls_rsbtbl[b].lock);
4070 log_error(ls, "repeat_remove on keep %s", name);
4071 return;
4072 }
4073
4074 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4075 if (!rv) {
4076 spin_unlock(&ls->ls_rsbtbl[b].lock);
4077 log_error(ls, "repeat_remove on toss %s", name);
4078 return;
4079 }
4080
4081 /* use ls->remove_name2 to avoid conflict with shrink? */
4082
4083 spin_lock(&ls->ls_remove_spin);
4084 ls->ls_remove_len = len;
4085 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4086 spin_unlock(&ls->ls_remove_spin);
4087 spin_unlock(&ls->ls_rsbtbl[b].lock);
4088
4089 rv = _create_message(ls, sizeof(struct dlm_message) + len,
4090 dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4091 if (rv)
ba589959 4092 goto out;
96006ea6
DT
4093
4094 memcpy(ms->m_extra, name, len);
00e99ccd 4095 ms->m_hash = cpu_to_le32(hash);
96006ea6
DT
4096
4097 send_message(mh, ms);
4098
ba589959 4099out:
96006ea6
DT
4100 spin_lock(&ls->ls_remove_spin);
4101 ls->ls_remove_len = 0;
4102 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4103 spin_unlock(&ls->ls_remove_spin);
f6f74183 4104 wake_up(&ls->ls_remove_wait);
96006ea6
DT
4105}
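
send_repeat_remove() and receive_remove() locate the rsb hash bucket the same way: hash the resource name (the kernel uses jhash()) and mask with the power-of-two table size ls_rsbtbl_size. A self-contained sketch of that bucket selection; the hash function below is a stand-in so the example builds on its own:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t toy_hash(const char *name, size_t len)
{
        uint32_t h = 2166136261u;       /* FNV-1a, standing in for jhash() */

        while (len--)
                h = (h ^ (uint8_t)*name++) * 16777619u;
        return h;
}

int main(void)
{
        const char *res_name = "example-resource";
        uint32_t table_size = 1024;     /* stand-in for ls->ls_rsbtbl_size */
        uint32_t hash = toy_hash(res_name, strlen(res_name));
        uint32_t bucket = hash & (table_size - 1);  /* valid because size is 2^n */

        printf("hash 0x%08x -> bucket %u\n", (unsigned)hash, (unsigned)bucket);
        return 0;
}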
4106
6d40c4a7 4107static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4108{
4109 struct dlm_lkb *lkb;
4110 struct dlm_rsb *r;
c04fecb4 4111 int from_nodeid;
96006ea6 4112 int error, namelen = 0;
e7fd4179 4113
3428785a 4114 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
c04fecb4 4115
e7fd4179
DT
4116 error = create_lkb(ls, &lkb);
4117 if (error)
4118 goto fail;
4119
4120 receive_flags(lkb, ms);
4121 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4122 error = receive_request_args(ls, lkb, ms);
4123 if (error) {
b3f58d8f 4124 __put_lkb(ls, lkb);
e7fd4179
DT
4125 goto fail;
4126 }
4127
c04fecb4
DT
4128 /* The dir node is the authority on whether we are the master
4129 for this rsb or not, so if the master sends us a request, we should
4130 recreate the rsb if we've destroyed it. This race happens when we
4131 send a remove message to the dir node at the same time that the dir
4132 node sends us a request for the rsb. */
4133
e7fd4179
DT
4134 namelen = receive_extralen(ms);
4135
c04fecb4
DT
4136 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4137 R_RECEIVE_REQUEST, &r);
e7fd4179 4138 if (error) {
b3f58d8f 4139 __put_lkb(ls, lkb);
e7fd4179
DT
4140 goto fail;
4141 }
4142
4143 lock_rsb(r);
4144
c04fecb4
DT
4145 if (r->res_master_nodeid != dlm_our_nodeid()) {
4146 error = validate_master_nodeid(ls, r, from_nodeid);
4147 if (error) {
4148 unlock_rsb(r);
4149 put_rsb(r);
4150 __put_lkb(ls, lkb);
4151 goto fail;
4152 }
4153 }
4154
e7fd4179
DT
4155 attach_lkb(r, lkb);
4156 error = do_request(r, lkb);
4157 send_request_reply(r, lkb, error);
cf6620ac 4158 do_request_effects(r, lkb, error);
e7fd4179
DT
4159
4160 unlock_rsb(r);
4161 put_rsb(r);
4162
4163 if (error == -EINPROGRESS)
4164 error = 0;
4165 if (error)
b3f58d8f 4166 dlm_put_lkb(lkb);
6d40c4a7 4167 return 0;
e7fd4179
DT
4168
4169 fail:
c04fecb4
DT
4170 /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4171 and do this receive_request again from process_lookup_list once
4172 we get the lookup reply. This would avoid a many repeated
4173 ENOTBLK request failures when the lookup reply designating us
4174 as master is delayed. */
4175
4176 /* We could repeatedly return -EBADR here if our send_remove() is
4177 delayed in being sent/arriving/being processed on the dir node.
4178	   Another node would repeatedly look up the master, and the dir
4179 node would continue returning our nodeid until our send_remove
96006ea6
DT
4180 took effect.
4181
4182 We send another remove message in case our previous send_remove
4183 was lost/ignored/missed somehow. */
c04fecb4
DT
4184
4185 if (error != -ENOTBLK) {
4186 log_limit(ls, "receive_request %x from %d %d",
00e99ccd 4187 le32_to_cpu(ms->m_lkid), from_nodeid, error);
c04fecb4
DT
4188 }
4189
96006ea6
DT
4190 if (namelen && error == -EBADR) {
4191 send_repeat_remove(ls, ms->m_extra, namelen);
4192 msleep(1000);
4193 }
4194
e7fd4179
DT
4195 setup_stub_lkb(ls, ms);
4196 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
6d40c4a7 4197 return error;
e7fd4179
DT
4198}
4199
6d40c4a7 4200static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4201{
4202 struct dlm_lkb *lkb;
4203 struct dlm_rsb *r;
90135925 4204 int error, reply = 1;
e7fd4179 4205
00e99ccd 4206 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
e7fd4179
DT
4207 if (error)
4208 goto fail;
4209
00e99ccd 4210 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
4875647a
DT
4211 log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4212 "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4213 (unsigned long long)lkb->lkb_recover_seq,
00e99ccd
AA
4214 le32_to_cpu(ms->m_header.h_nodeid),
4215 le32_to_cpu(ms->m_lkid));
6d40c4a7 4216 error = -ENOENT;
c0174726 4217 dlm_put_lkb(lkb);
6d40c4a7
DT
4218 goto fail;
4219 }
4220
e7fd4179
DT
4221 r = lkb->lkb_resource;
4222
4223 hold_rsb(r);
4224 lock_rsb(r);
4225
c54e04b0
DT
4226 error = validate_message(lkb, ms);
4227 if (error)
4228 goto out;
4229
e7fd4179 4230 receive_flags(lkb, ms);
cf6620ac 4231
e7fd4179 4232 error = receive_convert_args(ls, lkb, ms);
cf6620ac
DT
4233 if (error) {
4234 send_convert_reply(r, lkb, error);
4235 goto out;
4236 }
4237
e7fd4179
DT
4238 reply = !down_conversion(lkb);
4239
4240 error = do_convert(r, lkb);
e7fd4179
DT
4241 if (reply)
4242 send_convert_reply(r, lkb, error);
cf6620ac 4243 do_convert_effects(r, lkb, error);
c54e04b0 4244 out:
e7fd4179
DT
4245 unlock_rsb(r);
4246 put_rsb(r);
b3f58d8f 4247 dlm_put_lkb(lkb);
6d40c4a7 4248 return 0;
e7fd4179
DT
4249
4250 fail:
4251 setup_stub_lkb(ls, ms);
4252 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
6d40c4a7 4253 return error;
e7fd4179
DT
4254}
4255
6d40c4a7 4256static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4257{
4258 struct dlm_lkb *lkb;
4259 struct dlm_rsb *r;
4260 int error;
4261
00e99ccd 4262 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
e7fd4179
DT
4263 if (error)
4264 goto fail;
4265
00e99ccd 4266 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
6d40c4a7
DT
4267 log_error(ls, "receive_unlock %x remid %x remote %d %x",
4268 lkb->lkb_id, lkb->lkb_remid,
00e99ccd
AA
4269 le32_to_cpu(ms->m_header.h_nodeid),
4270 le32_to_cpu(ms->m_lkid));
6d40c4a7 4271 error = -ENOENT;
c0174726 4272 dlm_put_lkb(lkb);
6d40c4a7
DT
4273 goto fail;
4274 }
4275
e7fd4179
DT
4276 r = lkb->lkb_resource;
4277
4278 hold_rsb(r);
4279 lock_rsb(r);
4280
c54e04b0
DT
4281 error = validate_message(lkb, ms);
4282 if (error)
4283 goto out;
4284
e7fd4179 4285 receive_flags(lkb, ms);
cf6620ac 4286
e7fd4179 4287 error = receive_unlock_args(ls, lkb, ms);
cf6620ac
DT
4288 if (error) {
4289 send_unlock_reply(r, lkb, error);
4290 goto out;
4291 }
e7fd4179
DT
4292
4293 error = do_unlock(r, lkb);
e7fd4179 4294 send_unlock_reply(r, lkb, error);
cf6620ac 4295 do_unlock_effects(r, lkb, error);
c54e04b0 4296 out:
e7fd4179
DT
4297 unlock_rsb(r);
4298 put_rsb(r);
b3f58d8f 4299 dlm_put_lkb(lkb);
6d40c4a7 4300 return 0;
e7fd4179
DT
4301
4302 fail:
4303 setup_stub_lkb(ls, ms);
4304 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
6d40c4a7 4305 return error;
e7fd4179
DT
4306}
4307
6d40c4a7 4308static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4309{
4310 struct dlm_lkb *lkb;
4311 struct dlm_rsb *r;
4312 int error;
4313
00e99ccd 4314 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
e7fd4179
DT
4315 if (error)
4316 goto fail;
4317
4318 receive_flags(lkb, ms);
4319
4320 r = lkb->lkb_resource;
4321
4322 hold_rsb(r);
4323 lock_rsb(r);
4324
c54e04b0
DT
4325 error = validate_message(lkb, ms);
4326 if (error)
4327 goto out;
4328
e7fd4179
DT
4329 error = do_cancel(r, lkb);
4330 send_cancel_reply(r, lkb, error);
cf6620ac 4331 do_cancel_effects(r, lkb, error);
c54e04b0 4332 out:
e7fd4179
DT
4333 unlock_rsb(r);
4334 put_rsb(r);
b3f58d8f 4335 dlm_put_lkb(lkb);
6d40c4a7 4336 return 0;
e7fd4179
DT
4337
4338 fail:
4339 setup_stub_lkb(ls, ms);
4340 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
6d40c4a7 4341 return error;
e7fd4179
DT
4342}
4343
6d40c4a7 4344static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4345{
4346 struct dlm_lkb *lkb;
4347 struct dlm_rsb *r;
4348 int error;
4349
00e99ccd 4350 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
6d40c4a7
DT
4351 if (error)
4352 return error;
e7fd4179
DT
4353
4354 r = lkb->lkb_resource;
4355
4356 hold_rsb(r);
4357 lock_rsb(r);
4358
c54e04b0
DT
4359 error = validate_message(lkb, ms);
4360 if (error)
4361 goto out;
4362
e7fd4179 4363 receive_flags_reply(lkb, ms);
7d3c1feb
DT
4364 if (is_altmode(lkb))
4365 munge_altmode(lkb, ms);
e7fd4179
DT
4366 grant_lock_pc(r, lkb, ms);
4367 queue_cast(r, lkb, 0);
c54e04b0 4368 out:
e7fd4179
DT
4369 unlock_rsb(r);
4370 put_rsb(r);
b3f58d8f 4371 dlm_put_lkb(lkb);
6d40c4a7 4372 return 0;
e7fd4179
DT
4373}
4374
6d40c4a7 4375static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4376{
4377 struct dlm_lkb *lkb;
4378 struct dlm_rsb *r;
4379 int error;
4380
00e99ccd 4381 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
6d40c4a7
DT
4382 if (error)
4383 return error;
e7fd4179
DT
4384
4385 r = lkb->lkb_resource;
4386
4387 hold_rsb(r);
4388 lock_rsb(r);
4389
c54e04b0
DT
4390 error = validate_message(lkb, ms);
4391 if (error)
4392 goto out;
e7fd4179 4393
00e99ccd
AA
4394 queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode));
4395 lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode);
c54e04b0 4396 out:
e7fd4179
DT
4397 unlock_rsb(r);
4398 put_rsb(r);
b3f58d8f 4399 dlm_put_lkb(lkb);
6d40c4a7 4400 return 0;
e7fd4179
DT
4401}
4402
4403static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4404{
c04fecb4 4405 int len, error, ret_nodeid, from_nodeid, our_nodeid;
e7fd4179 4406
3428785a 4407 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
e7fd4179
DT
4408 our_nodeid = dlm_our_nodeid();
4409
4410 len = receive_extralen(ms);
4411
c04fecb4
DT
4412 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4413 &ret_nodeid, NULL);
e7fd4179
DT
4414
4415 /* Optimization: we're master so treat lookup as a request */
4416 if (!error && ret_nodeid == our_nodeid) {
4417 receive_request(ls, ms);
4418 return;
4419 }
e7fd4179
DT
4420 send_lookup_reply(ls, ms, ret_nodeid, error);
4421}
4422
4423static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4424{
c04fecb4
DT
4425 char name[DLM_RESNAME_MAXLEN+1];
4426 struct dlm_rsb *r;
4427 uint32_t hash, b;
4428 int rv, len, dir_nodeid, from_nodeid;
e7fd4179 4429
3428785a 4430 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
e7fd4179
DT
4431
4432 len = receive_extralen(ms);
4433
c04fecb4
DT
4434 if (len > DLM_RESNAME_MAXLEN) {
4435 log_error(ls, "receive_remove from %d bad len %d",
4436 from_nodeid, len);
4437 return;
4438 }
4439
00e99ccd 4440 dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash));
e7fd4179 4441 if (dir_nodeid != dlm_our_nodeid()) {
c04fecb4
DT
4442 log_error(ls, "receive_remove from %d bad nodeid %d",
4443 from_nodeid, dir_nodeid);
e7fd4179
DT
4444 return;
4445 }
4446
c04fecb4
DT
4447 /* Look for name on rsbtbl.toss, if it's there, kill it.
4448 If it's on rsbtbl.keep, it's being used, and we should ignore this
4449 message. This is an expected race between the dir node sending a
4450 request to the master node at the same time as the master node sends
4451 a remove to the dir node. The resolution to that race is for the
4452 dir node to ignore the remove message, and the master node to
4453 recreate the master rsb when it gets a request from the dir node for
4454 an rsb it doesn't have. */
4455
4456 memset(name, 0, sizeof(name));
4457 memcpy(name, ms->m_extra, len);
4458
4459 hash = jhash(name, len, 0);
4460 b = hash & (ls->ls_rsbtbl_size - 1);
4461
4462 spin_lock(&ls->ls_rsbtbl[b].lock);
4463
4464 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4465 if (rv) {
4466 /* verify the rsb is on keep list per comment above */
4467 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4468 if (rv) {
4469 /* should not happen */
4470 log_error(ls, "receive_remove from %d not found %s",
4471 from_nodeid, name);
4472 spin_unlock(&ls->ls_rsbtbl[b].lock);
4473 return;
4474 }
4475 if (r->res_master_nodeid != from_nodeid) {
4476 /* should not happen */
4477 log_error(ls, "receive_remove keep from %d master %d",
4478 from_nodeid, r->res_master_nodeid);
4479 dlm_print_rsb(r);
4480 spin_unlock(&ls->ls_rsbtbl[b].lock);
4481 return;
4482 }
4483
4484 log_debug(ls, "receive_remove from %d master %d first %x %s",
4485 from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4486 name);
4487 spin_unlock(&ls->ls_rsbtbl[b].lock);
4488 return;
4489 }
4490
4491 if (r->res_master_nodeid != from_nodeid) {
4492 log_error(ls, "receive_remove toss from %d master %d",
4493 from_nodeid, r->res_master_nodeid);
4494 dlm_print_rsb(r);
4495 spin_unlock(&ls->ls_rsbtbl[b].lock);
4496 return;
4497 }
4498
4499 if (kref_put(&r->res_ref, kill_rsb)) {
4500 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4501 spin_unlock(&ls->ls_rsbtbl[b].lock);
4502 dlm_free_rsb(r);
4503 } else {
4504 log_error(ls, "receive_remove from %d rsb ref error",
4505 from_nodeid);
4506 dlm_print_rsb(r);
4507 spin_unlock(&ls->ls_rsbtbl[b].lock);
4508 }
e7fd4179
DT
4509}
4510
8499137d
DT
4511static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4512{
00e99ccd 4513 do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid));
8499137d
DT
4514}
4515
6d40c4a7 4516static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4517{
4518 struct dlm_lkb *lkb;
4519 struct dlm_rsb *r;
ef0c2bb0 4520 int error, mstype, result;
3428785a 4521 int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
e7fd4179 4522
00e99ccd 4523 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
6d40c4a7
DT
4524 if (error)
4525 return error;
e7fd4179 4526
e7fd4179
DT
4527 r = lkb->lkb_resource;
4528 hold_rsb(r);
4529 lock_rsb(r);
4530
c54e04b0
DT
4531 error = validate_message(lkb, ms);
4532 if (error)
4533 goto out;
4534
ef0c2bb0
DT
4535 mstype = lkb->lkb_wait_type;
4536 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4875647a
DT
4537 if (error) {
4538 log_error(ls, "receive_request_reply %x remote %d %x result %d",
00e99ccd
AA
4539 lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid),
4540 from_dlm_errno(le32_to_cpu(ms->m_result)));
4875647a 4541 dlm_dump_rsb(r);
ef0c2bb0 4542 goto out;
4875647a 4543 }
ef0c2bb0 4544
e7fd4179
DT
4545 /* Optimization: the dir node was also the master, so it took our
4546 lookup as a request and sent request reply instead of lookup reply */
4547 if (mstype == DLM_MSG_LOOKUP) {
c04fecb4
DT
4548 r->res_master_nodeid = from_nodeid;
4549 r->res_nodeid = from_nodeid;
4550 lkb->lkb_nodeid = from_nodeid;
e7fd4179
DT
4551 }
4552
ef0c2bb0 4553 /* this is the value returned from do_request() on the master */
00e99ccd 4554 result = from_dlm_errno(le32_to_cpu(ms->m_result));
ef0c2bb0
DT
4555
4556 switch (result) {
e7fd4179 4557 case -EAGAIN:
ef0c2bb0 4558 /* request would block (be queued) on remote master */
e7fd4179
DT
4559 queue_cast(r, lkb, -EAGAIN);
4560 confirm_master(r, -EAGAIN);
ef0c2bb0 4561 unhold_lkb(lkb); /* undoes create_lkb() */
e7fd4179
DT
4562 break;
4563
4564 case -EINPROGRESS:
4565 case 0:
4566 /* request was queued or granted on remote master */
4567 receive_flags_reply(lkb, ms);
00e99ccd 4568 lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
7d3c1feb
DT
4569 if (is_altmode(lkb))
4570 munge_altmode(lkb, ms);
3ae1acf9 4571 if (result) {
e7fd4179 4572 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3ae1acf9
DT
4573 add_timeout(lkb);
4574 } else {
e7fd4179
DT
4575 grant_lock_pc(r, lkb, ms);
4576 queue_cast(r, lkb, 0);
4577 }
ef0c2bb0 4578 confirm_master(r, result);
e7fd4179
DT
4579 break;
4580
597d0cae 4581 case -EBADR:
e7fd4179
DT
4582 case -ENOTBLK:
4583 /* find_rsb failed to find rsb or rsb wasn't master */
c04fecb4
DT
4584 log_limit(ls, "receive_request_reply %x from %d %d "
4585 "master %d dir %d first %x %s", lkb->lkb_id,
4586 from_nodeid, result, r->res_master_nodeid,
4587 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4588
4589 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4590 r->res_master_nodeid != dlm_our_nodeid()) {
4591 /* cause _request_lock->set_master->send_lookup */
4592 r->res_master_nodeid = 0;
4593 r->res_nodeid = -1;
4594 lkb->lkb_nodeid = -1;
4595 }
ef0c2bb0
DT
4596
4597 if (is_overlap(lkb)) {
4598 /* we'll ignore error in cancel/unlock reply */
4599 queue_cast_overlap(r, lkb);
aec64e1b 4600 confirm_master(r, result);
ef0c2bb0 4601 unhold_lkb(lkb); /* undoes create_lkb() */
c04fecb4 4602 } else {
ef0c2bb0 4603 _request_lock(r, lkb);
c04fecb4
DT
4604
4605 if (r->res_master_nodeid == dlm_our_nodeid())
4606 confirm_master(r, 0);
4607 }
e7fd4179
DT
4608 break;
4609
4610 default:
ef0c2bb0
DT
4611 log_error(ls, "receive_request_reply %x error %d",
4612 lkb->lkb_id, result);
e7fd4179
DT
4613 }
4614
ef0c2bb0
DT
4615 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4616 log_debug(ls, "receive_request_reply %x result %d unlock",
4617 lkb->lkb_id, result);
4618 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4619 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4620 send_unlock(r, lkb);
4621 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4622 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4623 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4624 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4625 send_cancel(r, lkb);
4626 } else {
4627 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4628 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4629 }
4630 out:
e7fd4179
DT
4631 unlock_rsb(r);
4632 put_rsb(r);
b3f58d8f 4633 dlm_put_lkb(lkb);
6d40c4a7 4634 return 0;
e7fd4179
DT
4635}
4636
4637static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4638 struct dlm_message *ms)
4639{
e7fd4179 4640 /* this is the value returned from do_convert() on the master */
00e99ccd 4641 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
e7fd4179
DT
4642 case -EAGAIN:
4643 /* convert would block (be queued) on remote master */
4644 queue_cast(r, lkb, -EAGAIN);
4645 break;
4646
c85d65e9
DT
4647 case -EDEADLK:
4648 receive_flags_reply(lkb, ms);
4649 revert_lock_pc(r, lkb);
4650 queue_cast(r, lkb, -EDEADLK);
4651 break;
4652
e7fd4179
DT
4653 case -EINPROGRESS:
4654 /* convert was queued on remote master */
7d3c1feb
DT
4655 receive_flags_reply(lkb, ms);
4656 if (is_demoted(lkb))
2a7ce0ed 4657 munge_demoted(lkb);
e7fd4179
DT
4658 del_lkb(r, lkb);
4659 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3ae1acf9 4660 add_timeout(lkb);
e7fd4179
DT
4661 break;
4662
4663 case 0:
4664 /* convert was granted on remote master */
4665 receive_flags_reply(lkb, ms);
7d3c1feb 4666 if (is_demoted(lkb))
2a7ce0ed 4667 munge_demoted(lkb);
e7fd4179
DT
4668 grant_lock_pc(r, lkb, ms);
4669 queue_cast(r, lkb, 0);
4670 break;
4671
4672 default:
6d40c4a7 4673 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
3428785a 4674 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
00e99ccd
AA
4675 le32_to_cpu(ms->m_lkid),
4676 from_dlm_errno(le32_to_cpu(ms->m_result)));
6d40c4a7
DT
4677 dlm_print_rsb(r);
4678 dlm_print_lkb(lkb);
e7fd4179
DT
4679 }
4680}
4681
4682static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4683{
4684 struct dlm_rsb *r = lkb->lkb_resource;
ef0c2bb0 4685 int error;
e7fd4179
DT
4686
4687 hold_rsb(r);
4688 lock_rsb(r);
4689
c54e04b0
DT
4690 error = validate_message(lkb, ms);
4691 if (error)
4692 goto out;
4693
ef0c2bb0
DT
4694 /* stub reply can happen with waiters_mutex held */
4695 error = remove_from_waiters_ms(lkb, ms);
4696 if (error)
4697 goto out;
e7fd4179 4698
ef0c2bb0
DT
4699 __receive_convert_reply(r, lkb, ms);
4700 out:
e7fd4179
DT
4701 unlock_rsb(r);
4702 put_rsb(r);
4703}
4704
6d40c4a7 4705static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4706{
4707 struct dlm_lkb *lkb;
4708 int error;
4709
00e99ccd 4710 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
6d40c4a7
DT
4711 if (error)
4712 return error;
e7fd4179 4713
e7fd4179 4714 _receive_convert_reply(lkb, ms);
b3f58d8f 4715 dlm_put_lkb(lkb);
6d40c4a7 4716 return 0;
e7fd4179
DT
4717}
4718
4719static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4720{
4721 struct dlm_rsb *r = lkb->lkb_resource;
ef0c2bb0 4722 int error;
e7fd4179
DT
4723
4724 hold_rsb(r);
4725 lock_rsb(r);
4726
c54e04b0
DT
4727 error = validate_message(lkb, ms);
4728 if (error)
4729 goto out;
4730
ef0c2bb0
DT
4731 /* stub reply can happen with waiters_mutex held */
4732 error = remove_from_waiters_ms(lkb, ms);
4733 if (error)
4734 goto out;
4735
e7fd4179
DT
4736 /* this is the value returned from do_unlock() on the master */
4737
00e99ccd 4738 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
e7fd4179
DT
4739 case -DLM_EUNLOCK:
4740 receive_flags_reply(lkb, ms);
4741 remove_lock_pc(r, lkb);
4742 queue_cast(r, lkb, -DLM_EUNLOCK);
4743 break;
ef0c2bb0
DT
4744 case -ENOENT:
4745 break;
e7fd4179 4746 default:
ef0c2bb0 4747 log_error(r->res_ls, "receive_unlock_reply %x error %d",
00e99ccd 4748 lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result)));
e7fd4179 4749 }
ef0c2bb0 4750 out:
e7fd4179
DT
4751 unlock_rsb(r);
4752 put_rsb(r);
4753}
4754
6d40c4a7 4755static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4756{
4757 struct dlm_lkb *lkb;
4758 int error;
4759
00e99ccd 4760 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
6d40c4a7
DT
4761 if (error)
4762 return error;
e7fd4179 4763
e7fd4179 4764 _receive_unlock_reply(lkb, ms);
b3f58d8f 4765 dlm_put_lkb(lkb);
6d40c4a7 4766 return 0;
e7fd4179
DT
4767}
4768
4769static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4770{
4771 struct dlm_rsb *r = lkb->lkb_resource;
ef0c2bb0 4772 int error;
e7fd4179
DT
4773
4774 hold_rsb(r);
4775 lock_rsb(r);
4776
c54e04b0
DT
4777 error = validate_message(lkb, ms);
4778 if (error)
4779 goto out;
4780
ef0c2bb0
DT
4781 /* stub reply can happen with waiters_mutex held */
4782 error = remove_from_waiters_ms(lkb, ms);
4783 if (error)
4784 goto out;
4785
e7fd4179
DT
4786 /* this is the value returned from do_cancel() on the master */
4787
00e99ccd 4788 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
e7fd4179
DT
4789 case -DLM_ECANCEL:
4790 receive_flags_reply(lkb, ms);
4791 revert_lock_pc(r, lkb);
84d8cd69 4792 queue_cast(r, lkb, -DLM_ECANCEL);
ef0c2bb0
DT
4793 break;
4794 case 0:
e7fd4179
DT
4795 break;
4796 default:
ef0c2bb0 4797 log_error(r->res_ls, "receive_cancel_reply %x error %d",
00e99ccd
AA
4798 lkb->lkb_id,
4799 from_dlm_errno(le32_to_cpu(ms->m_result)));
e7fd4179 4800 }
ef0c2bb0 4801 out:
e7fd4179
DT
4802 unlock_rsb(r);
4803 put_rsb(r);
4804}
4805
6d40c4a7 4806static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
e7fd4179
DT
4807{
4808 struct dlm_lkb *lkb;
4809 int error;
4810
00e99ccd 4811 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
6d40c4a7
DT
4812 if (error)
4813 return error;
e7fd4179 4814
e7fd4179 4815 _receive_cancel_reply(lkb, ms);
b3f58d8f 4816 dlm_put_lkb(lkb);
6d40c4a7 4817 return 0;
e7fd4179
DT
4818}
4819
4820static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4821{
4822 struct dlm_lkb *lkb;
4823 struct dlm_rsb *r;
4824 int error, ret_nodeid;
c04fecb4 4825 int do_lookup_list = 0;
e7fd4179 4826
00e99ccd 4827 error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb);
e7fd4179 4828 if (error) {
00e99ccd
AA
4829 log_error(ls, "%s no lkid %x", __func__,
4830 le32_to_cpu(ms->m_lkid));
e7fd4179
DT
4831 return;
4832 }
4833
c04fecb4 4834 /* ms->m_result is the value returned by dlm_master_lookup on dir node
e7fd4179 4835 FIXME: will a non-zero error ever be returned? */
e7fd4179
DT
4836
4837 r = lkb->lkb_resource;
4838 hold_rsb(r);
4839 lock_rsb(r);
4840
ef0c2bb0
DT
4841 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4842 if (error)
4843 goto out;
4844
00e99ccd 4845 ret_nodeid = le32_to_cpu(ms->m_nodeid);
c04fecb4
DT
4846
4847 /* We sometimes receive a request from the dir node for this
4848	   rsb before we've received the dir node's lookup_reply for it.
4849 The request from the dir node implies we're the master, so we set
4850 ourself as master in receive_request_reply, and verify here that
4851 we are indeed the master. */
4852
4853 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4854 /* This should never happen */
4855 log_error(ls, "receive_lookup_reply %x from %d ret %d "
4856 "master %d dir %d our %d first %x %s",
3428785a
AA
4857 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
4858 ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
c04fecb4
DT
4859 dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4860 }
4861
e7fd4179 4862 if (ret_nodeid == dlm_our_nodeid()) {
c04fecb4 4863 r->res_master_nodeid = ret_nodeid;
e7fd4179 4864 r->res_nodeid = 0;
c04fecb4 4865 do_lookup_list = 1;
e7fd4179 4866 r->res_first_lkid = 0;
c04fecb4
DT
4867 } else if (ret_nodeid == -1) {
4868 /* the remote node doesn't believe it's the dir node */
4869 log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
3428785a 4870 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid));
c04fecb4
DT
4871 r->res_master_nodeid = 0;
4872 r->res_nodeid = -1;
4873 lkb->lkb_nodeid = -1;
e7fd4179 4874 } else {
c04fecb4
DT
4875 /* set_master() will set lkb_nodeid from r */
4876 r->res_master_nodeid = ret_nodeid;
e7fd4179
DT
4877 r->res_nodeid = ret_nodeid;
4878 }
4879
ef0c2bb0
DT
4880 if (is_overlap(lkb)) {
4881 log_debug(ls, "receive_lookup_reply %x unlock %x",
4882 lkb->lkb_id, lkb->lkb_flags);
4883 queue_cast_overlap(r, lkb);
4884 unhold_lkb(lkb); /* undoes create_lkb() */
4885 goto out_list;
4886 }
4887
e7fd4179
DT
4888 _request_lock(r, lkb);
4889
ef0c2bb0 4890 out_list:
c04fecb4 4891 if (do_lookup_list)
e7fd4179 4892 process_lookup_list(r);
ef0c2bb0 4893 out:
e7fd4179
DT
4894 unlock_rsb(r);
4895 put_rsb(r);
b3f58d8f 4896 dlm_put_lkb(lkb);
e7fd4179
DT
4897}
4898
6d40c4a7
DT
4899static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4900 uint32_t saved_seq)
e7fd4179 4901{
6d40c4a7
DT
4902 int error = 0, noent = 0;
4903
3428785a 4904 if (!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid))) {
c04fecb4 4905 log_limit(ls, "receive %d from non-member %d %x %x %d",
00e99ccd
AA
4906 le32_to_cpu(ms->m_type),
4907 le32_to_cpu(ms->m_header.h_nodeid),
4908 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
4909 from_dlm_errno(le32_to_cpu(ms->m_result)));
46b43eed
DT
4910 return;
4911 }
4912
e7fd4179
DT
4913 switch (ms->m_type) {
4914
4915 /* messages sent to a master node */
4916
00e99ccd 4917 case cpu_to_le32(DLM_MSG_REQUEST):
6d40c4a7 4918 error = receive_request(ls, ms);
e7fd4179
DT
4919 break;
4920
00e99ccd 4921 case cpu_to_le32(DLM_MSG_CONVERT):
6d40c4a7 4922 error = receive_convert(ls, ms);
e7fd4179
DT
4923 break;
4924
00e99ccd 4925 case cpu_to_le32(DLM_MSG_UNLOCK):
6d40c4a7 4926 error = receive_unlock(ls, ms);
e7fd4179
DT
4927 break;
4928
00e99ccd 4929 case cpu_to_le32(DLM_MSG_CANCEL):
6d40c4a7
DT
4930 noent = 1;
4931 error = receive_cancel(ls, ms);
e7fd4179
DT
4932 break;
4933
4934 /* messages sent from a master node (replies to above) */
4935
00e99ccd 4936 case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
6d40c4a7 4937 error = receive_request_reply(ls, ms);
e7fd4179
DT
4938 break;
4939
00e99ccd 4940 case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
6d40c4a7 4941 error = receive_convert_reply(ls, ms);
e7fd4179
DT
4942 break;
4943
00e99ccd 4944 case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
6d40c4a7 4945 error = receive_unlock_reply(ls, ms);
e7fd4179
DT
4946 break;
4947
00e99ccd 4948 case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
6d40c4a7 4949 error = receive_cancel_reply(ls, ms);
e7fd4179
DT
4950 break;
4951
4952 /* messages sent from a master node (only two types of async msg) */
4953
00e99ccd 4954 case cpu_to_le32(DLM_MSG_GRANT):
6d40c4a7
DT
4955 noent = 1;
4956 error = receive_grant(ls, ms);
e7fd4179
DT
4957 break;
4958
00e99ccd 4959 case cpu_to_le32(DLM_MSG_BAST):
6d40c4a7
DT
4960 noent = 1;
4961 error = receive_bast(ls, ms);
e7fd4179
DT
4962 break;
4963
4964 /* messages sent to a dir node */
4965
00e99ccd 4966 case cpu_to_le32(DLM_MSG_LOOKUP):
e7fd4179
DT
4967 receive_lookup(ls, ms);
4968 break;
4969
00e99ccd 4970 case cpu_to_le32(DLM_MSG_REMOVE):
e7fd4179
DT
4971 receive_remove(ls, ms);
4972 break;
4973
4974 /* messages sent from a dir node (remove has no reply) */
4975
00e99ccd 4976 case cpu_to_le32(DLM_MSG_LOOKUP_REPLY):
e7fd4179
DT
4977 receive_lookup_reply(ls, ms);
4978 break;
4979
8499137d
DT
4980 /* other messages */
4981
00e99ccd 4982 case cpu_to_le32(DLM_MSG_PURGE):
8499137d
DT
4983 receive_purge(ls, ms);
4984 break;
4985
e7fd4179 4986 default:
00e99ccd
AA
4987 log_error(ls, "unknown message type %d",
4988 le32_to_cpu(ms->m_type));
e7fd4179 4989 }
6d40c4a7
DT
4990
4991 /*
4992 * When checking for ENOENT, we're checking the result of
4993 * find_lkb(m_remid):
4994 *
4995 * The lock id referenced in the message wasn't found. This may
4996 * happen in normal usage for the async messages and cancel, so
4997 * only use log_debug for them.
4998 *
4875647a 4999 * Some errors are expected and normal.
6d40c4a7
DT
5000 */
5001
5002 if (error == -ENOENT && noent) {
4875647a 5003 log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
00e99ccd 5004 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
3428785a 5005 le32_to_cpu(ms->m_header.h_nodeid),
00e99ccd 5006 le32_to_cpu(ms->m_lkid), saved_seq);
6d40c4a7 5007 } else if (error == -ENOENT) {
4875647a 5008 log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
00e99ccd 5009 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
3428785a 5010 le32_to_cpu(ms->m_header.h_nodeid),
00e99ccd 5011 le32_to_cpu(ms->m_lkid), saved_seq);
6d40c4a7 5012
00e99ccd
AA
5013 if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT))
5014 dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash));
6d40c4a7 5015 }
4875647a
DT
5016
5017 if (error == -EINVAL) {
5018 log_error(ls, "receive %d inval from %d lkid %x remid %x "
5019 "saved_seq %u",
00e99ccd
AA
5020 le32_to_cpu(ms->m_type),
5021 le32_to_cpu(ms->m_header.h_nodeid),
5022 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
5023 saved_seq);
4875647a 5024 }
e7fd4179
DT
5025}
5026
c36258b5
DT
5027/* If the lockspace is in recovery mode (locking stopped), then normal
5028 messages are saved on the requestqueue for processing after recovery is
5029 done. When not in recovery mode, we wait for dlm_recoverd to drain saved
5030 messages off the requestqueue before we process new ones. This occurs right
5031 after recovery completes when we transition from saving all messages on
5032 requestqueue, to processing all the saved messages, to processing new
5033 messages as they arrive. */
e7fd4179 5034
c36258b5
DT
5035static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
5036 int nodeid)
5037{
5038 if (dlm_locking_stopped(ls)) {
c04fecb4
DT
5039 /* If we were a member of this lockspace, left, and rejoined,
5040 other nodes may still be sending us messages from the
5041 lockspace generation before we left. */
5042 if (!ls->ls_generation) {
5043 log_limit(ls, "receive %d from %d ignore old gen",
00e99ccd 5044 le32_to_cpu(ms->m_type), nodeid);
c04fecb4
DT
5045 return;
5046 }
5047
8b0d8e03 5048 dlm_add_requestqueue(ls, nodeid, ms);
c36258b5
DT
5049 } else {
5050 dlm_wait_requestqueue(ls);
6d40c4a7 5051 _receive_message(ls, ms, 0);
c36258b5
DT
5052 }
5053}
5054
5055/* This is called by dlm_recoverd to process messages that were saved on
5056 the requestqueue. */
5057
6d40c4a7
DT
5058void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
5059 uint32_t saved_seq)
c36258b5 5060{
6d40c4a7 5061 _receive_message(ls, ms, saved_seq);
c36258b5
DT
5062}
5063
5064/* This is called by the midcomms layer when something is received for
5065 the lockspace. It could be either a MSG (normal message sent as part of
5066 standard locking activity) or an RCOM (recovery message sent as part of
5067 lockspace recovery). */
5068
eef7d739 5069void dlm_receive_buffer(union dlm_packet *p, int nodeid)
c36258b5 5070{
eef7d739 5071 struct dlm_header *hd = &p->header;
c36258b5
DT
5072 struct dlm_ls *ls;
5073 int type = 0;
5074
5075 switch (hd->h_cmd) {
5076 case DLM_MSG:
00e99ccd 5077 type = le32_to_cpu(p->message.m_type);
c36258b5
DT
5078 break;
5079 case DLM_RCOM:
2f9dbeda 5080 type = le32_to_cpu(p->rcom.rc_type);
c36258b5
DT
5081 break;
5082 default:
5083 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
5084 return;
5085 }
5086
3428785a 5087 if (le32_to_cpu(hd->h_nodeid) != nodeid) {
c36258b5 5088 log_print("invalid h_nodeid %d from %d lockspace %x",
3428785a
AA
5089 le32_to_cpu(hd->h_nodeid), nodeid,
5090 le32_to_cpu(hd->u.h_lockspace));
c36258b5
DT
5091 return;
5092 }
5093
3428785a 5094 ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace));
c36258b5 5095 if (!ls) {
4875647a
DT
5096 if (dlm_config.ci_log_debug) {
5097 printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
5098 "%u from %d cmd %d type %d\n",
3428785a
AA
5099 le32_to_cpu(hd->u.h_lockspace), nodeid,
5100 hd->h_cmd, type);
4875647a 5101 }
c36258b5
DT
5102
5103 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
eef7d739 5104 dlm_send_ls_not_ready(nodeid, &p->rcom);
c36258b5
DT
5105 return;
5106 }
5107
5108 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
5109 be inactive (in this ls) before transitioning to recovery mode */
5110
5111 down_read(&ls->ls_recv_active);
5112 if (hd->h_cmd == DLM_MSG)
eef7d739 5113 dlm_receive_message(ls, &p->message, nodeid);
f45307d3 5114 else if (hd->h_cmd == DLM_RCOM)
eef7d739 5115 dlm_receive_rcom(ls, &p->rcom, nodeid);
f45307d3
AA
5116 else
5117 log_error(ls, "invalid h_cmd %d from %d lockspace %x",
5118 hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
c36258b5
DT
5119 up_read(&ls->ls_recv_active);
5120
5121 dlm_put_lockspace(ls);
5122}
e7fd4179 5123
2a7ce0ed
DT
5124static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5125 struct dlm_message *ms_stub)
e7fd4179
DT
5126{
5127 if (middle_conversion(lkb)) {
5128 hold_lkb(lkb);
2a7ce0ed 5129 memset(ms_stub, 0, sizeof(struct dlm_message));
00e99ccd
AA
5130 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
5131 ms_stub->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
5132 ms_stub->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS));
3428785a 5133 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
2a7ce0ed 5134 _receive_convert_reply(lkb, ms_stub);
e7fd4179
DT
5135
5136 /* Same special case as in receive_rcom_lock_args() */
5137 lkb->lkb_grmode = DLM_LOCK_IV;
5138 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5139 unhold_lkb(lkb);
5140
5141 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5142 lkb->lkb_flags |= DLM_IFL_RESEND;
5143 }
5144
5145 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5146 conversions are async; there's no reply from the remote master */
5147}
5148
5149/* A waiting lkb needs recovery if the master node has failed, or
5150 the master node is changing (only when no directory is used) */
5151
13ef1111
DT
5152static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5153 int dir_nodeid)
e7fd4179 5154{
4875647a 5155 if (dlm_no_directory(ls))
13ef1111
DT
5156 return 1;
5157
4875647a 5158 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
e7fd4179
DT
5159 return 1;
5160
5161 return 0;
5162}
5163
5164/* Recovery for locks that are waiting for replies from nodes that are now
5165 gone. We can just complete unlocks and cancels by faking a reply from the
5166 dead node. Requests and up-conversions we flag to be resent after
5167 recovery. Down-conversions can just be completed with a fake reply like
5168 unlocks. Conversions between PR and CW need special attention. */
5169
5170void dlm_recover_waiters_pre(struct dlm_ls *ls)
5171{
5172 struct dlm_lkb *lkb, *safe;
2a7ce0ed 5173 struct dlm_message *ms_stub;
601342ce 5174 int wait_type, stub_unlock_result, stub_cancel_result;
13ef1111 5175 int dir_nodeid;
e7fd4179 5176
102e67d4 5177 ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
0d37eca7 5178 if (!ms_stub)
2a7ce0ed 5179 return;
2a7ce0ed 5180
90135925 5181 mutex_lock(&ls->ls_waiters_mutex);
e7fd4179
DT
5182
5183 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
2a7ce0ed 5184
13ef1111
DT
5185 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5186
2a7ce0ed
DT
5187 /* exclude debug messages about unlocks because there can be so
5188 many and they aren't very interesting */
5189
5190 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
13ef1111
DT
5191 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5192 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5193 lkb->lkb_id,
5194 lkb->lkb_remid,
5195 lkb->lkb_wait_type,
5196 lkb->lkb_resource->res_nodeid,
5197 lkb->lkb_nodeid,
5198 lkb->lkb_wait_nodeid,
5199 dir_nodeid);
2a7ce0ed 5200 }
e7fd4179
DT
5201
5202 /* all outstanding lookups, regardless of destination will be
5203 resent after recovery is done */
5204
5205 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5206 lkb->lkb_flags |= DLM_IFL_RESEND;
5207 continue;
5208 }
5209
13ef1111 5210 if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
e7fd4179
DT
5211 continue;
5212
601342ce
DT
5213 wait_type = lkb->lkb_wait_type;
5214 stub_unlock_result = -DLM_EUNLOCK;
5215 stub_cancel_result = -DLM_ECANCEL;
5216
5217 /* Main reply may have been received leaving a zero wait_type,
5218 but a reply for the overlapping op may not have been
5219 received. In that case we need to fake the appropriate
5220 reply for the overlap op. */
5221
5222 if (!wait_type) {
5223 if (is_overlap_cancel(lkb)) {
5224 wait_type = DLM_MSG_CANCEL;
5225 if (lkb->lkb_grmode == DLM_LOCK_IV)
5226 stub_cancel_result = 0;
5227 }
5228 if (is_overlap_unlock(lkb)) {
5229 wait_type = DLM_MSG_UNLOCK;
5230 if (lkb->lkb_grmode == DLM_LOCK_IV)
5231 stub_unlock_result = -ENOENT;
5232 }
5233
5234 log_debug(ls, "rwpre overlap %x %x %d %d %d",
5235 lkb->lkb_id, lkb->lkb_flags, wait_type,
5236 stub_cancel_result, stub_unlock_result);
5237 }
5238
5239 switch (wait_type) {
e7fd4179
DT
5240
5241 case DLM_MSG_REQUEST:
5242 lkb->lkb_flags |= DLM_IFL_RESEND;
5243 break;
5244
5245 case DLM_MSG_CONVERT:
2a7ce0ed 5246 recover_convert_waiter(ls, lkb, ms_stub);
e7fd4179
DT
5247 break;
5248
5249 case DLM_MSG_UNLOCK:
5250 hold_lkb(lkb);
2a7ce0ed 5251 memset(ms_stub, 0, sizeof(struct dlm_message));
00e99ccd
AA
5252 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
5253 ms_stub->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY);
5254 ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_unlock_result));
3428785a 5255 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
2a7ce0ed 5256 _receive_unlock_reply(lkb, ms_stub);
b3f58d8f 5257 dlm_put_lkb(lkb);
e7fd4179
DT
5258 break;
5259
5260 case DLM_MSG_CANCEL:
5261 hold_lkb(lkb);
2a7ce0ed 5262 memset(ms_stub, 0, sizeof(struct dlm_message));
00e99ccd
AA
5263 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
5264 ms_stub->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY);
5265 ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_cancel_result));
3428785a 5266 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
2a7ce0ed 5267 _receive_cancel_reply(lkb, ms_stub);
b3f58d8f 5268 dlm_put_lkb(lkb);
e7fd4179
DT
5269 break;
5270
5271 default:
601342ce
DT
5272 log_error(ls, "invalid lkb wait_type %d %d",
5273 lkb->lkb_wait_type, wait_type);
e7fd4179 5274 }
81456807 5275 schedule();
e7fd4179 5276 }
90135925 5277 mutex_unlock(&ls->ls_waiters_mutex);
2a7ce0ed 5278 kfree(ms_stub);
e7fd4179
DT
5279}
5280
ef0c2bb0 5281static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
e7fd4179 5282{
dc1acd5c 5283 struct dlm_lkb *lkb = NULL, *iter;
e7fd4179 5284
90135925 5285 mutex_lock(&ls->ls_waiters_mutex);
dc1acd5c
JK
5286 list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
5287 if (iter->lkb_flags & DLM_IFL_RESEND) {
5288 hold_lkb(iter);
5289 lkb = iter;
e7fd4179
DT
5290 break;
5291 }
5292 }
90135925 5293 mutex_unlock(&ls->ls_waiters_mutex);
e7fd4179 5294
ef0c2bb0 5295 return lkb;
e7fd4179
DT
5296}
5297
5298/* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
5299 master or dir-node for r. Processing the lkb may result in it being placed
5300 back on waiters. */
5301
ef0c2bb0
DT
5302/* We do this after normal locking has been enabled and any saved messages
5303 (in requestqueue) have been processed. We should be confident that at
5304 this point we won't get or process a reply to any of these waiting
5305 operations. But, new ops may be coming in on the rsbs/locks here from
5306 userspace or remotely. */
5307
5308/* there may have been an overlap unlock/cancel prior to recovery or after
5309   recovery. if before, the lkb may still have a positive wait_count; if after, the
5310 overlap flag would just have been set and nothing new sent. we can be
5311   confident here that any replies to either the initial op or overlap ops
5312 prior to recovery have been received. */
5313
e7fd4179
DT
5314int dlm_recover_waiters_post(struct dlm_ls *ls)
5315{
5316 struct dlm_lkb *lkb;
5317 struct dlm_rsb *r;
ef0c2bb0 5318 int error = 0, mstype, err, oc, ou;
e7fd4179
DT
5319
5320 while (1) {
5321 if (dlm_locking_stopped(ls)) {
5322 log_debug(ls, "recover_waiters_post aborted");
5323 error = -EINTR;
5324 break;
5325 }
5326
ef0c2bb0
DT
5327 lkb = find_resend_waiter(ls);
5328 if (!lkb)
e7fd4179
DT
5329 break;
5330
5331 r = lkb->lkb_resource;
ef0c2bb0
DT
5332 hold_rsb(r);
5333 lock_rsb(r);
5334
5335 mstype = lkb->lkb_wait_type;
5336 oc = is_overlap_cancel(lkb);
5337 ou = is_overlap_unlock(lkb);
5338 err = 0;
e7fd4179 5339
13ef1111
DT
5340 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5341 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5342 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5343 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5344 dlm_dir_nodeid(r), oc, ou);
e7fd4179 5345
ef0c2bb0
DT
5346 /* At this point we assume that we won't get a reply to any
5347 previous op or overlap op on this lock. First, do a big
5348 remove_from_waiters() for all previous ops. */
5349
5350 lkb->lkb_flags &= ~DLM_IFL_RESEND;
5351 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5352 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5353 lkb->lkb_wait_type = 0;
1689c169
AA
5354 /* drop all wait_count references; we still
5355 * hold a reference for this iteration.
5356 */
5357 while (lkb->lkb_wait_count) {
5358 lkb->lkb_wait_count--;
5359 unhold_lkb(lkb);
5360 }
ef0c2bb0
DT
5361 mutex_lock(&ls->ls_waiters_mutex);
5362 list_del_init(&lkb->lkb_wait_reply);
5363 mutex_unlock(&ls->ls_waiters_mutex);
ef0c2bb0
DT
5364
5365 if (oc || ou) {
5366 /* do an unlock or cancel instead of resending */
5367 switch (mstype) {
5368 case DLM_MSG_LOOKUP:
5369 case DLM_MSG_REQUEST:
5370 queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5371 -DLM_ECANCEL);
5372 unhold_lkb(lkb); /* undoes create_lkb() */
5373 break;
5374 case DLM_MSG_CONVERT:
5375 if (oc) {
5376 queue_cast(r, lkb, -DLM_ECANCEL);
5377 } else {
5378 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5379 _unlock_lock(r, lkb);
5380 }
5381 break;
5382 default:
5383 err = 1;
5384 }
5385 } else {
5386 switch (mstype) {
5387 case DLM_MSG_LOOKUP:
5388 case DLM_MSG_REQUEST:
5389 _request_lock(r, lkb);
5390 if (is_master(r))
5391 confirm_master(r, 0);
5392 break;
5393 case DLM_MSG_CONVERT:
5394 _convert_lock(r, lkb);
5395 break;
5396 default:
5397 err = 1;
5398 }
e7fd4179 5399 }
ef0c2bb0 5400
13ef1111
DT
5401 if (err) {
5402 log_error(ls, "waiter %x msg %d r_nodeid %d "
5403 "dir_nodeid %d overlap %d %d",
5404 lkb->lkb_id, mstype, r->res_nodeid,
5405 dlm_dir_nodeid(r), oc, ou);
5406 }
ef0c2bb0
DT
5407 unlock_rsb(r);
5408 put_rsb(r);
5409 dlm_put_lkb(lkb);
e7fd4179
DT
5410 }
5411
5412 return error;
5413}
5414
4875647a
DT
5415static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5416 struct list_head *list)
e7fd4179 5417{
e7fd4179
DT
5418 struct dlm_lkb *lkb, *safe;
5419
4875647a
DT
5420 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5421 if (!is_master_copy(lkb))
5422 continue;
5423
5424 /* don't purge lkbs we've added in recover_master_copy for
5425 the current recovery seq */
5426
5427 if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5428 continue;
5429
5430 del_lkb(r, lkb);
5431
5432 /* this put should free the lkb */
5433 if (!dlm_put_lkb(lkb))
5434 log_error(ls, "purged mstcpy lkb not released");
e7fd4179
DT
5435 }
5436}
5437
4875647a 5438void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
e7fd4179 5439{
4875647a 5440 struct dlm_ls *ls = r->res_ls;
e7fd4179 5441
4875647a
DT
5442 purge_mstcpy_list(ls, r, &r->res_grantqueue);
5443 purge_mstcpy_list(ls, r, &r->res_convertqueue);
5444 purge_mstcpy_list(ls, r, &r->res_waitqueue);
e7fd4179
DT
5445}
5446
4875647a
DT
5447static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5448 struct list_head *list,
5449 int nodeid_gone, unsigned int *count)
e7fd4179 5450{
4875647a 5451 struct dlm_lkb *lkb, *safe;
e7fd4179 5452
4875647a
DT
5453 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5454 if (!is_master_copy(lkb))
5455 continue;
5456
5457 if ((lkb->lkb_nodeid == nodeid_gone) ||
5458 dlm_is_removed(ls, lkb->lkb_nodeid)) {
5459
da8c6663
DT
5460 /* tell recover_lvb to invalidate the lvb
5461 because a node holding EX/PW failed */
5462 if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5463 (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5464 rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5465 }
5466
4875647a
DT
5467 del_lkb(r, lkb);
5468
5469 /* this put should free the lkb */
5470 if (!dlm_put_lkb(lkb))
5471 log_error(ls, "purged dead lkb not released");
5472
5473 rsb_set_flag(r, RSB_RECOVER_GRANT);
5474
5475 (*count)++;
5476 }
5477 }
e7fd4179
DT
5478}
5479
5480/* Get rid of locks held by nodes that are gone. */
5481
4875647a 5482void dlm_recover_purge(struct dlm_ls *ls)
e7fd4179
DT
5483{
5484 struct dlm_rsb *r;
4875647a
DT
5485 struct dlm_member *memb;
5486 int nodes_count = 0;
5487 int nodeid_gone = 0;
5488 unsigned int lkb_count = 0;
5489
5490 /* cache one removed nodeid to optimize the common
5491 case of a single node removed */
5492
5493 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5494 nodes_count++;
5495 nodeid_gone = memb->nodeid;
5496 }
e7fd4179 5497
4875647a
DT
5498 if (!nodes_count)
5499 return;
e7fd4179
DT
5500
5501 down_write(&ls->ls_root_sem);
5502 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5503 hold_rsb(r);
5504 lock_rsb(r);
4875647a
DT
5505 if (is_master(r)) {
5506 purge_dead_list(ls, r, &r->res_grantqueue,
5507 nodeid_gone, &lkb_count);
5508 purge_dead_list(ls, r, &r->res_convertqueue,
5509 nodeid_gone, &lkb_count);
5510 purge_dead_list(ls, r, &r->res_waitqueue,
5511 nodeid_gone, &lkb_count);
5512 }
e7fd4179
DT
5513 unlock_rsb(r);
5514 unhold_rsb(r);
4875647a 5515 cond_resched();
e7fd4179
DT
5516 }
5517 up_write(&ls->ls_root_sem);
5518
4875647a 5519 if (lkb_count)
075f0177 5520 log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
4875647a 5521 lkb_count, nodes_count);
e7fd4179
DT
5522}
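/*
 * Added summary (not part of the original file): dlm_recover_purge() above
 * only walks resources we master, and purge_dead_list() drops an lkb when it
 * is a master copy whose owner node is gone:
 *
 *	if (is_master_copy(lkb) &&
 *	    (lkb->lkb_nodeid == nodeid_gone ||
 *	     dlm_is_removed(ls, lkb->lkb_nodeid))) {
 *		// invalidate the lvb if the dead holder had PW/EX with VALBLK
 *		// del_lkb() + dlm_put_lkb(), then mark RSB_RECOVER_GRANT
 *	}
 *
 * so that dlm_recover_grant() can later re-evaluate the remaining locks.
 */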
5523
4875647a 5524static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
97a35d1e 5525{
9beb3bf5 5526 struct rb_node *n;
4875647a 5527 struct dlm_rsb *r;
97a35d1e 5528
c7be761a 5529 spin_lock(&ls->ls_rsbtbl[bucket].lock);
9beb3bf5
BP
5530 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5531 r = rb_entry(n, struct dlm_rsb, res_hashnode);
4875647a
DT
5532
5533 if (!rsb_flag(r, RSB_RECOVER_GRANT))
5534 continue;
c503a621
DT
5535 if (!is_master(r)) {
5536 rsb_clear_flag(r, RSB_RECOVER_GRANT);
97a35d1e 5537 continue;
c503a621 5538 }
97a35d1e 5539 hold_rsb(r);
4875647a
DT
5540 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5541 return r;
97a35d1e 5542 }
c7be761a 5543 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
4875647a 5544 return NULL;
97a35d1e
DT
5545}
5546
4875647a
DT
5547/*
5548 * Attempt to grant locks on resources that we are the master of.
5549 * Locks may have become grantable during recovery because locks
5550 * from departed nodes have been purged (or not rebuilt), allowing
5551 * previously blocked locks to now be granted. The subset of rsb's
5552 * we are interested in are those with lkb's on either the convert or
5553 * waiting queues.
5554 *
5555 * Simplest would be to go through each master rsb and check for non-empty
5556 * convert or waiting queues, and attempt to grant on those rsbs.
5557 * Checking the queues requires lock_rsb, though, for which we'd need
5558 * to release the rsbtbl lock. This would make iterating through all
5559 * rsb's very inefficient. So, we rely on earlier recovery routines
5560 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
5561 * locks for.
5562 */
5563
5564void dlm_recover_grant(struct dlm_ls *ls)
e7fd4179
DT
5565{
5566 struct dlm_rsb *r;
2b4e926a 5567 int bucket = 0;
4875647a
DT
5568 unsigned int count = 0;
5569 unsigned int rsb_count = 0;
5570 unsigned int lkb_count = 0;
e7fd4179 5571
2b4e926a 5572 while (1) {
4875647a 5573 r = find_grant_rsb(ls, bucket);
2b4e926a
DT
5574 if (!r) {
5575 if (bucket == ls->ls_rsbtbl_size - 1)
5576 break;
5577 bucket++;
97a35d1e 5578 continue;
2b4e926a 5579 }
4875647a
DT
5580 rsb_count++;
5581 count = 0;
97a35d1e 5582 lock_rsb(r);
c503a621 5583 /* the RECOVER_GRANT flag is checked in the grant path */
4875647a 5584 grant_pending_locks(r, &count);
c503a621 5585 rsb_clear_flag(r, RSB_RECOVER_GRANT);
4875647a
DT
5586 lkb_count += count;
5587 confirm_master(r, 0);
97a35d1e
DT
5588 unlock_rsb(r);
5589 put_rsb(r);
4875647a 5590 cond_resched();
e7fd4179 5591 }
4875647a
DT
5592
5593 if (lkb_count)
075f0177 5594 log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
4875647a 5595 lkb_count, rsb_count);
e7fd4179
DT
5596}
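/*
 * Added flow sketch (not part of the original file; ordering inferred from
 * the comments above, see dlm_recoverd.c for the authoritative sequence):
 *
 *	dlm_recover_purge(ls);    - drops locks of departed nodes and sets
 *	                            RSB_RECOVER_GRANT on the affected rsbs
 *	... masters and locks are rebuilt (MSTCPY lkbs may also set the flag)
 *	dlm_recover_grant(ls);    - for every flagged rsb we master, calls
 *	                            grant_pending_locks() and clears the flag
 */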
5597
5598static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5599 uint32_t remid)
5600{
5601 struct dlm_lkb *lkb;
5602
5603 list_for_each_entry(lkb, head, lkb_statequeue) {
5604 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5605 return lkb;
5606 }
5607 return NULL;
5608}
5609
5610static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5611 uint32_t remid)
5612{
5613 struct dlm_lkb *lkb;
5614
5615 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5616 if (lkb)
5617 return lkb;
5618 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5619 if (lkb)
5620 return lkb;
5621 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5622 if (lkb)
5623 return lkb;
5624 return NULL;
5625}
5626
ae773d0b 5627/* needs at least dlm_rcom + rcom_lock */
e7fd4179
DT
5628static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5629 struct dlm_rsb *r, struct dlm_rcom *rc)
5630{
5631 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
e7fd4179 5632
3428785a 5633 lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
163a1859
AV
5634 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5635 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5636 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5637 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
e7fd4179 5638 lkb->lkb_flags |= DLM_IFL_MSTCPY;
163a1859 5639 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
e7fd4179
DT
5640 lkb->lkb_rqmode = rl->rl_rqmode;
5641 lkb->lkb_grmode = rl->rl_grmode;
5642 /* don't set lkb_status because add_lkb wants to set it itself */
5643
8304d6f2
DT
5644 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5645 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
e7fd4179 5646
e7fd4179 5647 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3428785a
AA
5648 int lvblen = le16_to_cpu(rc->rc_header.h_length) -
5649 sizeof(struct dlm_rcom) - sizeof(struct rcom_lock);
a5dd0631
AV
5650 if (lvblen > ls->ls_lvblen)
5651 return -EINVAL;
52bda2b5 5652 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
e7fd4179
DT
5653 if (!lkb->lkb_lvbptr)
5654 return -ENOMEM;
e7fd4179
DT
5655 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5656 }
5657
5658 /* Conversions between PR and CW (middle modes) need special handling.
5659 The real granted mode of these converting locks cannot be determined
5660 until all locks have been rebuilt on the rsb (recover_conversion) */
5661
163a1859
AV
5662 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5663 middle_conversion(lkb)) {
e7fd4179
DT
5664 rl->rl_status = DLM_LKSTS_CONVERT;
5665 lkb->lkb_grmode = DLM_LOCK_IV;
5666 rsb_set_flag(r, RSB_RECOVER_CONVERT);
5667 }
5668
5669 return 0;
5670}
5671
5672/* This lkb may have been recovered in a previous aborted recovery so we need
5673 to check if the rsb already has an lkb with the given remote nodeid/lkid.
5674 If so we just send back a standard reply. If not, we create a new lkb with
5675 the given values and send back our lkid. We send back our lkid by sending
5676 back the rcom_lock struct we got but with the remid field filled in. */
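/*
 * Added sketch (not part of the original file): the rebuild is a simple
 * request/echo exchange over struct rcom_lock (field names as used below,
 * arrows are just notation):
 *
 *	lock holder                          new master
 *	dlm_send_rcom_lock()  ------------>  dlm_recover_master_copy()
 *	  rl_lkid = holder's lkid              find or create MSTCPY lkb
 *	                                       rl_remid = master's lkid
 *	                      <------------  reply carries the same rcom_lock
 *	dlm_recover_process_copy()
 *	  lkb->lkb_remid = rl_remid
 */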
5677
ae773d0b 5678/* needs at least dlm_rcom + rcom_lock */
e7fd4179
DT
5679int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5680{
5681 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5682 struct dlm_rsb *r;
5683 struct dlm_lkb *lkb;
6d40c4a7 5684 uint32_t remid = 0;
3428785a 5685 int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
e7fd4179
DT
5686 int error;
5687
5688 if (rl->rl_parent_lkid) {
5689 error = -EOPNOTSUPP;
5690 goto out;
5691 }
5692
6d40c4a7
DT
5693 remid = le32_to_cpu(rl->rl_lkid);
5694
4875647a
DT
5695 /* In general we expect the rsb returned to be R_MASTER, but we don't
5696 have to require it. Recovery of masters on one node can overlap
5697 recovery of locks on another node, so one node can send us MSTCPY
5698 locks before we've made ourselves master of this rsb. We can still
5699 add new MSTCPY locks that we receive here without any harm; when
5700 we make ourselves master, dlm_recover_masters() won't touch the
5701 MSTCPY locks we've received early. */
5702
c04fecb4
DT
5703 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5704 from_nodeid, R_RECEIVE_RECOVER, &r);
e7fd4179
DT
5705 if (error)
5706 goto out;
5707
c04fecb4
DT
5708 lock_rsb(r);
5709
4875647a
DT
5710 if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5711 log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
c04fecb4 5712 from_nodeid, remid);
4875647a 5713 error = -EBADR;
c04fecb4 5714 goto out_unlock;
4875647a
DT
5715 }
5716
c04fecb4 5717 lkb = search_remid(r, from_nodeid, remid);
e7fd4179
DT
5718 if (lkb) {
5719 error = -EEXIST;
5720 goto out_remid;
5721 }
5722
5723 error = create_lkb(ls, &lkb);
5724 if (error)
5725 goto out_unlock;
5726
5727 error = receive_rcom_lock_args(ls, lkb, r, rc);
5728 if (error) {
b3f58d8f 5729 __put_lkb(ls, lkb);
e7fd4179
DT
5730 goto out_unlock;
5731 }
5732
5733 attach_lkb(r, lkb);
5734 add_lkb(r, lkb, rl->rl_status);
4875647a
DT
5735 ls->ls_recover_locks_in++;
5736
5737 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5738 rsb_set_flag(r, RSB_RECOVER_GRANT);
e7fd4179
DT
5739
5740 out_remid:
5741 /* this is the new value returned to the lock holder for
5742 saving in its process-copy lkb */
163a1859 5743 rl->rl_remid = cpu_to_le32(lkb->lkb_id);
e7fd4179 5744
4875647a
DT
5745 lkb->lkb_recover_seq = ls->ls_recover_seq;
5746
e7fd4179
DT
5747 out_unlock:
5748 unlock_rsb(r);
5749 put_rsb(r);
5750 out:
6d40c4a7 5751 if (error && error != -EEXIST)
075f0177 5752 log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
c04fecb4 5753 from_nodeid, remid, error);
163a1859 5754 rl->rl_result = cpu_to_le32(error);
e7fd4179
DT
5755 return error;
5756}
5757
ae773d0b 5758/* needs at least dlm_rcom + rcom_lock */
e7fd4179
DT
5759int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5760{
5761 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5762 struct dlm_rsb *r;
5763 struct dlm_lkb *lkb;
6d40c4a7
DT
5764 uint32_t lkid, remid;
5765 int error, result;
5766
5767 lkid = le32_to_cpu(rl->rl_lkid);
5768 remid = le32_to_cpu(rl->rl_remid);
5769 result = le32_to_cpu(rl->rl_result);
e7fd4179 5770
6d40c4a7 5771 error = find_lkb(ls, lkid, &lkb);
e7fd4179 5772 if (error) {
6d40c4a7 5773 log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
3428785a
AA
5774 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5775 result);
e7fd4179
DT
5776 return error;
5777 }
5778
4875647a
DT
5779 r = lkb->lkb_resource;
5780 hold_rsb(r);
5781 lock_rsb(r);
5782
6d40c4a7
DT
5783 if (!is_process_copy(lkb)) {
5784 log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
3428785a
AA
5785 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5786 result);
4875647a
DT
5787 dlm_dump_rsb(r);
5788 unlock_rsb(r);
5789 put_rsb(r);
5790 dlm_put_lkb(lkb);
6d40c4a7
DT
5791 return -EINVAL;
5792 }
e7fd4179 5793
6d40c4a7 5794 switch (result) {
dc200a88
DT
5795 case -EBADR:
5796 /* There's a chance the new master received our lock before
5797 dlm_recover_master_reply(); this wouldn't happen if we did
5798 a barrier between recover_masters and recover_locks. */
6d40c4a7
DT
5799
5800 log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
3428785a
AA
5801 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5802 result);
6d40c4a7 5803
dc200a88
DT
5804 dlm_send_rcom_lock(r, lkb);
5805 goto out;
e7fd4179 5806 case -EEXIST:
e7fd4179 5807 case 0:
6d40c4a7 5808 lkb->lkb_remid = remid;
e7fd4179
DT
5809 break;
5810 default:
6d40c4a7 5811 log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
3428785a
AA
5812 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5813 result);
e7fd4179
DT
5814 }
5815
5816 /* an ack for dlm_recover_locks() which waits for replies from
5817 all the locks it sends to new masters */
5818 dlm_recovered_lock(r);
dc200a88 5819 out:
e7fd4179
DT
5820 unlock_rsb(r);
5821 put_rsb(r);
b3f58d8f 5822 dlm_put_lkb(lkb);
e7fd4179
DT
5823
5824 return 0;
5825}
5826
6b0afc0c 5827#ifdef CONFIG_DLM_DEPRECATED_API
597d0cae
DT
5828int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5829 int mode, uint32_t flags, void *name, unsigned int namelen,
d7db923e 5830 unsigned long timeout_cs)
6b0afc0c
AA
5831#else
5832int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5833 int mode, uint32_t flags, void *name, unsigned int namelen)
5834#endif
597d0cae
DT
5835{
5836 struct dlm_lkb *lkb;
5837 struct dlm_args args;
5838 int error;
5839
85e86edf 5840 dlm_lock_recovery(ls);
597d0cae
DT
5841
5842 error = create_lkb(ls, &lkb);
5843 if (error) {
5844 kfree(ua);
5845 goto out;
5846 }
5847
7a3de732
AA
5848 trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
5849
597d0cae 5850 if (flags & DLM_LKF_VALBLK) {
573c24c4 5851 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
597d0cae
DT
5852 if (!ua->lksb.sb_lvbptr) {
5853 kfree(ua);
5854 __put_lkb(ls, lkb);
5855 error = -ENOMEM;
7a3de732 5856 goto out_trace_end;
597d0cae
DT
5857 }
5858 }
6b0afc0c 5859#ifdef CONFIG_DLM_DEPRECATED_API
d7db923e 5860 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
e5dae548 5861 fake_astfn, ua, fake_bastfn, &args);
6b0afc0c
AA
5862#else
5863 error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua,
5864 fake_bastfn, &args);
5865#endif
597d0cae 5866 if (error) {
d47b41ac
VA
5867 kfree(ua->lksb.sb_lvbptr);
5868 ua->lksb.sb_lvbptr = NULL;
5869 kfree(ua);
597d0cae 5870 __put_lkb(ls, lkb);
7a3de732 5871 goto out_trace_end;
597d0cae
DT
5872 }
5873
d47b41ac
VA
5874 /* After ua is attached to lkb it will be freed by dlm_free_lkb().
5875 When DLM_IFL_USER is set, the dlm knows that this is a userspace
5876 lock and that lkb_astparam is the dlm_user_args structure. */
5877 lkb->lkb_flags |= DLM_IFL_USER;
597d0cae
DT
5878 error = request_lock(ls, lkb, name, namelen, &args);
5879
5880 switch (error) {
5881 case 0:
5882 break;
5883 case -EINPROGRESS:
5884 error = 0;
5885 break;
5886 case -EAGAIN:
5887 error = 0;
df561f66 5888 fallthrough;
597d0cae
DT
5889 default:
5890 __put_lkb(ls, lkb);
7a3de732 5891 goto out_trace_end;
597d0cae
DT
5892 }
5893
5894 /* add this new lkb to the per-process list of locks */
5895 spin_lock(&ua->proc->locks_spin);
ef0c2bb0 5896 hold_lkb(lkb);
597d0cae
DT
5897 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5898 spin_unlock(&ua->proc->locks_spin);
7a3de732
AA
5899 out_trace_end:
5900 trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
597d0cae 5901 out:
85e86edf 5902 dlm_unlock_recovery(ls);
597d0cae
DT
5903 return error;
5904}
5905
6b0afc0c 5906#ifdef CONFIG_DLM_DEPRECATED_API
597d0cae 5907int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
d7db923e
DT
5908 int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5909 unsigned long timeout_cs)
6b0afc0c
AA
5910#else
5911int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5912 int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
5913#endif
597d0cae
DT
5914{
5915 struct dlm_lkb *lkb;
5916 struct dlm_args args;
5917 struct dlm_user_args *ua;
5918 int error;
5919
85e86edf 5920 dlm_lock_recovery(ls);
597d0cae
DT
5921
5922 error = find_lkb(ls, lkid, &lkb);
5923 if (error)
5924 goto out;
5925
7a3de732
AA
5926 trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags);
5927
597d0cae
DT
5928 /* user can change the params on its lock when it converts it, or
5929 add an lvb that didn't exist before */
5930
d292c0cc 5931 ua = lkb->lkb_ua;
597d0cae
DT
5932
5933 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
573c24c4 5934 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
597d0cae
DT
5935 if (!ua->lksb.sb_lvbptr) {
5936 error = -ENOMEM;
5937 goto out_put;
5938 }
5939 }
5940 if (lvb_in && ua->lksb.sb_lvbptr)
5941 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5942
d7db923e 5943 ua->xid = ua_tmp->xid;
597d0cae
DT
5944 ua->castparam = ua_tmp->castparam;
5945 ua->castaddr = ua_tmp->castaddr;
5946 ua->bastparam = ua_tmp->bastparam;
5947 ua->bastaddr = ua_tmp->bastaddr;
10948eb4 5948 ua->user_lksb = ua_tmp->user_lksb;
597d0cae 5949
6b0afc0c 5950#ifdef CONFIG_DLM_DEPRECATED_API
d7db923e 5951 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
e5dae548 5952 fake_astfn, ua, fake_bastfn, &args);
6b0afc0c
AA
5953#else
5954 error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua,
5955 fake_bastfn, &args);
5956#endif
597d0cae
DT
5957 if (error)
5958 goto out_put;
5959
5960 error = convert_lock(ls, lkb, &args);
5961
c85d65e9 5962 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
597d0cae
DT
5963 error = 0;
5964 out_put:
7a3de732 5965 trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false);
597d0cae
DT
5966 dlm_put_lkb(lkb);
5967 out:
85e86edf 5968 dlm_unlock_recovery(ls);
597d0cae
DT
5969 kfree(ua_tmp);
5970 return error;
5971}
5972
2ab4bd8e
DT
5973/*
5974 * The caller asks for an orphan lock on a given resource with a given mode.
5975 * If a matching lock exists, it's moved to the owner's list of locks and
5976 * the lkid is returned.
5977 */
5978
5979int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5980 int mode, uint32_t flags, void *name, unsigned int namelen,
8d614a44 5981 uint32_t *lkid)
2ab4bd8e 5982{
dc1acd5c 5983 struct dlm_lkb *lkb = NULL, *iter;
2ab4bd8e
DT
5984 struct dlm_user_args *ua;
5985 int found_other_mode = 0;
2ab4bd8e
DT
5986 int rv = 0;
5987
5988 mutex_lock(&ls->ls_orphans_mutex);
dc1acd5c
JK
5989 list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
5990 if (iter->lkb_resource->res_length != namelen)
2ab4bd8e 5991 continue;
dc1acd5c 5992 if (memcmp(iter->lkb_resource->res_name, name, namelen))
2ab4bd8e 5993 continue;
dc1acd5c 5994 if (iter->lkb_grmode != mode) {
2ab4bd8e
DT
5995 found_other_mode = 1;
5996 continue;
5997 }
5998
dc1acd5c
JK
5999 lkb = iter;
6000 list_del_init(&iter->lkb_ownqueue);
6001 iter->lkb_flags &= ~DLM_IFL_ORPHAN;
6002 *lkid = iter->lkb_id;
2ab4bd8e
DT
6003 break;
6004 }
6005 mutex_unlock(&ls->ls_orphans_mutex);
6006
dc1acd5c 6007 if (!lkb && found_other_mode) {
2ab4bd8e
DT
6008 rv = -EAGAIN;
6009 goto out;
6010 }
6011
dc1acd5c 6012 if (!lkb) {
2ab4bd8e
DT
6013 rv = -ENOENT;
6014 goto out;
6015 }
6016
6017 lkb->lkb_exflags = flags;
6018 lkb->lkb_ownpid = (int) current->pid;
6019
6020 ua = lkb->lkb_ua;
6021
6022 ua->proc = ua_tmp->proc;
6023 ua->xid = ua_tmp->xid;
6024 ua->castparam = ua_tmp->castparam;
6025 ua->castaddr = ua_tmp->castaddr;
6026 ua->bastparam = ua_tmp->bastparam;
6027 ua->bastaddr = ua_tmp->bastaddr;
6028 ua->user_lksb = ua_tmp->user_lksb;
6029
6030 /*
6031 * The lkb reference from the ls_orphans list was not
6032 * removed above, and is now considered the reference
6033 * for the proc locks list.
6034 */
6035
6036 spin_lock(&ua->proc->locks_spin);
6037 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
6038 spin_unlock(&ua->proc->locks_spin);
6039 out:
6040 kfree(ua_tmp);
6041 return rv;
6042}
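/*
 * Added summary (not part of the original file): callers of
 * dlm_user_adopt_orphan() above can expect:
 *
 *	0        - a matching orphan was adopted; *lkid holds its lock id
 *	-EAGAIN  - an orphan exists on the resource but with another grmode
 *	-ENOENT  - no orphan was found for this resource name
 */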
6043
597d0cae
DT
6044int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6045 uint32_t flags, uint32_t lkid, char *lvb_in)
6046{
6047 struct dlm_lkb *lkb;
6048 struct dlm_args args;
6049 struct dlm_user_args *ua;
6050 int error;
6051
85e86edf 6052 dlm_lock_recovery(ls);
597d0cae
DT
6053
6054 error = find_lkb(ls, lkid, &lkb);
6055 if (error)
6056 goto out;
6057
7a3de732
AA
6058 trace_dlm_unlock_start(ls, lkb, flags);
6059
d292c0cc 6060 ua = lkb->lkb_ua;
597d0cae
DT
6061
6062 if (lvb_in && ua->lksb.sb_lvbptr)
6063 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
b434eda6
PC
6064 if (ua_tmp->castparam)
6065 ua->castparam = ua_tmp->castparam;
cc346d55 6066 ua->user_lksb = ua_tmp->user_lksb;
597d0cae
DT
6067
6068 error = set_unlock_args(flags, ua, &args);
6069 if (error)
6070 goto out_put;
6071
6072 error = unlock_lock(ls, lkb, &args);
6073
6074 if (error == -DLM_EUNLOCK)
6075 error = 0;
ef0c2bb0
DT
6076 /* from validate_unlock_args() */
6077 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
6078 error = 0;
597d0cae
DT
6079 if (error)
6080 goto out_put;
6081
6082 spin_lock(&ua->proc->locks_spin);
23e8e1aa 6083 /* dlm_user_add_cb() may have already taken lkb off the proc list */
a1bc86e6
DT
6084 if (!list_empty(&lkb->lkb_ownqueue))
6085 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
597d0cae 6086 spin_unlock(&ua->proc->locks_spin);
597d0cae 6087 out_put:
7a3de732 6088 trace_dlm_unlock_end(ls, lkb, flags, error);
597d0cae
DT
6089 dlm_put_lkb(lkb);
6090 out:
85e86edf 6091 dlm_unlock_recovery(ls);
ef0c2bb0 6092 kfree(ua_tmp);
597d0cae
DT
6093 return error;
6094}
6095
6096int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
6097 uint32_t flags, uint32_t lkid)
6098{
6099 struct dlm_lkb *lkb;
6100 struct dlm_args args;
6101 struct dlm_user_args *ua;
6102 int error;
6103
85e86edf 6104 dlm_lock_recovery(ls);
597d0cae
DT
6105
6106 error = find_lkb(ls, lkid, &lkb);
6107 if (error)
6108 goto out;
6109
7a3de732
AA
6110 trace_dlm_unlock_start(ls, lkb, flags);
6111
d292c0cc 6112 ua = lkb->lkb_ua;
b434eda6
PC
6113 if (ua_tmp->castparam)
6114 ua->castparam = ua_tmp->castparam;
c059f70e 6115 ua->user_lksb = ua_tmp->user_lksb;
597d0cae
DT
6116
6117 error = set_unlock_args(flags, ua, &args);
6118 if (error)
6119 goto out_put;
6120
6121 error = cancel_lock(ls, lkb, &args);
6122
6123 if (error == -DLM_ECANCEL)
6124 error = 0;
ef0c2bb0
DT
6125 /* from validate_unlock_args() */
6126 if (error == -EBUSY)
6127 error = 0;
597d0cae 6128 out_put:
7a3de732 6129 trace_dlm_unlock_end(ls, lkb, flags, error);
597d0cae
DT
6130 dlm_put_lkb(lkb);
6131 out:
85e86edf 6132 dlm_unlock_recovery(ls);
ef0c2bb0 6133 kfree(ua_tmp);
597d0cae
DT
6134 return error;
6135}
6136
8b4021fa
DT
6137int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6138{
6139 struct dlm_lkb *lkb;
6140 struct dlm_args args;
6141 struct dlm_user_args *ua;
6142 struct dlm_rsb *r;
6143 int error;
6144
6145 dlm_lock_recovery(ls);
6146
6147 error = find_lkb(ls, lkid, &lkb);
6148 if (error)
6149 goto out;
6150
7a3de732
AA
6151 trace_dlm_unlock_start(ls, lkb, flags);
6152
d292c0cc 6153 ua = lkb->lkb_ua;
8b4021fa
DT
6154
6155 error = set_unlock_args(flags, ua, &args);
6156 if (error)
6157 goto out_put;
6158
6159 /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6160
6161 r = lkb->lkb_resource;
6162 hold_rsb(r);
6163 lock_rsb(r);
6164
6165 error = validate_unlock_args(lkb, &args);
6166 if (error)
6167 goto out_r;
6168 lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6169
6170 error = _cancel_lock(r, lkb);
6171 out_r:
6172 unlock_rsb(r);
6173 put_rsb(r);
6174
6175 if (error == -DLM_ECANCEL)
6176 error = 0;
6177 /* from validate_unlock_args() */
6178 if (error == -EBUSY)
6179 error = 0;
6180 out_put:
7a3de732 6181 trace_dlm_unlock_end(ls, lkb, flags, error);
8b4021fa
DT
6182 dlm_put_lkb(lkb);
6183 out:
6184 dlm_unlock_recovery(ls);
6185 return error;
6186}
6187
ef0c2bb0
DT
6188/* lkb's that are removed from the waiters list by revert are just left on the
6189 orphans list with the granted orphan locks, to be freed by purge */
6190
597d0cae
DT
6191static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6192{
ef0c2bb0
DT
6193 struct dlm_args args;
6194 int error;
597d0cae 6195
2ab4bd8e 6196 hold_lkb(lkb); /* reference for the ls_orphans list */
ef0c2bb0
DT
6197 mutex_lock(&ls->ls_orphans_mutex);
6198 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6199 mutex_unlock(&ls->ls_orphans_mutex);
597d0cae 6200
d292c0cc 6201 set_unlock_args(0, lkb->lkb_ua, &args);
ef0c2bb0
DT
6202
6203 error = cancel_lock(ls, lkb, &args);
6204 if (error == -DLM_ECANCEL)
6205 error = 0;
6206 return error;
597d0cae
DT
6207}
6208
da8c6663
DT
6209/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6210 granted. Regardless of what rsb queue the lock is on, it's removed and
6211 freed. The IVVALBLK flag causes the lvb on the resource to be invalidated
6212 if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
597d0cae
DT
6213
6214static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6215{
597d0cae
DT
6216 struct dlm_args args;
6217 int error;
6218
da8c6663
DT
6219 set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6220 lkb->lkb_ua, &args);
597d0cae
DT
6221
6222 error = unlock_lock(ls, lkb, &args);
6223 if (error == -DLM_EUNLOCK)
6224 error = 0;
6225 return error;
6226}
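/*
 * Added summary (not part of the original file): dlm_clear_proc_locks()
 * below tears down each lock of an exiting userspace process with one of
 * the two helpers above:
 *
 *	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
 *		orphan_proc_lock(ls, lkb);   // cancel, park on ls_orphans
 *	else
 *		unlock_proc_lock(ls, lkb);   // force unlock, maybe invalidate lvb
 */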
6227
ef0c2bb0
DT
6228/* We have to release the ls_clear_proc_locks spinlock before calling unlock_proc_lock()
6229 (which does lock_rsb) due to deadlock with receiving a message that does
23e8e1aa 6230 lock_rsb followed by dlm_user_add_cb() */
ef0c2bb0
DT
6231
6232static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6233 struct dlm_user_proc *proc)
6234{
6235 struct dlm_lkb *lkb = NULL;
6236
296d9d1e 6237 spin_lock(&ls->ls_clear_proc_locks);
ef0c2bb0
DT
6238 if (list_empty(&proc->locks))
6239 goto out;
6240
6241 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6242 list_del_init(&lkb->lkb_ownqueue);
6243
6244 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6245 lkb->lkb_flags |= DLM_IFL_ORPHAN;
6246 else
6247 lkb->lkb_flags |= DLM_IFL_DEAD;
6248 out:
296d9d1e 6249 spin_unlock(&ls->ls_clear_proc_locks);
ef0c2bb0
DT
6250 return lkb;
6251}
6252
23e8e1aa 6253/* The ls_clear_proc_locks spinlock protects against dlm_user_add_cb() which
597d0cae
DT
6254 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6255 which we clear here. */
6256
6257/* proc CLOSING flag is set so no more device_reads should look at proc->asts
6258 list, and no more device_writes should add lkb's to proc->locks list; so we
6259 shouldn't need to take asts_spin or locks_spin here. this assumes that
6260 device reads/writes/closes are serialized -- FIXME: we may need to serialize
6261 them ourselves. */
6262
6263void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6264{
6265 struct dlm_lkb *lkb, *safe;
6266
85e86edf 6267 dlm_lock_recovery(ls);
597d0cae 6268
ef0c2bb0
DT
6269 while (1) {
6270 lkb = del_proc_lock(ls, proc);
6271 if (!lkb)
6272 break;
84d8cd69 6273 del_timeout(lkb);
ef0c2bb0 6274 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
597d0cae 6275 orphan_proc_lock(ls, lkb);
ef0c2bb0 6276 else
597d0cae 6277 unlock_proc_lock(ls, lkb);
597d0cae
DT
6278
6279 /* this removes the reference for the proc->locks list
6280 added by dlm_user_request, it may result in the lkb
6281 being freed */
6282
6283 dlm_put_lkb(lkb);
6284 }
a1bc86e6 6285
296d9d1e 6286 spin_lock(&ls->ls_clear_proc_locks);
ef0c2bb0 6287
a1bc86e6
DT
6288 /* in-progress unlocks */
6289 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6290 list_del_init(&lkb->lkb_ownqueue);
6291 lkb->lkb_flags |= DLM_IFL_DEAD;
6292 dlm_put_lkb(lkb);
6293 }
6294
23e8e1aa 6295 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
8304d6f2
DT
6296 memset(&lkb->lkb_callbacks, 0,
6297 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
23e8e1aa 6298 list_del_init(&lkb->lkb_cb_list);
a1bc86e6
DT
6299 dlm_put_lkb(lkb);
6300 }
6301
296d9d1e 6302 spin_unlock(&ls->ls_clear_proc_locks);
85e86edf 6303 dlm_unlock_recovery(ls);
597d0cae 6304}
a1bc86e6 6305
8499137d
DT
6306static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6307{
6308 struct dlm_lkb *lkb, *safe;
6309
6310 while (1) {
6311 lkb = NULL;
6312 spin_lock(&proc->locks_spin);
6313 if (!list_empty(&proc->locks)) {
6314 lkb = list_entry(proc->locks.next, struct dlm_lkb,
6315 lkb_ownqueue);
6316 list_del_init(&lkb->lkb_ownqueue);
6317 }
6318 spin_unlock(&proc->locks_spin);
6319
6320 if (!lkb)
6321 break;
6322
6323 lkb->lkb_flags |= DLM_IFL_DEAD;
6324 unlock_proc_lock(ls, lkb);
6325 dlm_put_lkb(lkb); /* ref from proc->locks list */
6326 }
6327
6328 spin_lock(&proc->locks_spin);
6329 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6330 list_del_init(&lkb->lkb_ownqueue);
6331 lkb->lkb_flags |= DLM_IFL_DEAD;
6332 dlm_put_lkb(lkb);
6333 }
6334 spin_unlock(&proc->locks_spin);
6335
6336 spin_lock(&proc->asts_spin);
23e8e1aa 6337 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
8304d6f2
DT
6338 memset(&lkb->lkb_callbacks, 0,
6339 sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
23e8e1aa 6340 list_del_init(&lkb->lkb_cb_list);
8499137d
DT
6341 dlm_put_lkb(lkb);
6342 }
6343 spin_unlock(&proc->asts_spin);
6344}
6345
6346/* pid of 0 means purge all orphans */
6347
6348static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6349{
6350 struct dlm_lkb *lkb, *safe;
6351
6352 mutex_lock(&ls->ls_orphans_mutex);
6353 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6354 if (pid && lkb->lkb_ownpid != pid)
6355 continue;
6356 unlock_proc_lock(ls, lkb);
6357 list_del_init(&lkb->lkb_ownqueue);
6358 dlm_put_lkb(lkb);
6359 }
6360 mutex_unlock(&ls->ls_orphans_mutex);
6361}
6362
6363static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6364{
6365 struct dlm_message *ms;
6366 struct dlm_mhandle *mh;
6367 int error;
6368
6369 error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6370 DLM_MSG_PURGE, &ms, &mh);
6371 if (error)
6372 return error;
00e99ccd
AA
6373 ms->m_nodeid = cpu_to_le32(nodeid);
6374 ms->m_pid = cpu_to_le32(pid);
8499137d
DT
6375
6376 return send_message(mh, ms);
6377}
6378
6379int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6380 int nodeid, int pid)
6381{
6382 int error = 0;
6383
2ab4bd8e 6384 if (nodeid && (nodeid != dlm_our_nodeid())) {
8499137d
DT
6385 error = send_purge(ls, nodeid, pid);
6386 } else {
85e86edf 6387 dlm_lock_recovery(ls);
8499137d
DT
6388 if (pid == current->pid)
6389 purge_proc_locks(ls, proc);
6390 else
6391 do_purge(ls, nodeid, pid);
85e86edf 6392 dlm_unlock_recovery(ls);
8499137d
DT
6393 }
6394 return error;
6395}
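/*
 * Added summary (not part of the original file): dlm_user_purge() above
 * routes a purge request as follows:
 *
 *	nodeid set and != our node  ->  send_purge(), i.e. a DLM_MSG_PURGE
 *	                                message handled remotely by do_purge()
 *	pid == current->pid         ->  purge_proc_locks() on our own proc
 *	otherwise                   ->  do_purge() locally (pid 0 = all orphans)
 */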
6396
5054e79d
AA
6397/* debug functionality */
6398int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len,
6399 int lkb_nodeid, unsigned int lkb_flags, int lkb_status)
6400{
6401 struct dlm_lksb *lksb;
6402 struct dlm_lkb *lkb;
6403 struct dlm_rsb *r;
6404 int error;
6405
6406 /* we currently can't set a valid user lock */
6407 if (lkb_flags & DLM_IFL_USER)
6408 return -EOPNOTSUPP;
6409
6410 lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
6411 if (!lksb)
6412 return -ENOMEM;
6413
6414 error = _create_lkb(ls, &lkb, lkb_id, lkb_id + 1);
6415 if (error) {
6416 kfree(lksb);
6417 return error;
6418 }
6419
6420 lkb->lkb_flags = lkb_flags;
6421 lkb->lkb_nodeid = lkb_nodeid;
6422 lkb->lkb_lksb = lksb;
6423 /* user-specific pointer; just don't leave it NULL for kernel locks */
6424 if (~lkb_flags & DLM_IFL_USER)
6425 lkb->lkb_astparam = (void *)0xDEADBEEF;
6426
6427 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
6428 if (error) {
6429 kfree(lksb);
6430 __put_lkb(ls, lkb);
6431 return error;
6432 }
6433
6434 lock_rsb(r);
6435 attach_lkb(r, lkb);
6436 add_lkb(r, lkb, lkb_status);
6437 unlock_rsb(r);
6438 put_rsb(r);
6439
6440 return 0;
6441}
6442
63eab2b0
AA
6443int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id,
6444 int mstype, int to_nodeid)
6445{
6446 struct dlm_lkb *lkb;
6447 int error;
6448
6449 error = find_lkb(ls, lkb_id, &lkb);
6450 if (error)
6451 return error;
6452
6453 error = add_to_waiters(lkb, mstype, to_nodeid);
6454 dlm_put_lkb(lkb);
6455 return error;
6456}
6457