ocfs2: do LVB puts in place
[linux-2.6-block.git] / fs / ocfs2 / dlm / dlmmaster.c
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
 4 * dlmmaster.c
5 *
6 * standalone DLM module
7 *
8 * Copyright (C) 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 021110-1307, USA.
24 *
25 */
26
27
28#include <linux/module.h>
29#include <linux/fs.h>
30#include <linux/types.h>
31#include <linux/slab.h>
32#include <linux/highmem.h>
33#include <linux/utsname.h>
34#include <linux/init.h>
35#include <linux/sysctl.h>
36#include <linux/random.h>
37#include <linux/blkdev.h>
38#include <linux/socket.h>
39#include <linux/inet.h>
40#include <linux/spinlock.h>
41#include <linux/delay.h>
42
43
44#include "cluster/heartbeat.h"
45#include "cluster/nodemanager.h"
46#include "cluster/tcp.h"
47
48#include "dlmapi.h"
49#include "dlmcommon.h"
50#include "dlmdebug.h"
 51#include "dlmdomain.h"
52
53#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
54#include "cluster/masklog.h"
55
56enum dlm_mle_type {
57 DLM_MLE_BLOCK,
58 DLM_MLE_MASTER,
59 DLM_MLE_MIGRATION
60};
61
62struct dlm_lock_name
63{
64 u8 len;
65 u8 name[DLM_LOCKID_NAME_MAX];
66};
67
68struct dlm_master_list_entry
69{
70 struct list_head list;
71 struct list_head hb_events;
72 struct dlm_ctxt *dlm;
73 spinlock_t spinlock;
74 wait_queue_head_t wq;
75 atomic_t woken;
76 struct kref mle_refs;
 77 int inuse;
78 unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
79 unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
80 unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
81 unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
82 u8 master;
83 u8 new_master;
84 enum dlm_mle_type type;
85 struct o2hb_callback_func mle_hb_up;
86 struct o2hb_callback_func mle_hb_down;
87 union {
88 struct dlm_lock_resource *res;
89 struct dlm_lock_name name;
90 } u;
91};
92
93static void dlm_mle_node_down(struct dlm_ctxt *dlm,
94 struct dlm_master_list_entry *mle,
95 struct o2nm_node *node,
96 int idx);
97static void dlm_mle_node_up(struct dlm_ctxt *dlm,
98 struct dlm_master_list_entry *mle,
99 struct o2nm_node *node,
100 int idx);
101
102static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
103static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
104 unsigned int namelen, void *nodemap,
105 u32 flags);
106
107static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
108 struct dlm_master_list_entry *mle,
109 const char *name,
110 unsigned int namelen)
111{
112 struct dlm_lock_resource *res;
113
114 if (dlm != mle->dlm)
115 return 0;
116
117 if (mle->type == DLM_MLE_BLOCK ||
118 mle->type == DLM_MLE_MIGRATION) {
119 if (namelen != mle->u.name.len ||
120 memcmp(name, mle->u.name.name, namelen)!=0)
121 return 0;
122 } else {
123 res = mle->u.res;
124 if (namelen != res->lockname.len ||
125 memcmp(res->lockname.name, name, namelen) != 0)
126 return 0;
127 }
128 return 1;
129}
130
131#if 0
132/* Code here is included but defined out as it aids debugging */
133
134#define dlm_print_nodemap(m) _dlm_print_nodemap(m,#m)
135void _dlm_print_nodemap(unsigned long *map, const char *mapname)
136{
137 int i;
138 printk("%s=[ ", mapname);
139 for (i=0; i<O2NM_MAX_NODES; i++)
140 if (test_bit(i, map))
141 printk("%d ", i);
142 printk("]");
143}
144
145void dlm_print_one_mle(struct dlm_master_list_entry *mle)
146{
 147 int refs;
148 char *type;
149 char attached;
150 u8 master;
151 unsigned int namelen;
152 const char *name;
153 struct kref *k;
154 unsigned long *maybe = mle->maybe_map,
155 *vote = mle->vote_map,
156 *resp = mle->response_map,
157 *node = mle->node_map;
158
159 k = &mle->mle_refs;
160 if (mle->type == DLM_MLE_BLOCK)
161 type = "BLK";
162 else if (mle->type == DLM_MLE_MASTER)
163 type = "MAS";
164 else
165 type = "MIG";
166 refs = atomic_read(&k->refcount);
167 master = mle->master;
168 attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');
169
170 if (mle->type != DLM_MLE_MASTER) {
171 namelen = mle->u.name.len;
172 name = mle->u.name.name;
173 } else {
174 namelen = mle->u.res->lockname.len;
175 name = mle->u.res->lockname.name;
176 }
177
178 mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
179 namelen, name, type, refs, master, mle->new_master, attached,
180 mle->inuse);
181 dlm_print_nodemap(maybe);
182 printk(", ");
183 dlm_print_nodemap(vote);
184 printk(", ");
185 dlm_print_nodemap(resp);
186 printk(", ");
187 dlm_print_nodemap(node);
188 printk(", ");
189 printk("\n");
190}
191
192static void dlm_dump_mles(struct dlm_ctxt *dlm)
193{
194 struct dlm_master_list_entry *mle;
195 struct list_head *iter;
196
197 mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
198 spin_lock(&dlm->master_lock);
199 list_for_each(iter, &dlm->master_list) {
200 mle = list_entry(iter, struct dlm_master_list_entry, list);
201 dlm_print_one_mle(mle);
202 }
203 spin_unlock(&dlm->master_lock);
204}
205
206int dlm_dump_all_mles(const char __user *data, unsigned int len)
207{
208 struct list_head *iter;
209 struct dlm_ctxt *dlm;
210
211 spin_lock(&dlm_domain_lock);
212 list_for_each(iter, &dlm_domains) {
213 dlm = list_entry (iter, struct dlm_ctxt, list);
214 mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
215 dlm_dump_mles(dlm);
216 }
217 spin_unlock(&dlm_domain_lock);
218 return len;
219}
220EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
221
222#endif /* 0 */
223
224
225static kmem_cache_t *dlm_mle_cache = NULL;
226
227
228static void dlm_mle_release(struct kref *kref);
229static void dlm_init_mle(struct dlm_master_list_entry *mle,
230 enum dlm_mle_type type,
231 struct dlm_ctxt *dlm,
232 struct dlm_lock_resource *res,
233 const char *name,
234 unsigned int namelen);
235static void dlm_put_mle(struct dlm_master_list_entry *mle);
236static void __dlm_put_mle(struct dlm_master_list_entry *mle);
237static int dlm_find_mle(struct dlm_ctxt *dlm,
238 struct dlm_master_list_entry **mle,
239 char *name, unsigned int namelen);
240
241static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to);
242
243
244static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
245 struct dlm_lock_resource *res,
246 struct dlm_master_list_entry *mle,
247 int *blocked);
248static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
249 struct dlm_lock_resource *res,
250 struct dlm_master_list_entry *mle,
251 int blocked);
252static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
253 struct dlm_lock_resource *res,
254 struct dlm_master_list_entry *mle,
255 struct dlm_master_list_entry **oldmle,
256 const char *name, unsigned int namelen,
257 u8 new_master, u8 master);
258
259static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
260 struct dlm_lock_resource *res);
261static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
262 struct dlm_lock_resource *res);
263static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
264 struct dlm_lock_resource *res,
265 u8 target);
266static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
267 struct dlm_lock_resource *res);
268
269
270int dlm_is_host_down(int errno)
271{
272 switch (errno) {
273 case -EBADF:
274 case -ECONNREFUSED:
275 case -ENOTCONN:
276 case -ECONNRESET:
277 case -EPIPE:
278 case -EHOSTDOWN:
279 case -EHOSTUNREACH:
280 case -ETIMEDOUT:
281 case -ECONNABORTED:
282 case -ENETDOWN:
283 case -ENETUNREACH:
284 case -ENETRESET:
285 case -ESHUTDOWN:
286 case -ENOPROTOOPT:
287 case -EINVAL: /* if returned from our tcp code,
288 this means there is no socket */
289 return 1;
290 }
291 return 0;
292}
293
294
295/*
296 * MASTER LIST FUNCTIONS
297 */
298
299
300/*
301 * regarding master list entries and heartbeat callbacks:
302 *
303 * in order to avoid sleeping and allocation that occurs in
304 * heartbeat, master list entries are simply attached to the
305 * dlm's established heartbeat callbacks. the mle is attached
306 * when it is created, and since the dlm->spinlock is held at
307 * that time, any heartbeat event will be properly discovered
308 * by the mle. the mle needs to be detached from the
309 * dlm->mle_hb_events list as soon as heartbeat events are no
310 * longer useful to the mle, and before the mle is freed.
311 *
312 * as a general rule, heartbeat events are no longer needed by
313 * the mle once an "answer" regarding the lock master has been
314 * received.
315 */
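/*
 * In this file the pairing is roughly: dlm_init_mle() attaches the mle
 * (with dlm->spinlock held) via __dlm_mle_attach_hb_events(), and once
 * the master is known the mle is detached with dlm_mle_detach_hb_events()
 * -- for example in dlm_get_lock_resource() after mastery completes, or
 * in the assert_master handler.  Detach is idempotent (the list_empty()
 * check), so detaching an already-detached mle is harmless.
 */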
316static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
317 struct dlm_master_list_entry *mle)
318{
319 assert_spin_locked(&dlm->spinlock);
320
321 list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
322}
323
324
325static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
326 struct dlm_master_list_entry *mle)
327{
328 if (!list_empty(&mle->hb_events))
329 list_del_init(&mle->hb_events);
330}
331
332
333static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
334 struct dlm_master_list_entry *mle)
335{
336 spin_lock(&dlm->spinlock);
337 __dlm_mle_detach_hb_events(dlm, mle);
338 spin_unlock(&dlm->spinlock);
339}
340
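/*
 * Sketch of the "inuse" pinning as used in this file: dlm_get_mle_inuse()
 * bumps mle->inuse and takes a kref while dlm->spinlock and master_lock
 * are held, so a caller such as dlm_get_lock_resource() can drop both
 * locks and still safely dereference the mle; dlm_put_mle_inuse() retakes
 * the locks, drops the count and releases the reference via __dlm_put_mle().
 */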
341static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
342{
343 struct dlm_ctxt *dlm;
344 dlm = mle->dlm;
345
346 assert_spin_locked(&dlm->spinlock);
347 assert_spin_locked(&dlm->master_lock);
348 mle->inuse++;
349 kref_get(&mle->mle_refs);
350}
351
352static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
353{
354 struct dlm_ctxt *dlm;
355 dlm = mle->dlm;
356
357 spin_lock(&dlm->spinlock);
358 spin_lock(&dlm->master_lock);
359 mle->inuse--;
360 __dlm_put_mle(mle);
361 spin_unlock(&dlm->master_lock);
362 spin_unlock(&dlm->spinlock);
363
364}
365
366/* remove from list and free */
367static void __dlm_put_mle(struct dlm_master_list_entry *mle)
368{
369 struct dlm_ctxt *dlm;
370 dlm = mle->dlm;
371
372 assert_spin_locked(&dlm->spinlock);
373 assert_spin_locked(&dlm->master_lock);
374 if (!atomic_read(&mle->mle_refs.refcount)) {
375 /* this may or may not crash, but who cares.
376 * it's a BUG. */
377 mlog(ML_ERROR, "bad mle: %p\n", mle);
378 dlm_print_one_mle(mle);
379 BUG();
380 } else
381 kref_put(&mle->mle_refs, dlm_mle_release);
382}
383
384
385/* must not have any spinlocks coming in */
386static void dlm_put_mle(struct dlm_master_list_entry *mle)
387{
388 struct dlm_ctxt *dlm;
389 dlm = mle->dlm;
390
391 spin_lock(&dlm->spinlock);
392 spin_lock(&dlm->master_lock);
393 __dlm_put_mle(mle);
394 spin_unlock(&dlm->master_lock);
395 spin_unlock(&dlm->spinlock);
396}
397
398static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
399{
400 kref_get(&mle->mle_refs);
401}
402
403static void dlm_init_mle(struct dlm_master_list_entry *mle,
404 enum dlm_mle_type type,
405 struct dlm_ctxt *dlm,
406 struct dlm_lock_resource *res,
407 const char *name,
408 unsigned int namelen)
409{
410 assert_spin_locked(&dlm->spinlock);
411
412 mle->dlm = dlm;
413 mle->type = type;
414 INIT_LIST_HEAD(&mle->list);
415 INIT_LIST_HEAD(&mle->hb_events);
416 memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
417 spin_lock_init(&mle->spinlock);
418 init_waitqueue_head(&mle->wq);
419 atomic_set(&mle->woken, 0);
420 kref_init(&mle->mle_refs);
421 memset(mle->response_map, 0, sizeof(mle->response_map));
422 mle->master = O2NM_MAX_NODES;
423 mle->new_master = O2NM_MAX_NODES;
 424 mle->inuse = 0;
425
426 if (mle->type == DLM_MLE_MASTER) {
427 BUG_ON(!res);
428 mle->u.res = res;
429 } else if (mle->type == DLM_MLE_BLOCK) {
430 BUG_ON(!name);
431 memcpy(mle->u.name.name, name, namelen);
432 mle->u.name.len = namelen;
433 } else /* DLM_MLE_MIGRATION */ {
434 BUG_ON(!name);
435 memcpy(mle->u.name.name, name, namelen);
436 mle->u.name.len = namelen;
437 }
438
439 /* copy off the node_map and register hb callbacks on our copy */
440 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
441 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
442 clear_bit(dlm->node_num, mle->vote_map);
443 clear_bit(dlm->node_num, mle->node_map);
444
445 /* attach the mle to the domain node up/down events */
446 __dlm_mle_attach_hb_events(dlm, mle);
447}
448
449
450/* returns 1 if found, 0 if not */
451static int dlm_find_mle(struct dlm_ctxt *dlm,
452 struct dlm_master_list_entry **mle,
453 char *name, unsigned int namelen)
454{
455 struct dlm_master_list_entry *tmpmle;
456 struct list_head *iter;
457
458 assert_spin_locked(&dlm->master_lock);
459
460 list_for_each(iter, &dlm->master_list) {
461 tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
462 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
463 continue;
464 dlm_get_mle(tmpmle);
465 *mle = tmpmle;
466 return 1;
467 }
468 return 0;
469}
470
471void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
472{
473 struct dlm_master_list_entry *mle;
474 struct list_head *iter;
475
476 assert_spin_locked(&dlm->spinlock);
477
478 list_for_each(iter, &dlm->mle_hb_events) {
479 mle = list_entry(iter, struct dlm_master_list_entry,
480 hb_events);
481 if (node_up)
482 dlm_mle_node_up(dlm, mle, NULL, idx);
483 else
484 dlm_mle_node_down(dlm, mle, NULL, idx);
485 }
486}
487
488static void dlm_mle_node_down(struct dlm_ctxt *dlm,
489 struct dlm_master_list_entry *mle,
490 struct o2nm_node *node, int idx)
491{
492 spin_lock(&mle->spinlock);
493
494 if (!test_bit(idx, mle->node_map))
495 mlog(0, "node %u already removed from nodemap!\n", idx);
496 else
497 clear_bit(idx, mle->node_map);
498
499 spin_unlock(&mle->spinlock);
500}
501
502static void dlm_mle_node_up(struct dlm_ctxt *dlm,
503 struct dlm_master_list_entry *mle,
504 struct o2nm_node *node, int idx)
505{
506 spin_lock(&mle->spinlock);
507
508 if (test_bit(idx, mle->node_map))
509 mlog(0, "node %u already in node map!\n", idx);
510 else
511 set_bit(idx, mle->node_map);
512
513 spin_unlock(&mle->spinlock);
514}
515
516
517int dlm_init_mle_cache(void)
518{
519 dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
520 sizeof(struct dlm_master_list_entry),
521 0, SLAB_HWCACHE_ALIGN,
522 NULL, NULL);
523 if (dlm_mle_cache == NULL)
524 return -ENOMEM;
525 return 0;
526}
527
528void dlm_destroy_mle_cache(void)
529{
530 if (dlm_mle_cache)
531 kmem_cache_destroy(dlm_mle_cache);
532}
533
534static void dlm_mle_release(struct kref *kref)
535{
536 struct dlm_master_list_entry *mle;
537 struct dlm_ctxt *dlm;
538
539 mlog_entry_void();
540
541 mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
542 dlm = mle->dlm;
543
544 if (mle->type != DLM_MLE_MASTER) {
545 mlog(0, "calling mle_release for %.*s, type %d\n",
546 mle->u.name.len, mle->u.name.name, mle->type);
547 } else {
548 mlog(0, "calling mle_release for %.*s, type %d\n",
549 mle->u.res->lockname.len,
550 mle->u.res->lockname.name, mle->type);
551 }
552 assert_spin_locked(&dlm->spinlock);
553 assert_spin_locked(&dlm->master_lock);
554
555 /* remove from list if not already */
556 if (!list_empty(&mle->list))
557 list_del_init(&mle->list);
558
559 /* detach the mle from the domain node up/down events */
560 __dlm_mle_detach_hb_events(dlm, mle);
561
562 /* NOTE: kfree under spinlock here.
563 * if this is bad, we can move this to a freelist. */
564 kmem_cache_free(dlm_mle_cache, mle);
565}
566
567
568/*
569 * LOCK RESOURCE FUNCTIONS
570 */
571
572static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
573 struct dlm_lock_resource *res,
574 u8 owner)
575{
576 assert_spin_locked(&res->spinlock);
577
578 mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
579
580 if (owner == dlm->node_num)
581 atomic_inc(&dlm->local_resources);
582 else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
583 atomic_inc(&dlm->unknown_resources);
584 else
585 atomic_inc(&dlm->remote_resources);
586
587 res->owner = owner;
588}
589
590void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
591 struct dlm_lock_resource *res, u8 owner)
592{
593 assert_spin_locked(&res->spinlock);
594
595 if (owner == res->owner)
596 return;
597
598 if (res->owner == dlm->node_num)
599 atomic_dec(&dlm->local_resources);
600 else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
601 atomic_dec(&dlm->unknown_resources);
602 else
603 atomic_dec(&dlm->remote_resources);
604
605 dlm_set_lockres_owner(dlm, res, owner);
606}
607
608
609static void dlm_lockres_release(struct kref *kref)
610{
611 struct dlm_lock_resource *res;
612
613 res = container_of(kref, struct dlm_lock_resource, refs);
614
615 /* This should not happen -- all lockres' have a name
616 * associated with them at init time. */
617 BUG_ON(!res->lockname.name);
618
619 mlog(0, "destroying lockres %.*s\n", res->lockname.len,
620 res->lockname.name);
621
622 /* By the time we're ready to blow this guy away, we shouldn't
623 * be on any lists. */
 624 BUG_ON(!hlist_unhashed(&res->hash_node));
625 BUG_ON(!list_empty(&res->granted));
626 BUG_ON(!list_empty(&res->converting));
627 BUG_ON(!list_empty(&res->blocked));
628 BUG_ON(!list_empty(&res->dirty));
629 BUG_ON(!list_empty(&res->recovering));
630 BUG_ON(!list_empty(&res->purge));
631
632 kfree(res->lockname.name);
633
634 kfree(res);
635}
636
637void dlm_lockres_put(struct dlm_lock_resource *res)
638{
639 kref_put(&res->refs, dlm_lockres_release);
640}
641
642static void dlm_init_lockres(struct dlm_ctxt *dlm,
643 struct dlm_lock_resource *res,
644 const char *name, unsigned int namelen)
645{
646 char *qname;
647
648 /* If we memset here, we lose our reference to the kmalloc'd
649 * res->lockname.name, so be sure to init every field
650 * correctly! */
651
652 qname = (char *) res->lockname.name;
653 memcpy(qname, name, namelen);
654
655 res->lockname.len = namelen;
 656 res->lockname.hash = dlm_lockid_hash(name, namelen);
657
658 init_waitqueue_head(&res->wq);
659 spin_lock_init(&res->spinlock);
 660 INIT_HLIST_NODE(&res->hash_node);
661 INIT_LIST_HEAD(&res->granted);
662 INIT_LIST_HEAD(&res->converting);
663 INIT_LIST_HEAD(&res->blocked);
664 INIT_LIST_HEAD(&res->dirty);
665 INIT_LIST_HEAD(&res->recovering);
666 INIT_LIST_HEAD(&res->purge);
667 atomic_set(&res->asts_reserved, 0);
668 res->migration_pending = 0;
669
670 kref_init(&res->refs);
671
672 /* just for consistency */
673 spin_lock(&res->spinlock);
674 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
675 spin_unlock(&res->spinlock);
676
677 res->state = DLM_LOCK_RES_IN_PROGRESS;
678
679 res->last_used = 0;
680
681 memset(res->lvb, 0, DLM_LVB_LEN);
682}
683
684struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
685 const char *name,
686 unsigned int namelen)
687{
688 struct dlm_lock_resource *res;
689
690 res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
691 if (!res)
692 return NULL;
693
694 res->lockname.name = kmalloc(namelen, GFP_KERNEL);
695 if (!res->lockname.name) {
696 kfree(res);
697 return NULL;
698 }
699
700 dlm_init_lockres(dlm, res, name, namelen);
701 return res;
702}
703
704/*
705 * lookup a lock resource by name.
706 * may already exist in the hashtable.
707 * lockid is null terminated
708 *
709 * if not, allocate enough for the lockres and for
710 * the temporary structure used in doing the mastering.
711 *
712 * also, do a lookup in the dlm->master_list to see
713 * if another node has begun mastering the same lock.
714 * if so, there should be a block entry in there
715 * for this name, and we should *not* attempt to master
716 * the lock here. need to wait around for that node
717 * to assert_master (or die).
718 *
719 */
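/*
 * In outline (an illustrative summary of the code below):
 *
 *   lookup:    hash lookup under dlm->spinlock; if found, drop any lockres
 *              we allocated ourselves and return the existing one.
 *   alloc:     otherwise allocate an mle and a lockres, then retry the lookup.
 *   LKM_LOCAL: claim ownership immediately and insert into the hash.
 *   mastery:   if a BLOCK/MIGRATION mle already exists, wait for (or take)
 *              the other node's answer; otherwise add a MASTER mle, insert
 *              the lockres, send master requests to every node in the vote
 *              map and wait in dlm_wait_for_lock_mastery().
 *   done:      clear DLM_LOCK_RES_IN_PROGRESS and wake res->wq waiters.
 */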
720struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
721 const char *lockid,
722 int flags)
723{
724 struct dlm_lock_resource *tmpres=NULL, *res=NULL;
725 struct dlm_master_list_entry *mle = NULL;
726 struct dlm_master_list_entry *alloc_mle = NULL;
727 int blocked = 0;
728 int ret, nodenum;
729 struct dlm_node_iter iter;
 730 unsigned int namelen, hash;
 731 int tries = 0;
 732 int bit, wait_on_recovery = 0;
733
734 BUG_ON(!lockid);
735
736 namelen = strlen(lockid);
 737 hash = dlm_lockid_hash(lockid, namelen);
738
739 mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
740
741lookup:
742 spin_lock(&dlm->spinlock);
 743 tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash);
744 if (tmpres) {
745 spin_unlock(&dlm->spinlock);
746 mlog(0, "found in hash!\n");
747 if (res)
748 dlm_lockres_put(res);
749 res = tmpres;
750 goto leave;
751 }
752
753 if (!res) {
754 spin_unlock(&dlm->spinlock);
755 mlog(0, "allocating a new resource\n");
756 /* nothing found and we need to allocate one. */
757 alloc_mle = (struct dlm_master_list_entry *)
758 kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
759 if (!alloc_mle)
760 goto leave;
761 res = dlm_new_lockres(dlm, lockid, namelen);
762 if (!res)
763 goto leave;
764 goto lookup;
765 }
766
767 mlog(0, "no lockres found, allocated our own: %p\n", res);
768
769 if (flags & LKM_LOCAL) {
770 /* caller knows it's safe to assume it's not mastered elsewhere
771 * DONE! return right away */
772 spin_lock(&res->spinlock);
773 dlm_change_lockres_owner(dlm, res, dlm->node_num);
774 __dlm_insert_lockres(dlm, res);
775 spin_unlock(&res->spinlock);
776 spin_unlock(&dlm->spinlock);
777 /* lockres still marked IN_PROGRESS */
778 goto wake_waiters;
779 }
780
781 /* check master list to see if another node has started mastering it */
782 spin_lock(&dlm->master_lock);
783
784 /* if we found a block, wait for lock to be mastered by another node */
785 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
786 if (blocked) {
787 if (mle->type == DLM_MLE_MASTER) {
788 mlog(ML_ERROR, "master entry for nonexistent lock!\n");
789 BUG();
790 } else if (mle->type == DLM_MLE_MIGRATION) {
791 /* migration is in progress! */
792 /* the good news is that we now know the
793 * "current" master (mle->master). */
794
795 spin_unlock(&dlm->master_lock);
796 assert_spin_locked(&dlm->spinlock);
797
798 /* set the lockres owner and hash it */
799 spin_lock(&res->spinlock);
800 dlm_set_lockres_owner(dlm, res, mle->master);
801 __dlm_insert_lockres(dlm, res);
802 spin_unlock(&res->spinlock);
803 spin_unlock(&dlm->spinlock);
804
805 /* master is known, detach */
806 dlm_mle_detach_hb_events(dlm, mle);
807 dlm_put_mle(mle);
808 mle = NULL;
809 goto wake_waiters;
810 }
811 } else {
812 /* go ahead and try to master lock on this node */
813 mle = alloc_mle;
814 /* make sure this does not get freed below */
815 alloc_mle = NULL;
816 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
817 set_bit(dlm->node_num, mle->maybe_map);
818 list_add(&mle->list, &dlm->master_list);
819
820 /* still holding the dlm spinlock, check the recovery map
821 * to see if there are any nodes that still need to be
822 * considered. these will not appear in the mle nodemap
823 * but they might own this lockres. wait on them. */
824 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
825 if (bit < O2NM_MAX_NODES) {
 826 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
827 "recover before lock mastery can begin\n",
828 dlm->name, namelen, (char *)lockid, bit);
829 wait_on_recovery = 1;
830 }
831 }
832
833 /* at this point there is either a DLM_MLE_BLOCK or a
834 * DLM_MLE_MASTER on the master list, so it's safe to add the
835 * lockres to the hashtable. anyone who finds the lock will
836 * still have to wait on the IN_PROGRESS. */
837
838 /* finally add the lockres to its hash bucket */
839 __dlm_insert_lockres(dlm, res);
840 /* get an extra ref on the mle in case this is a BLOCK
841 * if so, the creator of the BLOCK may try to put the last
842 * ref at this time in the assert master handler, so we
843 * need an extra one to keep from a bad ptr deref. */
 844 dlm_get_mle_inuse(mle);
845 spin_unlock(&dlm->master_lock);
846 spin_unlock(&dlm->spinlock);
847
848 while (wait_on_recovery) {
849 /* any cluster changes that occurred after dropping the
 850 * dlm spinlock would be detectable by a change on the mle,
851 * so we only need to clear out the recovery map once. */
852 if (dlm_is_recovery_lock(lockid, namelen)) {
853 mlog(ML_NOTICE, "%s: recovery map is not empty, but "
854 "must master $RECOVERY lock now\n", dlm->name);
855 if (!dlm_pre_master_reco_lockres(dlm, res))
856 wait_on_recovery = 0;
857 else {
858 mlog(0, "%s: waiting 500ms for heartbeat state "
859 "change\n", dlm->name);
860 msleep(500);
861 }
862 continue;
863 }
864
865 dlm_kick_recovery_thread(dlm);
866 msleep(100);
867 dlm_wait_for_recovery(dlm);
868
869 spin_lock(&dlm->spinlock);
870 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
871 if (bit < O2NM_MAX_NODES) {
 872 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
873 "recover before lock mastery can begin\n",
874 dlm->name, namelen, (char *)lockid, bit);
875 wait_on_recovery = 1;
876 } else
877 wait_on_recovery = 0;
878 spin_unlock(&dlm->spinlock);
879 }
880
881 /* must wait for lock to be mastered elsewhere */
882 if (blocked)
883 goto wait;
884
885redo_request:
886 ret = -EINVAL;
887 dlm_node_iter_init(mle->vote_map, &iter);
888 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
889 ret = dlm_do_master_request(mle, nodenum);
890 if (ret < 0)
891 mlog_errno(ret);
892 if (mle->master != O2NM_MAX_NODES) {
893 /* found a master ! */
894 if (mle->master <= nodenum)
895 break;
896 /* if our master request has not reached the master
897 * yet, keep going until it does. this is how the
898 * master will know that asserts are needed back to
899 * the lower nodes. */
900 mlog(0, "%s:%.*s: requests only up to %u but master "
901 "is %u, keep going\n", dlm->name, namelen,
902 lockid, nodenum, mle->master);
903 }
904 }
905
906wait:
907 /* keep going until the response map includes all nodes */
908 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
909 if (ret < 0) {
910 mlog(0, "%s:%.*s: node map changed, redo the "
911 "master request now, blocked=%d\n",
912 dlm->name, res->lockname.len,
913 res->lockname.name, blocked);
914 if (++tries > 20) {
915 mlog(ML_ERROR, "%s:%.*s: spinning on "
916 "dlm_wait_for_lock_mastery, blocked=%d\n",
917 dlm->name, res->lockname.len,
918 res->lockname.name, blocked);
919 dlm_print_one_lock_resource(res);
920 /* dlm_print_one_mle(mle); */
921 tries = 0;
922 }
923 goto redo_request;
924 }
925
926 mlog(0, "lockres mastered by %u\n", res->owner);
927 /* make sure we never continue without this */
928 BUG_ON(res->owner == O2NM_MAX_NODES);
929
930 /* master is known, detach if not already detached */
931 dlm_mle_detach_hb_events(dlm, mle);
932 dlm_put_mle(mle);
933 /* put the extra ref */
 934 dlm_put_mle_inuse(mle);
935
936wake_waiters:
937 spin_lock(&res->spinlock);
938 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
939 spin_unlock(&res->spinlock);
940 wake_up(&res->wq);
941
942leave:
943 /* need to free the unused mle */
944 if (alloc_mle)
945 kmem_cache_free(dlm_mle_cache, alloc_mle);
946
947 return res;
948}
949
950
951#define DLM_MASTERY_TIMEOUT_MS 5000
952
953static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
954 struct dlm_lock_resource *res,
955 struct dlm_master_list_entry *mle,
956 int *blocked)
957{
958 u8 m;
959 int ret, bit;
960 int map_changed, voting_done;
961 int assert, sleep;
962
963recheck:
964 ret = 0;
965 assert = 0;
966
967 /* check if another node has already become the owner */
968 spin_lock(&res->spinlock);
969 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
970 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
971 res->lockname.len, res->lockname.name, res->owner);
 972 spin_unlock(&res->spinlock);
973 /* this will cause the master to re-assert across
974 * the whole cluster, freeing up mles */
975 ret = dlm_do_master_request(mle, res->owner);
976 if (ret < 0) {
977 /* give recovery a chance to run */
978 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
979 msleep(500);
980 goto recheck;
981 }
982 ret = 0;
983 goto leave;
984 }
985 spin_unlock(&res->spinlock);
986
987 spin_lock(&mle->spinlock);
988 m = mle->master;
989 map_changed = (memcmp(mle->vote_map, mle->node_map,
990 sizeof(mle->vote_map)) != 0);
991 voting_done = (memcmp(mle->vote_map, mle->response_map,
992 sizeof(mle->vote_map)) == 0);
993
994 /* restart if we hit any errors */
995 if (map_changed) {
996 int b;
997 mlog(0, "%s: %.*s: node map changed, restarting\n",
998 dlm->name, res->lockname.len, res->lockname.name);
999 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1000 b = (mle->type == DLM_MLE_BLOCK);
1001 if ((*blocked && !b) || (!*blocked && b)) {
1002 mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1003 dlm->name, res->lockname.len, res->lockname.name,
1004 *blocked, b);
1005 *blocked = b;
1006 }
1007 spin_unlock(&mle->spinlock);
1008 if (ret < 0) {
1009 mlog_errno(ret);
1010 goto leave;
1011 }
1012 mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1013 "rechecking now\n", dlm->name, res->lockname.len,
1014 res->lockname.name);
1015 goto recheck;
1016 } else {
1017 if (!voting_done) {
1018 mlog(0, "map not changed and voting not done "
1019 "for %s:%.*s\n", dlm->name, res->lockname.len,
1020 res->lockname.name);
1021 }
1022 }
1023
1024 if (m != O2NM_MAX_NODES) {
1025 /* another node has done an assert!
1026 * all done! */
1027 sleep = 0;
1028 } else {
1029 sleep = 1;
1030 /* have all nodes responded? */
1031 if (voting_done && !*blocked) {
1032 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1033 if (dlm->node_num <= bit) {
1034 /* my node number is lowest.
1035 * now tell other nodes that I am
1036 * mastering this. */
1037 mle->master = dlm->node_num;
1038 assert = 1;
1039 sleep = 0;
1040 }
1041 /* if voting is done, but we have not received
1042 * an assert master yet, we must sleep */
1043 }
1044 }
1045
1046 spin_unlock(&mle->spinlock);
1047
1048 /* sleep if we haven't finished voting yet */
1049 if (sleep) {
1050 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1051
1052 /*
1053 if (atomic_read(&mle->mle_refs.refcount) < 2)
1054 mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1055 atomic_read(&mle->mle_refs.refcount),
1056 res->lockname.len, res->lockname.name);
1057 */
1058 atomic_set(&mle->woken, 0);
1059 (void)wait_event_timeout(mle->wq,
1060 (atomic_read(&mle->woken) == 1),
1061 timeo);
1062 if (res->owner == O2NM_MAX_NODES) {
1063 mlog(0, "waiting again\n");
1064 goto recheck;
1065 }
1066 mlog(0, "done waiting, master is %u\n", res->owner);
1067 ret = 0;
1068 goto leave;
1069 }
1070
1071 ret = 0; /* done */
1072 if (assert) {
1073 m = dlm->node_num;
1074 mlog(0, "about to master %.*s here, this=%u\n",
1075 res->lockname.len, res->lockname.name, m);
1076 ret = dlm_do_assert_master(dlm, res->lockname.name,
1077 res->lockname.len, mle->vote_map, 0);
1078 if (ret) {
1079 /* This is a failure in the network path,
1080 * not in the response to the assert_master
1081 * (any nonzero response is a BUG on this node).
1082 * Most likely a socket just got disconnected
1083 * due to node death. */
1084 mlog_errno(ret);
1085 }
1086 /* no longer need to restart lock mastery.
1087 * all living nodes have been contacted. */
1088 ret = 0;
1089 }
1090
1091 /* set the lockres owner */
1092 spin_lock(&res->spinlock);
1093 dlm_change_lockres_owner(dlm, res, m);
1094 spin_unlock(&res->spinlock);
1095
1096leave:
1097 return ret;
1098}
1099
1100struct dlm_bitmap_diff_iter
1101{
1102 int curnode;
1103 unsigned long *orig_bm;
1104 unsigned long *cur_bm;
1105 unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1106};
1107
1108enum dlm_node_state_change
1109{
1110 NODE_DOWN = -1,
1111 NODE_NO_CHANGE = 0,
1112 NODE_UP
1113};
1114
1115static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1116 unsigned long *orig_bm,
1117 unsigned long *cur_bm)
1118{
1119 unsigned long p1, p2;
1120 int i;
1121
1122 iter->curnode = -1;
1123 iter->orig_bm = orig_bm;
1124 iter->cur_bm = cur_bm;
1125
1126 for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1127 p1 = *(iter->orig_bm + i);
1128 p2 = *(iter->cur_bm + i);
1129 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1130 }
1131}
1132
1133static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1134 enum dlm_node_state_change *state)
1135{
1136 int bit;
1137
1138 if (iter->curnode >= O2NM_MAX_NODES)
1139 return -ENOENT;
1140
1141 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1142 iter->curnode+1);
1143 if (bit >= O2NM_MAX_NODES) {
1144 iter->curnode = O2NM_MAX_NODES;
1145 return -ENOENT;
1146 }
1147
1148 /* if it was there in the original then this node died */
1149 if (test_bit(bit, iter->orig_bm))
1150 *state = NODE_DOWN;
1151 else
1152 *state = NODE_UP;
1153
1154 iter->curnode = bit;
1155 return bit;
1156}
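/*
 * Usage sketch (this is how dlm_restart_lock_mastery() below drives it):
 * the iterator walks the XOR of the two bitmaps, reporting NODE_DOWN for
 * bits that were set in orig_bm and NODE_UP for bits newly set in cur_bm:
 *
 *	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
 *	while ((node = dlm_bitmap_diff_iter_next(&bdi, &sc)) >= 0) {
 *		... handle NODE_UP / NODE_DOWN for 'node' ...
 *	}
 */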
1157
1158
1159static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1160 struct dlm_lock_resource *res,
1161 struct dlm_master_list_entry *mle,
1162 int blocked)
1163{
1164 struct dlm_bitmap_diff_iter bdi;
1165 enum dlm_node_state_change sc;
1166 int node;
1167 int ret = 0;
1168
1169 mlog(0, "something happened such that the "
1170 "master process may need to be restarted!\n");
1171
1172 assert_spin_locked(&mle->spinlock);
1173
1174 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1175 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1176 while (node >= 0) {
1177 if (sc == NODE_UP) {
1178 /* a node came up. clear any old vote from
1179 * the response map and set it in the vote map
1180 * then restart the mastery. */
1181 mlog(ML_NOTICE, "node %d up while restarting\n", node);
1182
1183 /* redo the master request, but only for the new node */
1184 mlog(0, "sending request to new node\n");
1185 clear_bit(node, mle->response_map);
1186 set_bit(node, mle->vote_map);
1187 } else {
1188 mlog(ML_ERROR, "node down! %d\n", node);
1189
1190 /* if the node wasn't involved in mastery skip it,
1191 * but clear it out from the maps so that it will
1192 * not affect mastery of this lockres */
1193 clear_bit(node, mle->response_map);
1194 clear_bit(node, mle->vote_map);
1195 if (!test_bit(node, mle->maybe_map))
1196 goto next;
1197
1198 /* if we're already blocked on lock mastery, and the
1199 * dead node wasn't the expected master, or there is
1200 * another node in the maybe_map, keep waiting */
1201 if (blocked) {
1202 int lowest = find_next_bit(mle->maybe_map,
1203 O2NM_MAX_NODES, 0);
1204
1205 /* act like it was never there */
1206 clear_bit(node, mle->maybe_map);
1207
1208 if (node != lowest)
1209 goto next;
1210
1211 mlog(ML_ERROR, "expected master %u died while "
1212 "this node was blocked waiting on it!\n",
1213 node);
1214 lowest = find_next_bit(mle->maybe_map,
1215 O2NM_MAX_NODES,
1216 lowest+1);
1217 if (lowest < O2NM_MAX_NODES) {
1218 mlog(0, "still blocked. waiting "
1219 "on %u now\n", lowest);
1220 goto next;
1221 }
1222
1223 /* mle is an MLE_BLOCK, but there is now
1224 * nothing left to block on. we need to return
1225 * all the way back out and try again with
1226 * an MLE_MASTER. dlm_do_local_recovery_cleanup
1227 * has already run, so the mle refcount is ok */
1228 mlog(0, "no longer blocking. we can "
1229 "try to master this here\n");
1230 mle->type = DLM_MLE_MASTER;
1231 memset(mle->maybe_map, 0,
1232 sizeof(mle->maybe_map));
1233 memset(mle->response_map, 0,
1234 sizeof(mle->maybe_map));
1235 memcpy(mle->vote_map, mle->node_map,
1236 sizeof(mle->node_map));
1237 mle->u.res = res;
1238 set_bit(dlm->node_num, mle->maybe_map);
1239
1240 ret = -EAGAIN;
1241 goto next;
1242 }
1243
1244 clear_bit(node, mle->maybe_map);
1245 if (node > dlm->node_num)
1246 goto next;
1247
1248 mlog(0, "dead node in map!\n");
1249 /* yuck. go back and re-contact all nodes
1250 * in the vote_map, removing this node. */
1251 memset(mle->response_map, 0,
1252 sizeof(mle->response_map));
1253 }
1254 ret = -EAGAIN;
1255next:
1256 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1257 }
1258 return ret;
1259}
1260
1261
1262/*
1263 * DLM_MASTER_REQUEST_MSG
1264 *
1265 * returns: 0 on success,
1266 * -errno on a network error
1267 *
1268 * on error, the caller should assume the target node is "dead"
1269 *
1270 */
1271
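/*
 * Response handling below, roughly: YES records the responder as master and
 * sets its response bit; NO sets only the response bit; MAYBE sets both the
 * response and maybe bits; ERROR causes a short sleep and a resend.  -ENOMEM
 * from o2net is also retried after a delay, while any other non-host-down
 * error is treated as fatal.
 */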
1272static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to)
1273{
1274 struct dlm_ctxt *dlm = mle->dlm;
1275 struct dlm_master_request request;
1276 int ret, response=0, resend;
1277
1278 memset(&request, 0, sizeof(request));
1279 request.node_idx = dlm->node_num;
1280
1281 BUG_ON(mle->type == DLM_MLE_MIGRATION);
1282
1283 if (mle->type != DLM_MLE_MASTER) {
1284 request.namelen = mle->u.name.len;
1285 memcpy(request.name, mle->u.name.name, request.namelen);
1286 } else {
1287 request.namelen = mle->u.res->lockname.len;
1288 memcpy(request.name, mle->u.res->lockname.name,
1289 request.namelen);
1290 }
1291
1292again:
1293 ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1294 sizeof(request), to, &response);
1295 if (ret < 0) {
1296 if (ret == -ESRCH) {
1297 /* should never happen */
1298 mlog(ML_ERROR, "TCP stack not ready!\n");
1299 BUG();
1300 } else if (ret == -EINVAL) {
1301 mlog(ML_ERROR, "bad args passed to o2net!\n");
1302 BUG();
1303 } else if (ret == -ENOMEM) {
1304 mlog(ML_ERROR, "out of memory while trying to send "
1305 "network message! retrying\n");
1306 /* this is totally crude */
1307 msleep(50);
1308 goto again;
1309 } else if (!dlm_is_host_down(ret)) {
1310 /* not a network error. bad. */
1311 mlog_errno(ret);
1312 mlog(ML_ERROR, "unhandled error!");
1313 BUG();
1314 }
1315 /* all other errors should be network errors,
1316 * and likely indicate node death */
1317 mlog(ML_ERROR, "link to %d went down!\n", to);
1318 goto out;
1319 }
1320
1321 ret = 0;
1322 resend = 0;
1323 spin_lock(&mle->spinlock);
1324 switch (response) {
1325 case DLM_MASTER_RESP_YES:
1326 set_bit(to, mle->response_map);
1327 mlog(0, "node %u is the master, response=YES\n", to);
1328 mle->master = to;
1329 break;
1330 case DLM_MASTER_RESP_NO:
1331 mlog(0, "node %u not master, response=NO\n", to);
1332 set_bit(to, mle->response_map);
1333 break;
1334 case DLM_MASTER_RESP_MAYBE:
1335 mlog(0, "node %u not master, response=MAYBE\n", to);
1336 set_bit(to, mle->response_map);
1337 set_bit(to, mle->maybe_map);
1338 break;
1339 case DLM_MASTER_RESP_ERROR:
1340 mlog(0, "node %u hit an error, resending\n", to);
1341 resend = 1;
1342 response = 0;
1343 break;
1344 default:
1345 mlog(ML_ERROR, "bad response! %u\n", response);
1346 BUG();
1347 }
1348 spin_unlock(&mle->spinlock);
1349 if (resend) {
1350 /* this is also totally crude */
1351 msleep(50);
1352 goto again;
1353 }
1354
1355out:
1356 return ret;
1357}
1358
1359/*
1360 * locks that can be taken here:
1361 * dlm->spinlock
1362 * res->spinlock
1363 * mle->spinlock
1364 * dlm->master_list
1365 *
1366 * if possible, TRIM THIS DOWN!!!
1367 */
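/*
 * Rough decision table for the handler below:
 *   lockres owned by this node        -> YES, plus an assert worker is
 *                                        dispatched to clean up stray mles
 *   lockres owned by another node     -> NO
 *   lockres unowned but IN_PROGRESS   -> consult the mle: BLOCK/MIGRATION
 *                                        answer NO; a known master answers
 *                                        YES (this node) or NO (other node);
 *                                        otherwise MAYBE
 *   no lockres on this node           -> add or consult an MLE_BLOCK and
 *                                        answer NO, or MAYBE if this node is
 *                                        itself trying to master the lock
 */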
1368int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
1369{
1370 u8 response = DLM_MASTER_RESP_MAYBE;
1371 struct dlm_ctxt *dlm = data;
 1372 struct dlm_lock_resource *res = NULL;
1373 struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1374 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1375 char *name;
 1376 unsigned int namelen, hash;
1377 int found, ret;
1378 int set_maybe;
 1379 int dispatch_assert = 0;
1380
1381 if (!dlm_grab(dlm))
1382 return DLM_MASTER_RESP_NO;
1383
1384 if (!dlm_domain_fully_joined(dlm)) {
1385 response = DLM_MASTER_RESP_NO;
1386 goto send_response;
1387 }
1388
1389 name = request->name;
1390 namelen = request->namelen;
 1391 hash = dlm_lockid_hash(name, namelen);
1392
1393 if (namelen > DLM_LOCKID_NAME_MAX) {
1394 response = DLM_IVBUFLEN;
1395 goto send_response;
1396 }
1397
1398way_up_top:
1399 spin_lock(&dlm->spinlock);
 1400 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1401 if (res) {
1402 spin_unlock(&dlm->spinlock);
1403
1404 /* take care of the easy cases up front */
1405 spin_lock(&res->spinlock);
1406 if (res->state & DLM_LOCK_RES_RECOVERING) {
1407 spin_unlock(&res->spinlock);
1408 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1409 "being recovered\n");
1410 response = DLM_MASTER_RESP_ERROR;
1411 if (mle)
1412 kmem_cache_free(dlm_mle_cache, mle);
1413 goto send_response;
1414 }
1415
1416 if (res->owner == dlm->node_num) {
1417 spin_unlock(&res->spinlock);
1418 // mlog(0, "this node is the master\n");
1419 response = DLM_MASTER_RESP_YES;
1420 if (mle)
1421 kmem_cache_free(dlm_mle_cache, mle);
1422
1423 /* this node is the owner.
1424 * there is some extra work that needs to
1425 * happen now. the requesting node has
1426 * caused all nodes up to this one to
1427 * create mles. this node now needs to
1428 * go back and clean those up. */
 1429 dispatch_assert = 1;
1430 goto send_response;
1431 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1432 spin_unlock(&res->spinlock);
1433 // mlog(0, "node %u is the master\n", res->owner);
1434 response = DLM_MASTER_RESP_NO;
1435 if (mle)
1436 kmem_cache_free(dlm_mle_cache, mle);
1437 goto send_response;
1438 }
1439
1440 /* ok, there is no owner. either this node is
1441 * being blocked, or it is actively trying to
1442 * master this lock. */
1443 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1444 mlog(ML_ERROR, "lock with no owner should be "
1445 "in-progress!\n");
1446 BUG();
1447 }
1448
1449 // mlog(0, "lockres is in progress...\n");
1450 spin_lock(&dlm->master_lock);
1451 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1452 if (!found) {
1453 mlog(ML_ERROR, "no mle found for this lock!\n");
1454 BUG();
1455 }
1456 set_maybe = 1;
1457 spin_lock(&tmpmle->spinlock);
1458 if (tmpmle->type == DLM_MLE_BLOCK) {
1459 // mlog(0, "this node is waiting for "
1460 // "lockres to be mastered\n");
1461 response = DLM_MASTER_RESP_NO;
1462 } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1463 mlog(0, "node %u is master, but trying to migrate to "
1464 "node %u.\n", tmpmle->master, tmpmle->new_master);
1465 if (tmpmle->master == dlm->node_num) {
1466 response = DLM_MASTER_RESP_YES;
1467 mlog(ML_ERROR, "no owner on lockres, but this "
1468 "node is trying to migrate it to %u?!\n",
1469 tmpmle->new_master);
1470 BUG();
1471 } else {
1472 /* the real master can respond on its own */
1473 response = DLM_MASTER_RESP_NO;
1474 }
1475 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1476 set_maybe = 0;
 1477 if (tmpmle->master == dlm->node_num) {
 1478 response = DLM_MASTER_RESP_YES;
1479 /* this node will be the owner.
1480 * go back and clean the mles on any
1481 * other nodes */
1482 dispatch_assert = 1;
1483 } else
1484 response = DLM_MASTER_RESP_NO;
1485 } else {
1486 // mlog(0, "this node is attempting to "
1487 // "master lockres\n");
1488 response = DLM_MASTER_RESP_MAYBE;
1489 }
1490 if (set_maybe)
1491 set_bit(request->node_idx, tmpmle->maybe_map);
1492 spin_unlock(&tmpmle->spinlock);
1493
1494 spin_unlock(&dlm->master_lock);
1495 spin_unlock(&res->spinlock);
1496
1497 /* keep the mle attached to heartbeat events */
1498 dlm_put_mle(tmpmle);
1499 if (mle)
1500 kmem_cache_free(dlm_mle_cache, mle);
1501 goto send_response;
1502 }
1503
1504 /*
1505 * lockres doesn't exist on this node
1506 * if there is an MLE_BLOCK, return NO
1507 * if there is an MLE_MASTER, return MAYBE
1508 * otherwise, add an MLE_BLOCK, return NO
1509 */
1510 spin_lock(&dlm->master_lock);
1511 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1512 if (!found) {
1513 /* this lockid has never been seen on this node yet */
1514 // mlog(0, "no mle found\n");
1515 if (!mle) {
1516 spin_unlock(&dlm->master_lock);
1517 spin_unlock(&dlm->spinlock);
1518
1519 mle = (struct dlm_master_list_entry *)
1520 kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
1521 if (!mle) {
 1522 response = DLM_MASTER_RESP_ERROR;
 1523 mlog_errno(-ENOMEM);
1524 goto send_response;
1525 }
1526 goto way_up_top;
1527 }
1528
1529 // mlog(0, "this is second time thru, already allocated, "
1530 // "add the block.\n");
 1531 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1532 set_bit(request->node_idx, mle->maybe_map);
1533 list_add(&mle->list, &dlm->master_list);
1534 response = DLM_MASTER_RESP_NO;
1535 } else {
1536 // mlog(0, "mle was found\n");
1537 set_maybe = 1;
1538 spin_lock(&tmpmle->spinlock);
1539 if (tmpmle->master == dlm->node_num) {
1540 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1541 BUG();
1542 }
1543 if (tmpmle->type == DLM_MLE_BLOCK)
1544 response = DLM_MASTER_RESP_NO;
1545 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1546 mlog(0, "migration mle was found (%u->%u)\n",
1547 tmpmle->master, tmpmle->new_master);
1548 /* real master can respond on its own */
1549 response = DLM_MASTER_RESP_NO;
1550 } else
1551 response = DLM_MASTER_RESP_MAYBE;
1552 if (set_maybe)
1553 set_bit(request->node_idx, tmpmle->maybe_map);
1554 spin_unlock(&tmpmle->spinlock);
1555 }
1556 spin_unlock(&dlm->master_lock);
1557 spin_unlock(&dlm->spinlock);
1558
1559 if (found) {
1560 /* keep the mle attached to heartbeat events */
1561 dlm_put_mle(tmpmle);
1562 }
1563send_response:
1564
1565 if (dispatch_assert) {
1566 if (response != DLM_MASTER_RESP_YES)
1567 mlog(ML_ERROR, "invalid response %d\n", response);
1568 if (!res) {
1569 mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1570 BUG();
1571 }
1572 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1573 dlm->node_num, res->lockname.len, res->lockname.name);
1574 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1575 DLM_ASSERT_MASTER_MLE_CLEANUP);
1576 if (ret < 0) {
1577 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1578 response = DLM_MASTER_RESP_ERROR;
1579 }
1580 }
1581
1582 dlm_put(dlm);
1583 return response;
1584}
1585
1586/*
1587 * DLM_ASSERT_MASTER_MSG
1588 */
1589
1590
1591/*
1592 * NOTE: this can be used for debugging
1593 * can periodically run all locks owned by this node
1594 * and re-assert across the cluster...
1595 */
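/*
 * Return handling in the loop below, roughly: a network error to one node
 * marks that node dead and is folded into the nonzero return; a negative
 * status from the remote handler is fatal (BUG); a positive EAGAIN reply
 * means the remote node created mles on other nodes and the whole nodemap
 * is walked again (see dlm_assert_master_handler returning EAGAIN).
 */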
1596static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
1597 unsigned int namelen, void *nodemap,
1598 u32 flags)
1599{
1600 struct dlm_assert_master assert;
1601 int to, tmpret;
1602 struct dlm_node_iter iter;
1603 int ret = 0;
 1604 int reassert;
1605
1606 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1607again:
1608 reassert = 0;
1609
1610 /* note that if this nodemap is empty, it returns 0 */
1611 dlm_node_iter_init(nodemap, &iter);
1612 while ((to = dlm_node_iter_next(&iter)) >= 0) {
1613 int r = 0;
1614 mlog(0, "sending assert master to %d (%.*s)\n", to,
1615 namelen, lockname);
1616 memset(&assert, 0, sizeof(assert));
1617 assert.node_idx = dlm->node_num;
1618 assert.namelen = namelen;
1619 memcpy(assert.name, lockname, namelen);
1620 assert.flags = cpu_to_be32(flags);
1621
1622 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1623 &assert, sizeof(assert), to, &r);
1624 if (tmpret < 0) {
1625 mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
1626 if (!dlm_is_host_down(tmpret)) {
1627 mlog(ML_ERROR, "unhandled error!\n");
1628 BUG();
1629 }
1630 /* a node died. finish out the rest of the nodes. */
1631 mlog(ML_ERROR, "link to %d went down!\n", to);
1632 /* any nonzero status return will do */
1633 ret = tmpret;
1634 } else if (r < 0) {
 1635 /* ok, something is horribly messed up. kill thyself. */
1636 mlog(ML_ERROR,"during assert master of %.*s to %u, "
1637 "got %d.\n", namelen, lockname, to, r);
1638 dlm_dump_lock_resources(dlm);
1639 BUG();
1640 } else if (r == EAGAIN) {
1641 mlog(0, "%.*s: node %u create mles on other "
1642 "nodes and requests a re-assert\n",
1643 namelen, lockname, to);
1644 reassert = 1;
1645 }
1646 }
1647
1648 if (reassert)
1649 goto again;
1650
1651 return ret;
1652}
1653
1654/*
1655 * locks that can be taken here:
1656 * dlm->spinlock
1657 * res->spinlock
1658 * mle->spinlock
1659 * dlm->master_list
1660 *
1661 * if possible, TRIM THIS DOWN!!!
1662 */
1663int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1664{
1665 struct dlm_ctxt *dlm = data;
1666 struct dlm_master_list_entry *mle = NULL;
1667 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1668 struct dlm_lock_resource *res = NULL;
1669 char *name;
 1670 unsigned int namelen, hash;
 1671 u32 flags;
1672 int master_request = 0;
1673 int ret = 0;
1674
1675 if (!dlm_grab(dlm))
1676 return 0;
1677
1678 name = assert->name;
1679 namelen = assert->namelen;
 1680 hash = dlm_lockid_hash(name, namelen);
1681 flags = be32_to_cpu(assert->flags);
1682
1683 if (namelen > DLM_LOCKID_NAME_MAX) {
1684 mlog(ML_ERROR, "Invalid name length!");
1685 goto done;
1686 }
1687
1688 spin_lock(&dlm->spinlock);
1689
1690 if (flags)
1691 mlog(0, "assert_master with flags: %u\n", flags);
1692
1693 /* find the MLE */
1694 spin_lock(&dlm->master_lock);
1695 if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1696 /* not an error, could be master just re-asserting */
1697 mlog(0, "just got an assert_master from %u, but no "
1698 "MLE for it! (%.*s)\n", assert->node_idx,
1699 namelen, name);
1700 } else {
1701 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1702 if (bit >= O2NM_MAX_NODES) {
1703 /* not necessarily an error, though less likely.
1704 * could be master just re-asserting. */
 1705 mlog(0, "no bits set in the maybe_map, but %u "
1706 "is asserting! (%.*s)\n", assert->node_idx,
1707 namelen, name);
1708 } else if (bit != assert->node_idx) {
1709 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1710 mlog(0, "master %u was found, %u should "
1711 "back off\n", assert->node_idx, bit);
1712 } else {
1713 /* with the fix for bug 569, a higher node
1714 * number winning the mastery will respond
1715 * YES to mastery requests, but this node
1716 * had no way of knowing. let it pass. */
 1717 mlog(0, "%u is the lowest node, "
1718 "%u is asserting. (%.*s) %u must "
1719 "have begun after %u won.\n", bit,
1720 assert->node_idx, namelen, name, bit,
1721 assert->node_idx);
1722 }
1723 }
1724 if (mle->type == DLM_MLE_MIGRATION) {
1725 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1726 mlog(0, "%s:%.*s: got cleanup assert"
1727 " from %u for migration\n",
1728 dlm->name, namelen, name,
1729 assert->node_idx);
1730 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1731 mlog(0, "%s:%.*s: got unrelated assert"
1732 " from %u for migration, ignoring\n",
1733 dlm->name, namelen, name,
1734 assert->node_idx);
1735 __dlm_put_mle(mle);
1736 spin_unlock(&dlm->master_lock);
1737 spin_unlock(&dlm->spinlock);
1738 goto done;
1739 }
1740 }
1741 }
1742 spin_unlock(&dlm->master_lock);
1743
1744 /* ok everything checks out with the MLE
1745 * now check to see if there is a lockres */
 1746 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1747 if (res) {
1748 spin_lock(&res->spinlock);
1749 if (res->state & DLM_LOCK_RES_RECOVERING) {
1750 mlog(ML_ERROR, "%u asserting but %.*s is "
1751 "RECOVERING!\n", assert->node_idx, namelen, name);
1752 goto kill;
1753 }
1754 if (!mle) {
1755 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1756 res->owner != assert->node_idx) {
1757 mlog(ML_ERROR, "assert_master from "
1758 "%u, but current owner is "
1759 "%u! (%.*s)\n",
1760 assert->node_idx, res->owner,
1761 namelen, name);
1762 goto kill;
1763 }
1764 } else if (mle->type != DLM_MLE_MIGRATION) {
1765 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1766 /* owner is just re-asserting */
1767 if (res->owner == assert->node_idx) {
1768 mlog(0, "owner %u re-asserting on "
1769 "lock %.*s\n", assert->node_idx,
1770 namelen, name);
1771 goto ok;
1772 }
1773 mlog(ML_ERROR, "got assert_master from "
1774 "node %u, but %u is the owner! "
1775 "(%.*s)\n", assert->node_idx,
1776 res->owner, namelen, name);
1777 goto kill;
1778 }
1779 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1780 mlog(ML_ERROR, "got assert from %u, but lock "
1781 "with no owner should be "
1782 "in-progress! (%.*s)\n",
1783 assert->node_idx,
1784 namelen, name);
1785 goto kill;
1786 }
1787 } else /* mle->type == DLM_MLE_MIGRATION */ {
1788 /* should only be getting an assert from new master */
1789 if (assert->node_idx != mle->new_master) {
1790 mlog(ML_ERROR, "got assert from %u, but "
1791 "new master is %u, and old master "
1792 "was %u (%.*s)\n",
1793 assert->node_idx, mle->new_master,
1794 mle->master, namelen, name);
1795 goto kill;
1796 }
1797
1798 }
1799ok:
1800 spin_unlock(&res->spinlock);
1801 }
1802 spin_unlock(&dlm->spinlock);
1803
1804 // mlog(0, "woo! got an assert_master from node %u!\n",
1805 // assert->node_idx);
1806 if (mle) {
1807 int extra_ref = 0;
1808 int nn = -1;
 1809 int rr, err = 0;
1810
1811 spin_lock(&mle->spinlock);
1812 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1813 extra_ref = 1;
1814 else {
1815 /* MASTER mle: if any bits set in the response map
1816 * then the calling node needs to re-assert to clear
1817 * up nodes that this node contacted */
1818 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1819 nn+1)) < O2NM_MAX_NODES) {
1820 if (nn != dlm->node_num && nn != assert->node_idx)
1821 master_request = 1;
1822 }
1823 }
1824 mle->master = assert->node_idx;
1825 atomic_set(&mle->woken, 1);
1826 wake_up(&mle->wq);
1827 spin_unlock(&mle->spinlock);
1828
 1829 if (res) {
 1830 spin_lock(&res->spinlock);
1831 if (mle->type == DLM_MLE_MIGRATION) {
1832 mlog(0, "finishing off migration of lockres %.*s, "
1833 "from %u to %u\n",
1834 res->lockname.len, res->lockname.name,
1835 dlm->node_num, mle->new_master);
1836 res->state &= ~DLM_LOCK_RES_MIGRATING;
1837 dlm_change_lockres_owner(dlm, res, mle->new_master);
1838 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1839 } else {
1840 dlm_change_lockres_owner(dlm, res, mle->master);
1841 }
1842 spin_unlock(&res->spinlock);
1843 }
1844
1845 /* master is known, detach if not already detached.
1846 * ensures that only one assert_master call will happen
1847 * on this mle. */
1848 spin_lock(&dlm->spinlock);
1849 spin_lock(&dlm->master_lock);
1850
1851 rr = atomic_read(&mle->mle_refs.refcount);
1852 if (mle->inuse > 0) {
1853 if (extra_ref && rr < 3)
1854 err = 1;
1855 else if (!extra_ref && rr < 2)
1856 err = 1;
1857 } else {
1858 if (extra_ref && rr < 2)
1859 err = 1;
1860 else if (!extra_ref && rr < 1)
1861 err = 1;
1862 }
1863 if (err) {
1864 mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1865 "that will mess up this node, refs=%d, extra=%d, "
1866 "inuse=%d\n", dlm->name, namelen, name,
1867 assert->node_idx, rr, extra_ref, mle->inuse);
1868 dlm_print_one_mle(mle);
1869 }
1870 list_del_init(&mle->list);
1871 __dlm_mle_detach_hb_events(dlm, mle);
1872 __dlm_put_mle(mle);
1873 if (extra_ref) {
1874 /* the assert master message now balances the extra
1875 * ref given by the master / migration request message.
1876 * if this is the last put, it will be removed
1877 * from the list. */
1878 __dlm_put_mle(mle);
1879 }
1880 spin_unlock(&dlm->master_lock);
1881 spin_unlock(&dlm->spinlock);
1882 } else if (res) {
1883 if (res->owner != assert->node_idx) {
1884 mlog(0, "assert_master from %u, but current "
1885 "owner is %u (%.*s), no mle\n", assert->node_idx,
1886 res->owner, namelen, name);
1887 }
1888 }
1889
1890done:
 1891 ret = 0;
1892 if (res)
1893 dlm_lockres_put(res);
1894 dlm_put(dlm);
1895 if (master_request) {
1896 mlog(0, "need to tell master to reassert\n");
1897 ret = EAGAIN; // positive. negative would shoot down the node.
1898 }
1899 return ret;
1900
1901kill:
1902 /* kill the caller! */
1903 spin_unlock(&res->spinlock);
1904 spin_unlock(&dlm->spinlock);
1905 dlm_lockres_put(res);
1906 mlog(ML_ERROR, "Bad message received from another node. Dumping state "
1907 "and killing the other node now! This node is OK and can continue.\n");
1908 dlm_dump_lock_resources(dlm);
1909 dlm_put(dlm);
1910 return -EINVAL;
1911}
1912
1913int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1914 struct dlm_lock_resource *res,
1915 int ignore_higher, u8 request_from, u32 flags)
1916{
1917 struct dlm_work_item *item;
1918 item = kcalloc(1, sizeof(*item), GFP_KERNEL);
1919 if (!item)
1920 return -ENOMEM;
1921
1922
1923 /* queue up work for dlm_assert_master_worker */
1924 dlm_grab(dlm); /* get an extra ref for the work item */
1925 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
1926 item->u.am.lockres = res; /* already have a ref */
1927 /* can optionally ignore node numbers higher than this node */
1928 item->u.am.ignore_higher = ignore_higher;
1929 item->u.am.request_from = request_from;
1930 item->u.am.flags = flags;
1931
9c6510a5
KH
1932 if (ignore_higher)
1933 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
1934 res->lockname.name);
1935
6714d8e8
KH
1936 spin_lock(&dlm->work_lock);
1937 list_add_tail(&item->list, &dlm->work_list);
1938 spin_unlock(&dlm->work_lock);
1939
1940 schedule_work(&dlm->dispatched_work);
1941 return 0;
1942}
1943
1944static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
1945{
1946 struct dlm_ctxt *dlm = data;
1947 int ret = 0;
1948 struct dlm_lock_resource *res;
1949 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
1950 int ignore_higher;
1951 int bit;
1952 u8 request_from;
1953 u32 flags;
1954
1955 dlm = item->dlm;
1956 res = item->u.am.lockres;
1957 ignore_higher = item->u.am.ignore_higher;
1958 request_from = item->u.am.request_from;
1959 flags = item->u.am.flags;
1960
1961 spin_lock(&dlm->spinlock);
1962 memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
1963 spin_unlock(&dlm->spinlock);
1964
1965 clear_bit(dlm->node_num, nodemap);
1966 if (ignore_higher) {
 1967 		/* if this is just to clear up mles for nodes below
1968 * this node, do not send the message to the original
1969 * caller or any node number higher than this */
1970 clear_bit(request_from, nodemap);
1971 bit = dlm->node_num;
1972 while (1) {
1973 bit = find_next_bit(nodemap, O2NM_MAX_NODES,
1974 bit+1);
1975 if (bit >= O2NM_MAX_NODES)
1976 break;
1977 clear_bit(bit, nodemap);
1978 }
1979 }
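	/* nodemap now holds exactly the set of nodes that still need to
	 * receive the assert_master message */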
1980
1981 /* this call now finishes out the nodemap
1982 * even if one or more nodes die */
1983 mlog(0, "worker about to master %.*s here, this=%u\n",
1984 res->lockname.len, res->lockname.name, dlm->node_num);
1985 ret = dlm_do_assert_master(dlm, res->lockname.name,
1986 res->lockname.len,
1987 nodemap, flags);
1988 if (ret < 0) {
1989 /* no need to restart, we are done */
1990 mlog_errno(ret);
1991 }
1992
1993 dlm_lockres_put(res);
1994
1995 mlog(0, "finished with dlm_assert_master_worker\n");
1996}
1997
c03872f5
KH
1998/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
1999 * We cannot wait for node recovery to complete to begin mastering this
2000 * lockres because this lockres is used to kick off recovery! ;-)
2001 * So, do a pre-check on all living nodes to see if any of those nodes
2002 * think that $RECOVERY is currently mastered by a dead node. If so,
2003 * we wait a short time to allow that node to get notified by its own
2004 * heartbeat stack, then check again. All $RECOVERY lock resources
 2005  * mastered by dead nodes are purged when the heartbeat callback is
2006 * fired, so we can know for sure that it is safe to continue once
2007 * the node returns a live node or no node. */
2008static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2009 struct dlm_lock_resource *res)
2010{
2011 struct dlm_node_iter iter;
2012 int nodenum;
2013 int ret = 0;
2014 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2015
2016 spin_lock(&dlm->spinlock);
2017 dlm_node_iter_init(dlm->domain_map, &iter);
2018 spin_unlock(&dlm->spinlock);
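	/* the domain map was snapshotted into the iterator under the
	 * spinlock; each live node is queried below without holding it */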
2019
2020 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2021 /* do not send to self */
2022 if (nodenum == dlm->node_num)
2023 continue;
2024 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2025 if (ret < 0) {
2026 mlog_errno(ret);
2027 if (!dlm_is_host_down(ret))
2028 BUG();
2029 /* host is down, so answer for that node would be
2030 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
2031 }
2032
2033 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2034 /* check to see if this master is in the recovery map */
2035 spin_lock(&dlm->spinlock);
2036 if (test_bit(master, dlm->recovery_map)) {
2037 mlog(ML_NOTICE, "%s: node %u has not seen "
2038 "node %u go down yet, and thinks the "
2039 "dead node is mastering the recovery "
2040 "lock. must wait.\n", dlm->name,
2041 nodenum, master);
2042 ret = -EAGAIN;
2043 }
2044 spin_unlock(&dlm->spinlock);
2045 mlog(0, "%s: reco lock master is %u\n", dlm->name,
2046 master);
2047 break;
2048 }
2049 }
2050 return ret;
2051}
2052
6714d8e8
KH
2053
2054/*
2055 * DLM_MIGRATE_LOCKRES
2056 */
2057
2058
2059int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2060 u8 target)
2061{
2062 struct dlm_master_list_entry *mle = NULL;
2063 struct dlm_master_list_entry *oldmle = NULL;
2064 struct dlm_migratable_lockres *mres = NULL;
2065 int ret = -EINVAL;
2066 const char *name;
2067 unsigned int namelen;
2068 int mle_added = 0;
2069 struct list_head *queue, *iter;
2070 int i;
2071 struct dlm_lock *lock;
2072 int empty = 1;
2073
2074 if (!dlm_grab(dlm))
2075 return -EINVAL;
2076
2077 name = res->lockname.name;
2078 namelen = res->lockname.len;
2079
2080 mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2081
2082 /*
2083 * ensure this lockres is a proper candidate for migration
2084 */
2085 spin_lock(&res->spinlock);
2086 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2087 mlog(0, "cannot migrate lockres with unknown owner!\n");
2088 spin_unlock(&res->spinlock);
2089 goto leave;
2090 }
2091 if (res->owner != dlm->node_num) {
2092 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2093 spin_unlock(&res->spinlock);
2094 goto leave;
2095 }
2096 mlog(0, "checking queues...\n");
2097 queue = &res->granted;
2098 for (i=0; i<3; i++) {
2099 list_for_each(iter, queue) {
2100 lock = list_entry (iter, struct dlm_lock, list);
2101 empty = 0;
2102 if (lock->ml.node == dlm->node_num) {
2103 mlog(0, "found a lock owned by this node "
2104 "still on the %s queue! will not "
2105 "migrate this lockres\n",
2106 i==0 ? "granted" :
2107 (i==1 ? "converting" : "blocked"));
2108 spin_unlock(&res->spinlock);
2109 ret = -ENOTEMPTY;
2110 goto leave;
2111 }
2112 }
2113 queue++;
2114 }
2115 mlog(0, "all locks on this lockres are nonlocal. continuing\n");
2116 spin_unlock(&res->spinlock);
2117
2118 /* no work to do */
2119 if (empty) {
2120 mlog(0, "no locks were found on this lockres! done!\n");
2121 ret = 0;
2122 goto leave;
2123 }
2124
2125 /*
2126 * preallocate up front
2127 * if this fails, abort
2128 */
2129
2130 ret = -ENOMEM;
2131 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
2132 if (!mres) {
2133 mlog_errno(ret);
2134 goto leave;
2135 }
2136
2137 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2138 GFP_KERNEL);
2139 if (!mle) {
2140 mlog_errno(ret);
2141 goto leave;
2142 }
2143 ret = 0;
2144
2145 /*
2146 * find a node to migrate the lockres to
2147 */
2148
2149 mlog(0, "picking a migration node\n");
2150 spin_lock(&dlm->spinlock);
2151 /* pick a new node */
2152 if (!test_bit(target, dlm->domain_map) ||
2153 target >= O2NM_MAX_NODES) {
2154 target = dlm_pick_migration_target(dlm, res);
2155 }
2156 mlog(0, "node %u chosen for migration\n", target);
2157
2158 if (target >= O2NM_MAX_NODES ||
2159 !test_bit(target, dlm->domain_map)) {
2160 /* target chosen is not alive */
2161 ret = -EINVAL;
2162 }
2163
2164 if (ret) {
2165 spin_unlock(&dlm->spinlock);
2166 goto fail;
2167 }
2168
2169 mlog(0, "continuing with target = %u\n", target);
2170
2171 /*
2172 * clear any existing master requests and
2173 * add the migration mle to the list
2174 */
2175 spin_lock(&dlm->master_lock);
2176 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2177 namelen, target, dlm->node_num);
2178 spin_unlock(&dlm->master_lock);
2179 spin_unlock(&dlm->spinlock);
2180
2181 if (ret == -EEXIST) {
2182 mlog(0, "another process is already migrating it\n");
2183 goto fail;
2184 }
2185 mle_added = 1;
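	/* from here on, error paths must detach the migration mle from
	 * heartbeat events and drop its reference (see "fail" below)
	 * instead of simply freeing it */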
2186
2187 /*
2188 * set the MIGRATING flag and flush asts
2189 * if we fail after this we need to re-dirty the lockres
2190 */
2191 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2192 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2193 "the target went down.\n", res->lockname.len,
2194 res->lockname.name, target);
2195 spin_lock(&res->spinlock);
2196 res->state &= ~DLM_LOCK_RES_MIGRATING;
2197 spin_unlock(&res->spinlock);
2198 ret = -EINVAL;
2199 }
2200
2201fail:
2202 if (oldmle) {
2203 /* master is known, detach if not already detached */
2204 dlm_mle_detach_hb_events(dlm, oldmle);
2205 dlm_put_mle(oldmle);
2206 }
2207
2208 if (ret < 0) {
2209 if (mle_added) {
2210 dlm_mle_detach_hb_events(dlm, mle);
2211 dlm_put_mle(mle);
2212 } else if (mle) {
2213 kmem_cache_free(dlm_mle_cache, mle);
2214 }
2215 goto leave;
2216 }
2217
2218 /*
2219 * at this point, we have a migration target, an mle
2220 * in the master list, and the MIGRATING flag set on
2221 * the lockres
2222 */
2223
2224
2225 /* get an extra reference on the mle.
2226 * otherwise the assert_master from the new
2227 * master will destroy this.
2228 * also, make sure that all callers of dlm_get_mle
2229 * take both dlm->spinlock and dlm->master_lock */
2230 spin_lock(&dlm->spinlock);
2231 spin_lock(&dlm->master_lock);
a2bf0477 2232 dlm_get_mle_inuse(mle);
6714d8e8
KH
2233 spin_unlock(&dlm->master_lock);
2234 spin_unlock(&dlm->spinlock);
2235
2236 /* notify new node and send all lock state */
2237 /* call send_one_lockres with migration flag.
2238 * this serves as notice to the target node that a
2239 * migration is starting. */
2240 ret = dlm_send_one_lockres(dlm, res, mres, target,
2241 DLM_MRES_MIGRATION);
2242
2243 if (ret < 0) {
2244 mlog(0, "migration to node %u failed with %d\n",
2245 target, ret);
2246 /* migration failed, detach and clean up mle */
2247 dlm_mle_detach_hb_events(dlm, mle);
2248 dlm_put_mle(mle);
a2bf0477
KH
2249 dlm_put_mle_inuse(mle);
2250 spin_lock(&res->spinlock);
2251 res->state &= ~DLM_LOCK_RES_MIGRATING;
2252 spin_unlock(&res->spinlock);
6714d8e8
KH
2253 goto leave;
2254 }
2255
2256 /* at this point, the target sends a message to all nodes,
2257 * (using dlm_do_migrate_request). this node is skipped since
2258 * we had to put an mle in the list to begin the process. this
2259 * node now waits for target to do an assert master. this node
2260 * will be the last one notified, ensuring that the migration
2261 * is complete everywhere. if the target dies while this is
2262 * going on, some nodes could potentially see the target as the
2263 * master, so it is important that my recovery finds the migration
 2264  * mle and sets the master to UNKNOWN. */
2265
2266
2267 /* wait for new node to assert master */
2268 while (1) {
2269 ret = wait_event_interruptible_timeout(mle->wq,
2270 (atomic_read(&mle->woken) == 1),
2271 msecs_to_jiffies(5000));
2272
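		/* wake every five seconds so the dead-target check below
		 * can run even if the expected wakeup never arrives */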
2273 if (ret >= 0) {
2274 if (atomic_read(&mle->woken) == 1 ||
2275 res->owner == target)
2276 break;
2277
2278 mlog(0, "timed out during migration\n");
e2faea4c
KH
2279 /* avoid hang during shutdown when migrating lockres
2280 * to a node which also goes down */
2281 if (dlm_is_node_dead(dlm, target)) {
aa852354
KH
2282 mlog(0, "%s:%.*s: expected migration "
2283 "target %u is no longer up, restarting\n",
e2faea4c
KH
2284 dlm->name, res->lockname.len,
2285 res->lockname.name, target);
2286 ret = -ERESTARTSYS;
2287 }
6714d8e8
KH
2288 }
2289 if (ret == -ERESTARTSYS) {
2290 /* migration failed, detach and clean up mle */
2291 dlm_mle_detach_hb_events(dlm, mle);
2292 dlm_put_mle(mle);
a2bf0477
KH
2293 dlm_put_mle_inuse(mle);
2294 spin_lock(&res->spinlock);
2295 res->state &= ~DLM_LOCK_RES_MIGRATING;
2296 spin_unlock(&res->spinlock);
6714d8e8
KH
2297 goto leave;
2298 }
2299 /* TODO: if node died: stop, clean up, return error */
2300 }
2301
2302 /* all done, set the owner, clear the flag */
2303 spin_lock(&res->spinlock);
2304 dlm_set_lockres_owner(dlm, res, target);
2305 res->state &= ~DLM_LOCK_RES_MIGRATING;
2306 dlm_remove_nonlocal_locks(dlm, res);
2307 spin_unlock(&res->spinlock);
2308 wake_up(&res->wq);
2309
2310 /* master is known, detach if not already detached */
2311 dlm_mle_detach_hb_events(dlm, mle);
a2bf0477 2312 dlm_put_mle_inuse(mle);
6714d8e8
KH
2313 ret = 0;
2314
2315 dlm_lockres_calc_usage(dlm, res);
2316
2317leave:
2318 /* re-dirty the lockres if we failed */
2319 if (ret < 0)
2320 dlm_kick_thread(dlm, res);
2321
2322 /* TODO: cleanup */
2323 if (mres)
2324 free_page((unsigned long)mres);
2325
2326 dlm_put(dlm);
2327
2328 mlog(0, "returning %d\n", ret);
2329 return ret;
2330}
2331EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
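/* usage sketch (hypothetical caller, not taken from this file): a caller
 * that does not care which node takes over can pass a target that is not
 * set in the domain map, e.g.
 *
 *	ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
 *
 * in which case dlm_pick_migration_target() above picks a live node. */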
2332
2333int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2334{
2335 int ret;
2336 spin_lock(&dlm->ast_lock);
2337 spin_lock(&lock->spinlock);
2338 ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2339 spin_unlock(&lock->spinlock);
2340 spin_unlock(&dlm->ast_lock);
2341 return ret;
2342}
2343
2344static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2345 struct dlm_lock_resource *res,
2346 u8 mig_target)
2347{
2348 int can_proceed;
2349 spin_lock(&res->spinlock);
2350 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2351 spin_unlock(&res->spinlock);
2352
2353 /* target has died, so make the caller break out of the
2354 * wait_event, but caller must recheck the domain_map */
2355 spin_lock(&dlm->spinlock);
2356 if (!test_bit(mig_target, dlm->domain_map))
2357 can_proceed = 1;
2358 spin_unlock(&dlm->spinlock);
2359 return can_proceed;
2360}
2361
2362int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2363{
2364 int ret;
2365 spin_lock(&res->spinlock);
2366 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2367 spin_unlock(&res->spinlock);
2368 return ret;
2369}
2370
2371
2372static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2373 struct dlm_lock_resource *res,
2374 u8 target)
2375{
2376 int ret = 0;
2377
2378 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2379 res->lockname.len, res->lockname.name, dlm->node_num,
2380 target);
2381 /* need to set MIGRATING flag on lockres. this is done by
2382 * ensuring that all asts have been flushed for this lockres. */
2383 spin_lock(&res->spinlock);
2384 BUG_ON(res->migration_pending);
2385 res->migration_pending = 1;
2386 /* strategy is to reserve an extra ast then release
2387 * it below, letting the release do all of the work */
2388 __dlm_lockres_reserve_ast(res);
2389 spin_unlock(&res->spinlock);
2390
2391 /* now flush all the pending asts.. hang out for a bit */
2392 dlm_kick_thread(dlm, res);
2393 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2394 dlm_lockres_release_ast(dlm, res);
2395
2396 mlog(0, "about to wait on migration_wq, dirty=%s\n",
2397 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2398 /* if the extra ref we just put was the final one, this
2399 * will pass thru immediately. otherwise, we need to wait
2400 * for the last ast to finish. */
2401again:
2402 ret = wait_event_interruptible_timeout(dlm->migration_wq,
2403 dlm_migration_can_proceed(dlm, res, target),
2404 msecs_to_jiffies(1000));
2405 if (ret < 0) {
2406 mlog(0, "woken again: migrating? %s, dead? %s\n",
2407 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2408 test_bit(target, dlm->domain_map) ? "no":"yes");
2409 } else {
2410 mlog(0, "all is well: migrating? %s, dead? %s\n",
2411 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2412 test_bit(target, dlm->domain_map) ? "no":"yes");
2413 }
2414 if (!dlm_migration_can_proceed(dlm, res, target)) {
2415 mlog(0, "trying again...\n");
2416 goto again;
2417 }
2418
2419 /* did the target go down or die? */
2420 spin_lock(&dlm->spinlock);
2421 if (!test_bit(target, dlm->domain_map)) {
2422 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2423 target);
2424 ret = -EHOSTDOWN;
2425 }
2426 spin_unlock(&dlm->spinlock);
2427
2428 /*
2429 * at this point:
2430 *
2431 * o the DLM_LOCK_RES_MIGRATING flag is set
2432 * o there are no pending asts on this lockres
2433 * o all processes trying to reserve an ast on this
2434 * lockres must wait for the MIGRATING flag to clear
2435 */
2436 return ret;
2437}
2438
2439/* last step in the migration process.
2440 * original master calls this to free all of the dlm_lock
2441 * structures that used to be for other nodes. */
2442static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2443 struct dlm_lock_resource *res)
2444{
2445 struct list_head *iter, *iter2;
2446 struct list_head *queue = &res->granted;
2447 int i;
2448 struct dlm_lock *lock;
2449
2450 assert_spin_locked(&res->spinlock);
2451
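	/* the caller has already reassigned ownership to the migration
	 * target, so this node must no longer appear as the owner */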
2452 BUG_ON(res->owner == dlm->node_num);
2453
2454 for (i=0; i<3; i++) {
2455 list_for_each_safe(iter, iter2, queue) {
2456 lock = list_entry (iter, struct dlm_lock, list);
2457 if (lock->ml.node != dlm->node_num) {
2458 mlog(0, "putting lock for node %u\n",
2459 lock->ml.node);
2460 /* be extra careful */
2461 BUG_ON(!list_empty(&lock->ast_list));
2462 BUG_ON(!list_empty(&lock->bast_list));
2463 BUG_ON(lock->ast_pending);
2464 BUG_ON(lock->bast_pending);
2465 list_del_init(&lock->list);
2466 dlm_lock_put(lock);
2467 }
2468 }
2469 queue++;
2470 }
2471}
2472
2473/* for now this is not too intelligent. we will
2474 * need stats to make this do the right thing.
2475 * this just finds the first lock on one of the
2476 * queues and uses that node as the target. */
2477static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2478 struct dlm_lock_resource *res)
2479{
2480 int i;
2481 struct list_head *queue = &res->granted;
2482 struct list_head *iter;
2483 struct dlm_lock *lock;
2484 int nodenum;
2485
2486 assert_spin_locked(&dlm->spinlock);
2487
2488 spin_lock(&res->spinlock);
2489 for (i=0; i<3; i++) {
2490 list_for_each(iter, queue) {
2491 /* up to the caller to make sure this node
2492 * is alive */
2493 lock = list_entry (iter, struct dlm_lock, list);
2494 if (lock->ml.node != dlm->node_num) {
2495 spin_unlock(&res->spinlock);
2496 return lock->ml.node;
2497 }
2498 }
2499 queue++;
2500 }
2501 spin_unlock(&res->spinlock);
2502 mlog(0, "have not found a suitable target yet! checking domain map\n");
2503
2504 /* ok now we're getting desperate. pick anyone alive. */
2505 nodenum = -1;
2506 while (1) {
2507 nodenum = find_next_bit(dlm->domain_map,
2508 O2NM_MAX_NODES, nodenum+1);
2509 mlog(0, "found %d in domain map\n", nodenum);
2510 if (nodenum >= O2NM_MAX_NODES)
2511 break;
2512 if (nodenum != dlm->node_num) {
2513 mlog(0, "picking %d\n", nodenum);
2514 return nodenum;
2515 }
2516 }
2517
2518 mlog(0, "giving up. no master to migrate to\n");
2519 return DLM_LOCK_RES_OWNER_UNKNOWN;
2520}
2521
2522
2523
2524/* this is called by the new master once all lockres
2525 * data has been received */
2526static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2527 struct dlm_lock_resource *res,
2528 u8 master, u8 new_master,
2529 struct dlm_node_iter *iter)
2530{
2531 struct dlm_migrate_request migrate;
2532 int ret, status = 0;
2533 int nodenum;
2534
2535 memset(&migrate, 0, sizeof(migrate));
2536 migrate.namelen = res->lockname.len;
2537 memcpy(migrate.name, res->lockname.name, migrate.namelen);
2538 migrate.new_master = new_master;
2539 migrate.master = master;
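	/* the request carries both the current master and the chosen new
	 * master so every other node can update or create its mle for
	 * this lock name */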
2540
2541 ret = 0;
2542
2543 /* send message to all nodes, except the master and myself */
2544 while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2545 if (nodenum == master ||
2546 nodenum == new_master)
2547 continue;
2548
2549 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2550 &migrate, sizeof(migrate), nodenum,
2551 &status);
2552 if (ret < 0)
2553 mlog_errno(ret);
2554 else if (status < 0) {
2555 mlog(0, "migrate request (node %u) returned %d!\n",
2556 nodenum, status);
2557 ret = status;
2558 }
2559 }
2560
2561 if (ret < 0)
2562 mlog_errno(ret);
2563
2564 mlog(0, "returning ret=%d\n", ret);
2565 return ret;
2566}
2567
2568
2569/* if there is an existing mle for this lockres, we now know who the master is.
2570 * (the one who sent us *this* message) we can clear it up right away.
2571 * since the process that put the mle on the list still has a reference to it,
2572 * we can unhash it now, set the master and wake the process. as a result,
2573 * we will have no mle in the list to start with. now we can add an mle for
2574 * the migration and this should be the only one found for those scanning the
2575 * list. */
2576int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
2577{
2578 struct dlm_ctxt *dlm = data;
2579 struct dlm_lock_resource *res = NULL;
2580 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
2581 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
2582 const char *name;
a3d33291 2583 unsigned int namelen, hash;
6714d8e8
KH
2584 int ret = 0;
2585
2586 if (!dlm_grab(dlm))
2587 return -EINVAL;
2588
2589 name = migrate->name;
2590 namelen = migrate->namelen;
a3d33291 2591 hash = dlm_lockid_hash(name, namelen);
6714d8e8
KH
2592
2593 /* preallocate.. if this fails, abort */
2594 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2595 GFP_KERNEL);
2596
2597 if (!mle) {
2598 ret = -ENOMEM;
2599 goto leave;
2600 }
2601
2602 /* check for pre-existing lock */
2603 spin_lock(&dlm->spinlock);
a3d33291 2604 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
6714d8e8
KH
2605 spin_lock(&dlm->master_lock);
2606
2607 if (res) {
2608 spin_lock(&res->spinlock);
2609 if (res->state & DLM_LOCK_RES_RECOVERING) {
2610 /* if all is working ok, this can only mean that we got
2611 * a migrate request from a node that we now see as
2612 * dead. what can we do here? drop it to the floor? */
2613 spin_unlock(&res->spinlock);
2614 mlog(ML_ERROR, "Got a migrate request, but the "
2615 "lockres is marked as recovering!");
2616 kmem_cache_free(dlm_mle_cache, mle);
2617 ret = -EINVAL; /* need a better solution */
2618 goto unlock;
2619 }
2620 res->state |= DLM_LOCK_RES_MIGRATING;
2621 spin_unlock(&res->spinlock);
2622 }
2623
2624 /* ignore status. only nonzero status would BUG. */
2625 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
2626 name, namelen,
2627 migrate->new_master,
2628 migrate->master);
2629
2630unlock:
2631 spin_unlock(&dlm->master_lock);
2632 spin_unlock(&dlm->spinlock);
2633
2634 if (oldmle) {
2635 /* master is known, detach if not already detached */
2636 dlm_mle_detach_hb_events(dlm, oldmle);
2637 dlm_put_mle(oldmle);
2638 }
2639
2640 if (res)
2641 dlm_lockres_put(res);
2642leave:
2643 dlm_put(dlm);
2644 return ret;
2645}
2646
2647/* must be holding dlm->spinlock and dlm->master_lock
2648 * when adding a migration mle, we can clear any other mles
2649 * in the master list because we know with certainty that
2650 * the master is "master". so we remove any old mle from
 2651  * the list after setting its master field, and then add
 2652  * the new migration mle. this way we can hold to the rule
2653 * of having only one mle for a given lock name at all times. */
2654static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
2655 struct dlm_lock_resource *res,
2656 struct dlm_master_list_entry *mle,
2657 struct dlm_master_list_entry **oldmle,
2658 const char *name, unsigned int namelen,
2659 u8 new_master, u8 master)
2660{
2661 int found;
2662 int ret = 0;
2663
2664 *oldmle = NULL;
2665
2666 mlog_entry_void();
2667
2668 assert_spin_locked(&dlm->spinlock);
2669 assert_spin_locked(&dlm->master_lock);
2670
2671 /* caller is responsible for any ref taken here on oldmle */
2672 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
2673 if (found) {
2674 struct dlm_master_list_entry *tmp = *oldmle;
2675 spin_lock(&tmp->spinlock);
2676 if (tmp->type == DLM_MLE_MIGRATION) {
2677 if (master == dlm->node_num) {
2678 /* ah another process raced me to it */
2679 mlog(0, "tried to migrate %.*s, but some "
2680 "process beat me to it\n",
2681 namelen, name);
2682 ret = -EEXIST;
2683 } else {
2684 /* bad. 2 NODES are trying to migrate! */
2685 mlog(ML_ERROR, "migration error mle: "
2686 "master=%u new_master=%u // request: "
2687 "master=%u new_master=%u // "
2688 "lockres=%.*s\n",
2689 tmp->master, tmp->new_master,
2690 master, new_master,
2691 namelen, name);
2692 BUG();
2693 }
2694 } else {
2695 /* this is essentially what assert_master does */
2696 tmp->master = master;
2697 atomic_set(&tmp->woken, 1);
2698 wake_up(&tmp->wq);
2699 /* remove it from the list so that only one
2700 * mle will be found */
2701 list_del_init(&tmp->list);
da01ad05 2702			__dlm_mle_detach_hb_events(dlm, tmp);
6714d8e8
KH
2703 }
2704 spin_unlock(&tmp->spinlock);
2705 }
2706
2707 /* now add a migration mle to the tail of the list */
2708 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
2709 mle->new_master = new_master;
2710 mle->master = master;
2711 /* do this for consistency with other mle types */
2712 set_bit(new_master, mle->maybe_map);
2713 list_add(&mle->list, &dlm->master_list);
2714
2715 return ret;
2716}
2717
2718
2719void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
2720{
2721 struct list_head *iter, *iter2;
2722 struct dlm_master_list_entry *mle;
2723 struct dlm_lock_resource *res;
a3d33291 2724 unsigned int hash;
6714d8e8
KH
2725
2726 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
2727top:
2728 assert_spin_locked(&dlm->spinlock);
2729
2730 /* clean the master list */
2731 spin_lock(&dlm->master_lock);
2732 list_for_each_safe(iter, iter2, &dlm->master_list) {
2733 mle = list_entry(iter, struct dlm_master_list_entry, list);
2734
2735 BUG_ON(mle->type != DLM_MLE_BLOCK &&
2736 mle->type != DLM_MLE_MASTER &&
2737 mle->type != DLM_MLE_MIGRATION);
2738
2739 /* MASTER mles are initiated locally. the waiting
2740 * process will notice the node map change
2741 * shortly. let that happen as normal. */
2742 if (mle->type == DLM_MLE_MASTER)
2743 continue;
2744
2745
2746 /* BLOCK mles are initiated by other nodes.
2747 * need to clean up if the dead node would have
2748 * been the master. */
2749 if (mle->type == DLM_MLE_BLOCK) {
2750 int bit;
2751
2752 spin_lock(&mle->spinlock);
2753 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
2754 if (bit != dead_node) {
2755 mlog(0, "mle found, but dead node %u would "
2756 "not have been master\n", dead_node);
2757 spin_unlock(&mle->spinlock);
2758 } else {
2759 /* must drop the refcount by one since the
2760 * assert_master will never arrive. this
2761 * may result in the mle being unlinked and
2762 * freed, but there may still be a process
2763 * waiting in the dlmlock path which is fine. */
2764 mlog(ML_ERROR, "node %u was expected master\n",
2765 dead_node);
2766 atomic_set(&mle->woken, 1);
2767 spin_unlock(&mle->spinlock);
2768 wake_up(&mle->wq);
f671c09b
KH
2769 /* do not need events any longer, so detach
2770 * from heartbeat */
2771 __dlm_mle_detach_hb_events(dlm, mle);
6714d8e8
KH
2772 __dlm_put_mle(mle);
2773 }
2774 continue;
2775 }
2776
2777 /* everything else is a MIGRATION mle */
2778
2779 /* the rule for MIGRATION mles is that the master
2780 * becomes UNKNOWN if *either* the original or
2781 * the new master dies. all UNKNOWN lockreses
2782 * are sent to whichever node becomes the recovery
2783 * master. the new master is responsible for
2784 * determining if there is still a master for
2785 * this lockres, or if he needs to take over
2786 * mastery. either way, this node should expect
2787 * another message to resolve this. */
2788 if (mle->master != dead_node &&
2789 mle->new_master != dead_node)
2790 continue;
2791
2792 /* if we have reached this point, this mle needs to
2793 * be removed from the list and freed. */
2794
2795 /* remove from the list early. NOTE: unlinking
2796 * list_head while in list_for_each_safe */
da01ad05 2797 __dlm_mle_detach_hb_events(dlm, mle);
6714d8e8
KH
2798 spin_lock(&mle->spinlock);
2799 list_del_init(&mle->list);
2800 atomic_set(&mle->woken, 1);
2801 spin_unlock(&mle->spinlock);
2802 wake_up(&mle->wq);
2803
aa852354
KH
2804 mlog(0, "%s: node %u died during migration from "
2805 "%u to %u!\n", dlm->name, dead_node,
6714d8e8
KH
2806 mle->master, mle->new_master);
2807 /* if there is a lockres associated with this
2808 * mle, find it and set its owner to UNKNOWN */
a3d33291 2809 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
6714d8e8 2810 res = __dlm_lookup_lockres(dlm, mle->u.name.name,
a3d33291 2811 mle->u.name.len, hash);
6714d8e8
KH
2812 if (res) {
2813 /* unfortunately if we hit this rare case, our
2814 * lock ordering is messed. we need to drop
2815 * the master lock so that we can take the
2816 * lockres lock, meaning that we will have to
2817 * restart from the head of list. */
2818 spin_unlock(&dlm->master_lock);
2819
2820 /* move lockres onto recovery list */
2821 spin_lock(&res->spinlock);
2822 dlm_set_lockres_owner(dlm, res,
2823 DLM_LOCK_RES_OWNER_UNKNOWN);
2824 dlm_move_lockres_to_recovery_list(dlm, res);
2825 spin_unlock(&res->spinlock);
2826 dlm_lockres_put(res);
2827
f671c09b
KH
2828 /* about to get rid of mle, detach from heartbeat */
2829 __dlm_mle_detach_hb_events(dlm, mle);
2830
6714d8e8
KH
2831 /* dump the mle */
2832 spin_lock(&dlm->master_lock);
2833 __dlm_put_mle(mle);
2834 spin_unlock(&dlm->master_lock);
2835
2836 /* restart */
2837 goto top;
2838 }
2839
2840 /* this may be the last reference */
2841 __dlm_put_mle(mle);
2842 }
2843 spin_unlock(&dlm->master_lock);
2844}
2845
2846
2847int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2848 u8 old_master)
2849{
2850 struct dlm_node_iter iter;
2851 int ret = 0;
2852
2853 spin_lock(&dlm->spinlock);
2854 dlm_node_iter_init(dlm->domain_map, &iter);
2855 clear_bit(old_master, iter.node_map);
2856 clear_bit(dlm->node_num, iter.node_map);
2857 spin_unlock(&dlm->spinlock);
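	/* iter now covers every live node except this node and the old
	 * master; the old master is notified separately below */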
2858
2859 mlog(0, "now time to do a migrate request to other nodes\n");
2860 ret = dlm_do_migrate_request(dlm, res, old_master,
2861 dlm->node_num, &iter);
2862 if (ret < 0) {
2863 mlog_errno(ret);
2864 goto leave;
2865 }
2866
2867 mlog(0, "doing assert master of %.*s to all except the original node\n",
2868 res->lockname.len, res->lockname.name);
2869 /* this call now finishes out the nodemap
2870 * even if one or more nodes die */
2871 ret = dlm_do_assert_master(dlm, res->lockname.name,
2872 res->lockname.len, iter.node_map,
2873 DLM_ASSERT_MASTER_FINISH_MIGRATION);
2874 if (ret < 0) {
2875 /* no longer need to retry. all living nodes contacted. */
2876 mlog_errno(ret);
2877 ret = 0;
2878 }
2879
2880 memset(iter.node_map, 0, sizeof(iter.node_map));
2881 set_bit(old_master, iter.node_map);
2882 mlog(0, "doing assert master of %.*s back to %u\n",
2883 res->lockname.len, res->lockname.name, old_master);
2884 ret = dlm_do_assert_master(dlm, res->lockname.name,
2885 res->lockname.len, iter.node_map,
2886 DLM_ASSERT_MASTER_FINISH_MIGRATION);
2887 if (ret < 0) {
2888 mlog(0, "assert master to original master failed "
2889 "with %d.\n", ret);
2890 /* the only nonzero status here would be because of
2891 * a dead original node. we're done. */
2892 ret = 0;
2893 }
2894
2895 /* all done, set the owner, clear the flag */
2896 spin_lock(&res->spinlock);
2897 dlm_set_lockres_owner(dlm, res, dlm->node_num);
2898 res->state &= ~DLM_LOCK_RES_MIGRATING;
2899 spin_unlock(&res->spinlock);
2900 /* re-dirty it on the new master */
2901 dlm_kick_thread(dlm, res);
2902 wake_up(&res->wq);
2903leave:
2904 return ret;
2905}
2906
2907/*
2908 * LOCKRES AST REFCOUNT
2909 * this is integral to migration
2910 */
2911
2912/* for future intent to call an ast, reserve one ahead of time.
2913 * this should be called only after waiting on the lockres
2914 * with dlm_wait_on_lockres, and while still holding the
2915 * spinlock after the call. */
2916void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
2917{
2918 assert_spin_locked(&res->spinlock);
2919 if (res->state & DLM_LOCK_RES_MIGRATING) {
2920 __dlm_print_one_lock_resource(res);
2921 }
2922 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
2923
2924 atomic_inc(&res->asts_reserved);
2925}
2926
2927/*
2928 * used to drop the reserved ast, either because it went unused,
2929 * or because the ast/bast was actually called.
2930 *
2931 * also, if there is a pending migration on this lockres,
2932 * and this was the last pending ast on the lockres,
2933 * atomically set the MIGRATING flag before we drop the lock.
2934 * this is how we ensure that migration can proceed with no
2935 * asts in progress. note that it is ok if the state of the
2936 * queues is such that a lock should be granted in the future
2937 * or that a bast should be fired, because the new master will
2938 * shuffle the lists on this lockres as soon as it is migrated.
2939 */
2940void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
2941 struct dlm_lock_resource *res)
2942{
2943 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
2944 return;
2945
2946 if (!res->migration_pending) {
2947 spin_unlock(&res->spinlock);
2948 return;
2949 }
2950
2951 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
2952 res->migration_pending = 0;
2953 res->state |= DLM_LOCK_RES_MIGRATING;
2954 spin_unlock(&res->spinlock);
2955 wake_up(&res->wq);
2956 wake_up(&dlm->migration_wq);
2957}