/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"

/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They need
 * to abort if the lockspace is stopped, indicating that a node has failed
 * (perhaps the one being waited for).
 */

/*
 * Wait until given function returns non-zero or lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timer to detect the result.  A timer wakes us up periodically while waiting
 * to see if we should abort due to a node failure.  This should only be called
 * by the dlm_recoverd thread.
 */

static void dlm_wait_timer_fn(unsigned long data)
{
        struct dlm_ls *ls = (struct dlm_ls *) data;
        mod_timer(&ls->ls_timer, jiffies + (dlm_config.ci_recover_timer * HZ));
        wake_up(&ls->ls_wait_general);
}

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
        int error = 0;

        init_timer(&ls->ls_timer);
        ls->ls_timer.function = dlm_wait_timer_fn;
        ls->ls_timer.data = (long) ls;
        ls->ls_timer.expires = jiffies + (dlm_config.ci_recover_timer * HZ);
        add_timer(&ls->ls_timer);

        wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
        del_timer_sync(&ls->ls_timer);

        if (dlm_recovery_stopped(ls)) {
                log_debug(ls, "dlm_wait_function aborted");
                error = -EINTR;
        }
        return error;
}

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
        uint32_t status;

        spin_lock(&ls->ls_recover_lock);
        status = ls->ls_recover_status;
        spin_unlock(&ls->ls_recover_lock);
        return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
        ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
        spin_lock(&ls->ls_recover_lock);
        _set_recover_status(ls, status);
        spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
                           int save_slots)
{
        struct dlm_rcom *rc = ls->ls_recover_buf;
        struct dlm_member *memb;
        int error = 0, delay;

        list_for_each_entry(memb, &ls->ls_nodes, list) {
                delay = 0;
                for (;;) {
                        if (dlm_recovery_stopped(ls)) {
                                error = -EINTR;
                                goto out;
                        }

                        error = dlm_rcom_status(ls, memb->nodeid, 0);
                        if (error)
                                goto out;

                        if (save_slots)
                                dlm_slot_save(ls, rc, memb);

                        if (rc->rc_result & wait_status)
                                break;
                        if (delay < 1000)
                                delay += 20;
                        msleep(delay);
                }
        }
 out:
        return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
                           uint32_t status_flags)
{
        struct dlm_rcom *rc = ls->ls_recover_buf;
        int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

        for (;;) {
                if (dlm_recovery_stopped(ls)) {
                        error = -EINTR;
                        goto out;
                }

                error = dlm_rcom_status(ls, nodeid, status_flags);
                if (error)
                        break;

                if (rc->rc_result & wait_status)
                        break;
                if (delay < 1000)
                        delay += 20;
                msleep(delay);
        }
 out:
        return error;
}

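/*
 * wait_status ties the two halves together: the low nodeid waits for every
 * node to set X and then publishes X_ALL; all other nodes wait for X_ALL on
 * the low nodeid.  The status_all = status << 1 computation below assumes
 * that the DLM_RS_* flags in dlm_internal.h pair each X with X_ALL == X << 1;
 * for illustration (values assumed, not defined in this file):
 *
 *      DLM_RS_NODES  0x00000001        DLM_RS_NODES_ALL  0x00000002
 *      DLM_RS_DIR    0x00000004        DLM_RS_DIR_ALL    0x00000008
 *      DLM_RS_LOCKS  0x00000010        DLM_RS_LOCKS_ALL  0x00000020
 *      DLM_RS_DONE   0x00000040        DLM_RS_DONE_ALL   0x00000080
 */
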
static int wait_status(struct dlm_ls *ls, uint32_t status)
{
        uint32_t status_all = status << 1;
        int error;

        if (ls->ls_low_nodeid == dlm_our_nodeid()) {
                error = wait_status_all(ls, status, 0);
                if (!error)
                        dlm_set_recover_status(ls, status_all);
        } else
                error = wait_status_low(ls, status_all, 0);

        return error;
}

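/*
 * The DLM_RS_NODES round additionally carries slot assignments: the low
 * nodeid saves each member's slot info while polling (save_slots), assigns
 * slots and a new generation, and the other nodes request the result with
 * DLM_RSF_NEED_SLOTS and copy it in once they see DLM_RS_NODES_ALL.
 */
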
int dlm_recover_members_wait(struct dlm_ls *ls)
{
        struct dlm_member *memb;
        struct dlm_slot *slots;
        int num_slots, slots_size;
        int error, rv;
        uint32_t gen;

        list_for_each_entry(memb, &ls->ls_nodes, list) {
                memb->slot = -1;
                memb->generation = 0;
        }

        if (ls->ls_low_nodeid == dlm_our_nodeid()) {
                error = wait_status_all(ls, DLM_RS_NODES, 1);
                if (error)
                        goto out;

                /* slots array is sparse, slots_size may be > num_slots */

                rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
                if (!rv) {
                        spin_lock(&ls->ls_recover_lock);
                        _set_recover_status(ls, DLM_RS_NODES_ALL);
                        ls->ls_num_slots = num_slots;
                        ls->ls_slots_size = slots_size;
                        ls->ls_slots = slots;
                        ls->ls_generation = gen;
                        spin_unlock(&ls->ls_recover_lock);
                } else {
                        dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
                }
        } else {
                error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
                if (error)
                        goto out;

                dlm_slots_copy_in(ls);
        }
 out:
        return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
        int empty;

        spin_lock(&ls->ls_recover_list_lock);
        empty = list_empty(&ls->ls_recover_list);
        spin_unlock(&ls->ls_recover_list_lock);

        return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_list_lock);
        if (list_empty(&r->res_recover_list)) {
                list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
                ls->ls_recover_list_count++;
                dlm_hold_rsb(r);
        }
        spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_list_lock);
        list_del_init(&r->res_recover_list);
        ls->ls_recover_list_count--;
        spin_unlock(&ls->ls_recover_list_lock);

        dlm_put_rsb(r);
}

static void recover_list_clear(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *s;

        spin_lock(&ls->ls_recover_list_lock);
        list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
                list_del_init(&r->res_recover_list);
                r->res_recover_locks_count = 0;
                dlm_put_rsb(r);
                ls->ls_recover_list_count--;
        }

        if (ls->ls_recover_list_count != 0) {
                log_error(ls, "warning: recover_list_count %d",
                          ls->ls_recover_list_count);
                ls->ls_recover_list_count = 0;
        }
        spin_unlock(&ls->ls_recover_list_lock);
}

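/*
 * The recover_idr variant below serves master recovery: each rsb waiting
 * for a directory lookup reply gets a small integer id, carried in rc_id
 * of the rcom so the reply can be matched back to its rsb in
 * dlm_recover_master_reply().  Note that ls_recover_list_count is shared
 * with the list variant above, which lock recovery uses.
 */
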
static int recover_idr_empty(struct dlm_ls *ls)
{
        int empty = 1;

        spin_lock(&ls->ls_recover_idr_lock);
        if (ls->ls_recover_list_count)
                empty = 0;
        spin_unlock(&ls->ls_recover_idr_lock);

        return empty;
}

static int recover_idr_add(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;
        int rv, id;

        rv = idr_pre_get(&ls->ls_recover_idr, GFP_NOFS);
        if (!rv)
                return -ENOMEM;

        spin_lock(&ls->ls_recover_idr_lock);
        if (r->res_id) {
                spin_unlock(&ls->ls_recover_idr_lock);
                return -1;
        }
        rv = idr_get_new_above(&ls->ls_recover_idr, r, 1, &id);
        if (rv) {
                spin_unlock(&ls->ls_recover_idr_lock);
                return rv;
        }
        r->res_id = id;
        ls->ls_recover_list_count++;
        dlm_hold_rsb(r);
        spin_unlock(&ls->ls_recover_idr_lock);
        return 0;
}

static void recover_idr_del(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_idr_lock);
        idr_remove(&ls->ls_recover_idr, r->res_id);
        r->res_id = 0;
        ls->ls_recover_list_count--;
        spin_unlock(&ls->ls_recover_idr_lock);

        dlm_put_rsb(r);
}

static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
        struct dlm_rsb *r;

        spin_lock(&ls->ls_recover_idr_lock);
        r = idr_find(&ls->ls_recover_idr, (int)id);
        spin_unlock(&ls->ls_recover_idr_lock);
        return r;
}

static int recover_idr_clear_rsb(int id, void *p, void *data)
{
        struct dlm_ls *ls = data;
        struct dlm_rsb *r = p;

        r->res_id = 0;
        r->res_recover_locks_count = 0;
        ls->ls_recover_list_count--;

        dlm_put_rsb(r);
        return 0;
}

static void recover_idr_clear(struct dlm_ls *ls)
{
        spin_lock(&ls->ls_recover_idr_lock);
        idr_for_each(&ls->ls_recover_idr, recover_idr_clear_rsb, ls);
        idr_remove_all(&ls->ls_recover_idr);

        if (ls->ls_recover_list_count != 0) {
                log_error(ls, "warning: recover_list_count %d",
                          ls->ls_recover_list_count);
                ls->ls_recover_list_count = 0;
        }
        spin_unlock(&ls->ls_recover_idr_lock);
}

/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_master_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
        struct dlm_lkb *lkb;

        list_for_each_entry(lkb, queue, lkb_statequeue) {
                if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
                        lkb->lkb_nodeid = nodeid;
                        lkb->lkb_remid = 0;
                }
        }
}

static void set_master_lkbs(struct dlm_rsb *r)
{
        set_lock_master(&r->res_grantqueue, r->res_nodeid);
        set_lock_master(&r->res_convertqueue, r->res_nodeid);
        set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r)
{
        set_master_lkbs(r);
        rsb_set_flag(r, RSB_NEW_MASTER);
        rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 *
 * Another node recovering the master may have sent us a rcom lookup,
 * and our dlm_master_lookup() set it as the new master, along with
 * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
 * equals our_nodeid below).
 */

static int recover_master(struct dlm_rsb *r, unsigned int *count)
{
        struct dlm_ls *ls = r->res_ls;
        int our_nodeid, dir_nodeid;
        int is_removed = 0;
        int error;

        if (is_master(r))
                return 0;

        is_removed = dlm_is_removed(ls, r->res_nodeid);

        if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
                return 0;

        our_nodeid = dlm_our_nodeid();
        dir_nodeid = dlm_dir_nodeid(r);

        if (dir_nodeid == our_nodeid) {
                if (is_removed) {
                        r->res_master_nodeid = our_nodeid;
                        r->res_nodeid = 0;
                }

                /* set master of lkbs to ourself when is_removed, or to
                   another new master which we set along with NEW_MASTER
                   in dlm_master_lookup */
                set_new_master(r);
                error = 0;
        } else {
                recover_idr_add(r);
                error = dlm_send_rcom_lookup(r, dir_nodeid);
        }

        (*count)++;
        return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request in the waiters list and a request reply saved in
 * the requestqueue cannot know whether it should ignore the reply and resend
 * the request, or accept the reply and complete the request.  It must do the
 * former if the remote node purged MSTCPY locks, and it must do the latter if
 * the remote node did not.  This is solved by always purging MSTCPY locks, in
 * which case the request reply would always be ignored and the request resent.
 */

static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
        int dir_nodeid = dlm_dir_nodeid(r);
        int new_master = dir_nodeid;

        if (dir_nodeid == dlm_our_nodeid())
                new_master = 0;

        dlm_purge_mstcpy_locks(r);
        r->res_master_nodeid = dir_nodeid;
        r->res_nodeid = new_master;
        set_new_master(r);
        (*count)++;
        return 0;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        unsigned int total = 0;
        unsigned int count = 0;
        int nodir = dlm_no_directory(ls);
        int error;

        log_debug(ls, "dlm_recover_masters");

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                if (dlm_recovery_stopped(ls)) {
                        up_read(&ls->ls_root_sem);
                        error = -EINTR;
                        goto out;
                }

                lock_rsb(r);
                if (nodir)
                        error = recover_master_static(r, &count);
                else
                        error = recover_master(r, &count);
                unlock_rsb(r);
                cond_resched();
                total++;

                if (error) {
                        up_read(&ls->ls_root_sem);
                        goto out;
                }
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_masters %u of %u", count, total);

        error = dlm_wait_function(ls, &recover_idr_empty);
 out:
        if (error)
                recover_idr_clear(ls);
        return error;
}

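/*
 * Handle a directory lookup reply (arriving via receive_rcom_lookup_reply):
 * rc_id carries the recover_idr id attached when the lookup was sent,
 * letting us find the rsb the reply belongs to.
 */
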
int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
        struct dlm_rsb *r;
        int ret_nodeid, new_master;

        r = recover_idr_find(ls, rc->rc_id);
        if (!r) {
                log_error(ls, "dlm_recover_master_reply no id %llx",
                          (unsigned long long)rc->rc_id);
                goto out;
        }

        ret_nodeid = rc->rc_result;

        if (ret_nodeid == dlm_our_nodeid())
                new_master = 0;
        else
                new_master = ret_nodeid;

        lock_rsb(r);
        r->res_master_nodeid = ret_nodeid;
        r->res_nodeid = new_master;
        set_new_master(r);
        unlock_rsb(r);
        recover_idr_del(r);

        if (recover_idr_empty(ls))
                wake_up(&ls->ls_wait_general);
 out:
        return 0;
}

/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/

/*
 * keep a count of the number of lkb's we send to the new master; when we
 * get an equal number of replies then recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
        struct dlm_lkb *lkb;
        int error = 0;

        list_for_each_entry(lkb, head, lkb_statequeue) {
                error = dlm_send_rcom_lock(r, lkb);
                if (error)
                        break;
                r->res_recover_locks_count++;
        }

        return error;
}

static int recover_locks(struct dlm_rsb *r)
{
        int error = 0;

        lock_rsb(r);

        DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

        error = recover_locks_queue(r, &r->res_grantqueue);
        if (error)
                goto out;
        error = recover_locks_queue(r, &r->res_convertqueue);
        if (error)
                goto out;
        error = recover_locks_queue(r, &r->res_waitqueue);
        if (error)
                goto out;

        if (r->res_recover_locks_count)
                recover_list_add(r);
        else
                rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
        unlock_rsb(r);
        return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int error, count = 0;

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                if (is_master(r)) {
                        rsb_clear_flag(r, RSB_NEW_MASTER);
                        continue;
                }

                if (!rsb_flag(r, RSB_NEW_MASTER))
                        continue;

                if (dlm_recovery_stopped(ls)) {
                        error = -EINTR;
                        up_read(&ls->ls_root_sem);
                        goto out;
                }

                error = recover_locks(r);
                if (error) {
                        up_read(&ls->ls_root_sem);
                        goto out;
                }

                count += r->res_recover_locks_count;
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_locks %d out", count);

        error = dlm_wait_function(ls, &recover_list_empty);
 out:
        if (error)
                recover_list_clear(ls);
        return error;
}

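/*
 * Called for each rcom lock reply (dlm_recover_process_copy in the diagram
 * above): when the count of outstanding replies for the rsb drops to zero,
 * the rsb's lock recovery is done and it comes off the recover_list.
 */
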
void dlm_recovered_lock(struct dlm_rsb *r)
{
        DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

        r->res_recover_locks_count--;
        if (!r->res_recover_locks_count) {
                rsb_clear_flag(r, RSB_NEW_MASTER);
                recover_list_del(r);
        }

        if (recover_list_empty(r->res_ls))
                wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
 * was already set prior to recovery, it's not cleared, regardless of locks.
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb, *high_lkb = NULL;
        uint32_t high_seq = 0;
        int lock_lvb_exists = 0;
        int big_lock_exists = 0;
        int lvblen = r->res_ls->ls_lvblen;

        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        continue;

                lock_lvb_exists = 1;

                if (lkb->lkb_grmode > DLM_LOCK_CR) {
                        big_lock_exists = 1;
                        goto setflag;
                }

                if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
                        high_lkb = lkb;
                        high_seq = lkb->lkb_lvbseq;
                }
        }

        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        continue;

                lock_lvb_exists = 1;

                if (lkb->lkb_grmode > DLM_LOCK_CR) {
                        big_lock_exists = 1;
                        goto setflag;
                }

                if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
                        high_lkb = lkb;
                        high_seq = lkb->lkb_lvbseq;
                }
        }

 setflag:
        if (!lock_lvb_exists)
                goto out;

        if (!big_lock_exists)
                rsb_set_flag(r, RSB_VALNOTVALID);

        /* don't mess with the lvb unless we're the new master */
        if (!rsb_flag(r, RSB_NEW_MASTER2))
                goto out;

        if (!r->res_lvbptr) {
                r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
                if (!r->res_lvbptr)
                        goto out;
        }

        if (big_lock_exists) {
                r->res_lvbseq = lkb->lkb_lvbseq;
                memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
        } else if (high_lkb) {
                r->res_lvbseq = high_lkb->lkb_lvbseq;
                memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
        } else {
                r->res_lvbseq = 0;
                memset(r->res_lvbptr, 0, lvblen);
        }
 out:
        return;
}

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb;
        int grmode = -1;

        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                if (lkb->lkb_grmode == DLM_LOCK_PR ||
                    lkb->lkb_grmode == DLM_LOCK_CW) {
                        grmode = lkb->lkb_grmode;
                        break;
                }
        }

        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
                if (lkb->lkb_grmode != DLM_LOCK_IV)
                        continue;
                if (grmode == -1)
                        lkb->lkb_grmode = lkb->lkb_rqmode;
                else
                        lkb->lkb_grmode = grmode;
        }
}

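/*
 * Illustration (an assumed scenario, not taken from the original source):
 * a lock granted PR and converting to CW is rebuilt on the new master with
 * lkb_grmode == DLM_LOCK_IV.  If another granted lock on the rsb still
 * shows PR or CW, the IV lock must have held that same (self-compatible)
 * mode, so grmode is copied from it; with no such lock, the requested mode
 * is the best available value.
 */
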
/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
        if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
                rsb_set_flag(r, RSB_RECOVER_GRANT);
}

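/*
 * Walk all root rsb's and, on the ones we master, apply the lvb, conversion
 * and grant fixups above.  RECOVER_CONVERT and NEW_MASTER2 are cleared on
 * every rsb, mastered here or not.
 */
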
void dlm_recover_rsbs(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        unsigned int count = 0;

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                lock_rsb(r);
                if (is_master(r)) {
                        if (rsb_flag(r, RSB_RECOVER_CONVERT))
                                recover_conversion(r);
                        if (rsb_flag(r, RSB_NEW_MASTER2))
                                recover_grant(r);
                        recover_lvb(r);
                        count++;
                }
                rsb_clear_flag(r, RSB_RECOVER_CONVERT);
                rsb_clear_flag(r, RSB_NEW_MASTER2);
                unlock_rsb(r);
        }
        up_read(&ls->ls_root_sem);

        if (count)
                log_debug(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
        struct rb_node *n;
        struct dlm_rsb *r;
        int i, error = 0;

        down_write(&ls->ls_root_sem);
        if (!list_empty(&ls->ls_root_list)) {
                log_error(ls, "root list not empty");
                error = -EINVAL;
                goto out;
        }

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
                for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
                        r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        list_add(&r->res_root_list, &ls->ls_root_list);
                        dlm_hold_rsb(r);
                }

                if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
                        log_error(ls, "dlm_create_root_list toss not empty");
                spin_unlock(&ls->ls_rsbtbl[i].lock);
        }
 out:
        up_write(&ls->ls_root_sem);
        return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *safe;

        down_write(&ls->ls_root_sem);
        list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
                list_del_init(&r->res_root_list);
                dlm_put_rsb(r);
        }
        up_write(&ls->ls_root_sem);
}

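/*
 * Free all the cached rsb's sitting on the toss lists; dlm_create_root_list
 * above expects the toss trees to be empty when recovery begins.
 */
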
void dlm_clear_toss(struct dlm_ls *ls)
{
        struct rb_node *n, *next;
        struct dlm_rsb *r;
        unsigned int count = 0;
        int i;

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
                for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
                        next = rb_next(n);
                        r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        rb_erase(n, &ls->ls_rsbtbl[i].toss);
                        dlm_free_rsb(r);
                        count++;
                }
                spin_unlock(&ls->ls_rsbtbl[i].lock);
        }

        if (count)
                log_debug(ls, "dlm_clear_toss %u done", count);
}