/* fs/dlm/recover.c */

/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"


/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They need
 * to abort if the lockspace is stopped, indicating that a node has failed
 * (perhaps the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timer to detect the result.  A timer wakes us up periodically while waiting
 * to see if we should abort due to a node failure.  This should only be
 * called by the dlm_recoverd thread.
 */

static void dlm_wait_timer_fn(unsigned long data)
{
	struct dlm_ls *ls = (struct dlm_ls *) data;
	mod_timer(&ls->ls_timer, jiffies + (dlm_config.recover_timer * HZ));
	wake_up(&ls->ls_wait_general);
}

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;

	init_timer(&ls->ls_timer);
	ls->ls_timer.function = dlm_wait_timer_fn;
	ls->ls_timer.data = (long) ls;
	ls->ls_timer.expires = jiffies + (dlm_config.recover_timer * HZ);
	add_timer(&ls->ls_timer);

	wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
	del_timer_sync(&ls->ls_timer);

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}
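
/*
 * Usage within this file: dlm_recover_masters() and dlm_recover_locks()
 * below both wait for their outstanding rcom replies with
 *
 *	error = dlm_wait_function(ls, &recover_list_empty);
 *
 * where recover_list_empty() is the testfn that reports whether the
 * recover_list has drained.
 */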

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;
	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	ls->ls_recover_status |= status;
	spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
{
	struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid);
			if (error)
				goto out;

			if (rc->rc_result & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
{
	struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid);
		if (error)
			break;

		if (rc->rc_result & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

static int wait_status(struct dlm_ls *ls, uint32_t status)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all);

	return error;
}
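
/*
 * Worked example (illustrative): dlm_recover_members_wait() below calls
 * wait_status(ls, DLM_RS_NODES), so status_all is the corresponding _ALL
 * flag (DLM_RS_NODES << 1).  The low nodeid runs
 * wait_status_all(ls, DLM_RS_NODES), polling every member until each one
 * reports DLM_RS_NODES, then sets the _ALL flag in its own
 * ls_recover_status.  Every other node runs wait_status_low() and keeps
 * polling the low nodeid until that _ALL flag appears.  The DIR, LOCKS and
 * DONE stages follow the same pattern.
 */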

int dlm_recover_members_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_NODES);
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */
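
/*
 * Illustrative pairing (the sending side lives in rcom.c, so this is an
 * assumption about its behaviour): dlm_send_rcom_lookup() is expected to
 * carry (unsigned long) r as rc_id, which is why dlm_recover_master_reply()
 * below can match the reply with recover_list_find(ls, rc->rc_id).
 */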

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r = NULL;

	spin_lock(&ls->ls_recover_list_lock);

	list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
		if (id == (unsigned long) r)
			goto out;
	}
	r = NULL;
 out:
	spin_unlock(&ls->ls_recover_list_lock);
	return r;
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}


/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue)
		if (!(lkb->lkb_flags & DLM_IFL_MSTCPY))
			lkb->lkb_nodeid = nodeid;
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r, int nodeid)
{
	lock_rsb(r);
	r->res_nodeid = nodeid;
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
	unlock_rsb(r);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 */

static int recover_master(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();

	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
				       r->res_length, &ret_nodeid);
		if (error)
			log_error(ls, "recover dir lookup error %d", error);

		if (ret_nodeid == our_nodeid)
			ret_nodeid = 0;
		set_new_master(r, ret_nodeid);
	} else {
		recover_list_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid);
	}

	return error;
}

/*
 * When not using a directory, most resource names will hash to a new static
 * master nodeid and the resource will need to be remastered.
 */

static int recover_master_static(struct dlm_rsb *r)
{
	int master = dlm_dir_nodeid(r);

	if (master == dlm_our_nodeid())
		master = 0;

	if (r->res_nodeid != master) {
		if (is_master(r))
			dlm_purge_mstcpy_locks(r);
		set_new_master(r, master);
		return 1;
	}
	return 0;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error = 0, count = 0;

	log_debug(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		if (dlm_no_directory(ls))
			count += recover_master_static(r);
		else if (!is_master(r) && dlm_is_removed(ls, r->res_nodeid)) {
			recover_master(r);
			count++;
		}

		schedule();
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_masters %d resources", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int nodeid;

	r = recover_list_find(ls, rc->rc_id);
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)rc->rc_id);
		goto out;
	}

	nodeid = rc->rc_result;
	if (nodeid == dlm_our_nodeid())
		nodeid = 0;

	set_new_master(r, nodeid);
	recover_list_del(r);

	if (recover_list_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/


/*
 * keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

static int all_queues_empty(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_grantqueue) ||
	    !list_empty(&r->res_convertqueue) ||
	    !list_empty(&r->res_waitqueue))
		return 0;
	return 1;
}

static int recover_locks(struct dlm_rsb *r)
{
	int error = 0;

	lock_rsb(r);
	if (all_queues_empty(r))
		goto out;

	DLM_ASSERT(!r->res_recover_locks_count, dlm_print_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error, count = 0;

	log_debug(ls, "dlm_recover_locks");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_locks %d locks", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	else
		dlm_set_recover_status(ls, DLM_RS_LOCKS);
	return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_print_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
 * was already set prior to recovery, it's not cleared, regardless of locks.
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */
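
/*
 * Worked example (illustrative): if the grant and convert queues hold only
 * NL/CR locks that use DLM_LKF_VALBLK, recover_lvb() sets RSB_VALNOTVALID
 * and, when we are the new master (NEW_MASTER2), copies the rsb lvb from
 * the lkb with the highest lkb_lvbseq.  If any such lock has a granted mode
 * above CR, its lvb is used instead and VALNOTVALID is not set here.
 */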

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int big_lock_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	if (!big_lock_exists)
		rsb_set_flag(r, RSB_VALNOTVALID);

	/* don't mess with the lvb unless we're the new master */
	if (!rsb_flag(r, RSB_NEW_MASTER2))
		goto out;

	if (!r->res_lvbptr) {
		r->res_lvbptr = allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lock_exists) {
		r->res_lvbseq = lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1)
			lkb->lkb_grmode = lkb->lkb_rqmode;
		else
			lkb->lkb_grmode = grmode;
	}
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_grant_after_purge() due to locks that may have
   existed from a removed node. */

static void set_locks_purged(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_LOCKS_PURGED);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int count = 0;

	log_debug(ls, "dlm_recover_rsbs");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);
			if (rsb_flag(r, RSB_NEW_MASTER2))
				set_locks_purged(r);
			recover_lvb(r);
			count++;
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_rsbs %d rsbs", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		read_lock(&ls->ls_rsbtbl[i].lock);
		list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}
		read_unlock(&ls->ls_rsbtbl[i].lock);
	}
 out:
	up_write(&ls->ls_root_sem);
	return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	down_write(&ls->ls_root_sem);
	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
	up_write(&ls->ls_root_sem);
}

void dlm_clear_toss_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		write_lock(&ls->ls_rsbtbl[i].lock);
		list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
					 res_hashchain) {
			list_del(&r->res_hashchain);
			free_rsb(r);
		}
		write_unlock(&ls->ls_rsbtbl[i].lock);
	}
}