dlm: remove deadlock debug print
fs/dlm/lockspace.c [linux-block.git]
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"
#include "user.h"

static int ls_count;
static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
static struct task_struct *scand_task;


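/*
 * Per-lockspace sysfs files.  Userspace (dlm_controld) writes 0 or 1 to
 * "control" to stop or start a lockspace, writes its join/leave result to
 * "event_done", and reads/writes the global lockspace id through "id".
 * "recover_status" and "recover_nodeid" are read-only views of recovery
 * state.
 */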
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ssize_t ret = len;
	int n = simple_strtol(buf, NULL, 0);

	ls = dlm_find_lockspace_local(ls->ls_local_handle);
	if (!ls)
		return -EINVAL;

	switch (n) {
	case 0:
		dlm_ls_stop(ls);
		break;
	case 1:
		dlm_ls_start(ls);
		break;
	default:
		ret = -EINVAL;
	}
	dlm_put_lockspace(ls);
	return ret;
}

static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
	set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
	wake_up(&ls->ls_uevent_wait);
	return len;
}

static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}

static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_global_id = simple_strtoul(buf, NULL, 0);
	return len;
}

static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
	uint32_t status = dlm_recover_status(ls);
	return snprintf(buf, PAGE_SIZE, "%x\n", status);
}

static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}

struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};

static struct dlm_attr dlm_attr_control = {
	.attr = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};

static struct dlm_attr dlm_attr_event = {
	.attr = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};

static struct dlm_attr dlm_attr_id = {
	.attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show = dlm_id_show,
	.store = dlm_id_store
};

static struct dlm_attr dlm_attr_recover_status = {
	.attr = {.name = "recover_status", .mode = S_IRUGO},
	.show = dlm_recover_status_show
};

static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show = dlm_recover_nodeid_show
};

static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};

static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->show ? a->show(ls, buf) : 0;
}

static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t len)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->store ? a->store(ls, buf, len) : len;
}

static void lockspace_kobj_release(struct kobject *k)
{
	struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
	kfree(ls);
}

static const struct sysfs_ops dlm_attr_ops = {
	.show = dlm_attr_show,
	.store = dlm_attr_store,
};

static struct kobj_type dlm_ktype = {
	.default_attrs = dlm_attrs,
	.sysfs_ops = &dlm_attr_ops,
	.release = lockspace_kobj_release,
};

static struct kset *dlm_kset;

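/*
 * Ask dlm_controld to join or leave the lockspace group on our behalf,
 * then sleep until it writes the result to the "event_done" sysfs file.
 */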
static int do_uevent(struct dlm_ls *ls, int in)
{
	int error;

	if (in)
		kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
	else
		kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

	log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");

	/* dlm_controld will see the uevent, do the necessary group management
	   and then write to sysfs to wake us */

	error = wait_event_interruptible(ls->ls_uevent_wait,
			test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));

	log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);

	if (error)
		goto out;

	error = ls->ls_uevent_result;
 out:
	if (error)
		log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
			  error, ls->ls_uevent_result);
	return error;
}

static int dlm_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);

	add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
	return 0;
}

static struct kset_uevent_ops dlm_uevent_ops = {
	.uevent = dlm_uevent,
};

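/*
 * Module init/exit: create and tear down the "dlm" kset under /sys/kernel,
 * which each lockspace kobject is added to.
 */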
int __init dlm_lockspace_init(void)
{
	ls_count = 0;
	mutex_init(&ls_lock);
	INIT_LIST_HEAD(&lslist);
	spin_lock_init(&lslist_lock);

	dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj);
	if (!dlm_kset) {
		printk(KERN_WARNING "%s: can not create kset\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

void dlm_lockspace_exit(void)
{
	kset_unregister(dlm_kset);
}

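/*
 * dlm_scand is a kernel thread that walks the lockspace list on a
 * dlm_config.ci_scan_secs interval, calling dlm_scan_rsbs(),
 * dlm_scan_timeout() and dlm_scan_waiters() on each lockspace it can
 * lock for recovery.
 */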
static struct dlm_ls *find_ls_to_scan(void)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (time_after_eq(jiffies, ls->ls_scan_time +
					   dlm_config.ci_scan_secs * HZ)) {
			spin_unlock(&lslist_lock);
			return ls;
		}
	}
	spin_unlock(&lslist_lock);
	return NULL;
}

static int dlm_scand(void *data)
{
	struct dlm_ls *ls;

	while (!kthread_should_stop()) {
		ls = find_ls_to_scan();
		if (ls) {
			if (dlm_lock_recovery_try(ls)) {
				ls->ls_scan_time = jiffies;
				dlm_scan_rsbs(ls);
				dlm_scan_timeout(ls);
				dlm_scan_waiters(ls);
				dlm_unlock_recovery(ls);
			} else {
				ls->ls_scan_time += HZ;
			}
			continue;
		}
		schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
	}
	return 0;
}

static int dlm_scand_start(void)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_scand, NULL, "dlm_scand");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		scand_task = p;
	return error;
}

static void dlm_scand_stop(void)
{
	kthread_stop(scand_task);
}

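/*
 * The three lookup functions below return the lockspace with an extra
 * reference (ls_count) held; the caller must drop it again with
 * dlm_put_lockspace().
 */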
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);

	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_global_id == id) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_local_handle == lockspace) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_device(int minor)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_device.minor == minor) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

void dlm_put_lockspace(struct dlm_ls *ls)
{
	spin_lock(&lslist_lock);
	ls->ls_count--;
	spin_unlock(&lslist_lock);
}

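/*
 * Wait (sleeping a second at a time) until all references taken by the
 * lookup functions above have been dropped, then unlink the lockspace
 * from lslist.
 */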
static void remove_lockspace(struct dlm_ls *ls)
{
	for (;;) {
		spin_lock(&lslist_lock);
		if (ls->ls_count == 0) {
			WARN_ON(ls->ls_create_count != 0);
			list_del(&ls->ls_list);
			spin_unlock(&lslist_lock);
			return;
		}
		spin_unlock(&lslist_lock);
		ssleep(1);
	}
}

static int threads_start(void)
{
	int error;

	/* Thread which processes lock requests for all lockspaces */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}

static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}

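/*
 * new_lockspace() does the real work of dlm_new_lockspace(): reuse an
 * existing lockspace with the same name if one exists (unless
 * DLM_LSFL_NEWEXCL is set), otherwise allocate and initialize a new
 * dlm_ls, start dlm_recoverd for it, register its sysfs kobject, and
 * signal dlm_controld via uevent to add this node to the lockspace
 * group before waiting for the initial member configuration.
 */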
static int new_lockspace(const char *name, int namelen, void **lockspace,
			 uint32_t flags, int lvblen)
{
	struct dlm_ls *ls;
	int i, size, error;
	int do_unreg = 0;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -EINVAL;

	if (!lvblen || (lvblen % 8))
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	if (!dlm_user_daemon_available()) {
		module_put(THIS_MODULE);
		return -EUNATCH;
	}

	error = 0;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		WARN_ON(ls->ls_create_count <= 0);
		if (ls->ls_namelen != namelen)
			continue;
		if (memcmp(ls->ls_name, name, namelen))
			continue;
		if (flags & DLM_LSFL_NEWEXCL) {
			error = -EEXIST;
			break;
		}
		ls->ls_create_count++;
		*lockspace = ls;
		error = 1;
		break;
	}
	spin_unlock(&lslist_lock);

	if (error)
		goto out;

	error = -ENOMEM;

	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_lvblen = lvblen;
	ls->ls_count = 0;
	ls->ls_flags = 0;
	ls->ls_scan_time = jiffies;

	if (flags & DLM_LSFL_TIMEWARN)
		set_bit(LSFL_TIMEWARN, &ls->ls_flags);

	/* ls_exflags are forced to match among nodes, and we don't
	   need to require all nodes to have some flags set */
	ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
				    DLM_LSFL_NEWEXCL));

	size = dlm_config.ci_rsbtbl_size;
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = vmalloc(sizeof(struct dlm_rsbtable) * size);
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
		spin_lock_init(&ls->ls_rsbtbl[i].lock);
	}

	idr_init(&ls->ls_lkbidr);
	spin_lock_init(&ls->ls_lkbidr_spin);

	size = dlm_config.ci_dirtbl_size;
	ls->ls_dirtbl_size = size;

	ls->ls_dirtbl = vmalloc(sizeof(struct dlm_dirtable) * size);
	if (!ls->ls_dirtbl)
		goto out_lkbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
		spin_lock_init(&ls->ls_dirtbl[i].lock);
	}

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);
	INIT_LIST_HEAD(&ls->ls_timeout);
	mutex_init(&ls->ls_timeout_mutex);

	INIT_LIST_HEAD(&ls->ls_new_rsb);
	spin_lock_init(&ls->ls_new_rsb_spin);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;
	init_completion(&ls->ls_members_done);
	ls->ls_members_result = -1;

	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	init_rwsem(&ls->ls_recv_active);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	mutex_init(&ls->ls_requestqueue_mutex);
	mutex_init(&ls->ls_clear_proc_locks);

	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
	if (!ls->ls_recover_buf)
		goto out_dirfree;

	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	ls->ls_recover_list_count = 0;
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);

	down_write(&ls->ls_in_recovery);

	spin_lock(&lslist_lock);
	ls->ls_create_count = 1;
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);

	/* needs to find ls in lslist */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_delist;
	}

	ls->ls_kobj.kset = dlm_kset;
	error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
				     "%s", ls->ls_name);
	if (error)
		goto out_stop;
	kobject_uevent(&ls->ls_kobj, KOBJ_ADD);

	/* let kobject handle freeing of ls if there's an error */
	do_unreg = 1;

	/* This uevent triggers dlm_controld in userspace to add us to the
	   group of nodes that are members of this lockspace (managed by the
	   cluster infrastructure.)  Once it's done that, it tells us who the
	   current lockspace members are (via configfs) and then tells the
	   lockspace to start running (via sysfs) in dlm_ls_start(). */

	error = do_uevent(ls, 1);
	if (error)
		goto out_stop;

	wait_for_completion(&ls->ls_members_done);
	error = ls->ls_members_result;
	if (error)
		goto out_members;

	dlm_create_debug_file(ls);

	log_debug(ls, "join complete");
	*lockspace = ls;
	return 0;

 out_members:
	do_uevent(ls, 0);
	dlm_clear_members(ls);
	kfree(ls->ls_node_array);
 out_stop:
	dlm_recoverd_stop(ls);
 out_delist:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	kfree(ls->ls_recover_buf);
 out_dirfree:
	vfree(ls->ls_dirtbl);
 out_lkbfree:
	idr_destroy(&ls->ls_lkbidr);
	vfree(ls->ls_rsbtbl);
 out_lsfree:
	if (do_unreg)
		kobject_put(&ls->ls_kobj);
	else
		kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}

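/*
 * dlm_new_lockspace() serializes lockspace creation under ls_lock and
 * starts the shared dlm threads (astd, scand, lowcomms) when the first
 * lockspace is created; they are stopped again if creation fails and no
 * lockspaces remain.
 */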
int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
		      uint32_t flags, int lvblen)
{
	int error = 0;

	mutex_lock(&ls_lock);
	if (!ls_count)
		error = threads_start();
	if (error)
		goto out;

	error = new_lockspace(name, namelen, lockspace, flags, lvblen);
	if (!error)
		ls_count++;
	if (error > 0)
		error = 0;
	if (!ls_count)
		threads_stop();
 out:
	mutex_unlock(&ls_lock);
	return error;
}

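/*
 * idr callbacks: lkb_idr_is_any() and lkb_idr_is_local() report whether
 * the lockspace still holds any lkbs (or any with a local nodeid), and
 * lkb_idr_free() releases an lkb, and its lvb when DLM_IFL_MSTCPY is
 * set, during lockspace teardown.
 */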
static int lkb_idr_is_local(int id, void *p, void *data)
{
	struct dlm_lkb *lkb = p;

	if (!lkb->lkb_nodeid)
		return 1;
	return 0;
}

static int lkb_idr_is_any(int id, void *p, void *data)
{
	return 1;
}

static int lkb_idr_free(int id, void *p, void *data)
{
	struct dlm_lkb *lkb = p;

	dlm_del_ast(lkb);

	if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
		dlm_free_lvb(lkb->lkb_lvbptr);

	dlm_free_lkb(lkb);
	return 0;
}

/* NOTE: We check the lkbidr here rather than the resource table.
   This is because there may be LKBs queued as ASTs that have been unlinked
   from their RSBs and are pending deletion once the AST has been delivered */

static int lockspace_busy(struct dlm_ls *ls, int force)
{
	int rv;

	spin_lock(&ls->ls_lkbidr_spin);
	if (force == 0) {
		rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
	} else if (force == 1) {
		rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
	} else {
		rv = 0;
	}
	spin_unlock(&ls->ls_lkbidr_spin);
	return rv;
}

static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_rsb *rsb;
	struct list_head *head;
	int i, busy, rv;

	busy = lockspace_busy(ls, force);

	spin_lock(&lslist_lock);
	if (ls->ls_create_count == 1) {
		if (busy) {
			rv = -EBUSY;
		} else {
			/* remove_lockspace takes ls off lslist */
			ls->ls_create_count = 0;
			rv = 0;
		}
	} else if (ls->ls_create_count > 1) {
		rv = --ls->ls_create_count;
	} else {
		rv = -EINVAL;
	}
	spin_unlock(&lslist_lock);

	if (rv) {
		log_debug(ls, "release_lockspace no remove %d", rv);
		return rv;
	}

	dlm_device_deregister(ls);

	if (force < 3 && dlm_user_daemon_available())
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);

	remove_lockspace(ls);

	dlm_delete_debug_file(ls);

	dlm_astd_suspend();

	kfree(ls->ls_recover_buf);

	/*
	 * Free direntry structs.
	 */

	dlm_dir_clear(ls);
	vfree(ls->ls_dirtbl);

	/*
	 * Free all lkb's in idr
	 */

	idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
	idr_remove_all(&ls->ls_lkbidr);
	idr_destroy(&ls->ls_lkbidr);

	dlm_astd_resume();

	/*
	 * Free all rsb's on rsbtbl[] lists
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		head = &ls->ls_rsbtbl[i].list;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);

			list_del(&rsb->res_hashchain);
			dlm_free_rsb(rsb);
		}

		head = &ls->ls_rsbtbl[i].toss;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);
			list_del(&rsb->res_hashchain);
			dlm_free_rsb(rsb);
		}
	}

	vfree(ls->ls_rsbtbl);

	while (!list_empty(&ls->ls_new_rsb)) {
		rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
				       res_hashchain);
		list_del(&rsb->res_hashchain);
		dlm_free_rsb(rsb);
	}

	/*
	 * Free structures on any other lists
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_free_entries(ls);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	log_debug(ls, "release_lockspace final free");
	kobject_put(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is done with */

	module_put(THIS_MODULE);
	return 0;
}

/*
 * Called when a system has released all its locks and is not going to use the
 * lockspace any longer.  We free everything we're managing for this lockspace.
 * Remaining nodes will go through the recovery process as if we'd died.  The
 * lockspace must continue to function as usual, participating in recoveries,
 * until this returns.
 *
 * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
 * 2 - destroy lockspace regardless of LKBs
 * 3 - destroy lockspace as part of a forced shutdown
 */

int dlm_release_lockspace(void *lockspace, int force)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;
	dlm_put_lockspace(ls);

	mutex_lock(&ls_lock);
	error = release_lockspace(ls, force);
	if (!error)
		ls_count--;
	if (!ls_count)
		threads_stop();
	mutex_unlock(&ls_lock);

	return error;
}

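/*
 * Stop every lockspace that is still running.  This is used when no
 * userland control daemon is available to manage them.
 */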
void dlm_stop_lockspaces(void)
{
	struct dlm_ls *ls;

 restart:
	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (!test_bit(LSFL_RUNNING, &ls->ls_flags))
			continue;
		spin_unlock(&lslist_lock);
		log_error(ls, "no userland control daemon, stopping lockspace");
		dlm_ls_stop(ls);
		goto restart;
	}
	spin_unlock(&lslist_lock);
}