1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
6 #include <linux/miscdevice.h>
7 #include <linux/init.h>
8 #include <linux/wait.h>
9 #include <linux/file.h>
11 #include <linux/poll.h>
12 #include <linux/signal.h>
13 #include <linux/spinlock.h>
14 #include <linux/dlm.h>
15 #include <linux/dlm_device.h>
16 #include <linux/slab.h>
17 #include <linux/sched/signal.h>
19 #include <trace/events/dlm.h>
21 #include "dlm_internal.h"
22 #include "lockspace.h"
24 #include "lvb_table.h"
/* Prefix for per-lockspace misc-device names ("dlm_<lockspace>", see
 * dlm_device_register), and a forward declaration of the fops installed
 * on those devices. */
30 static const char name_prefix[] = "dlm";
31 static const struct file_operations device_fops;
/* Tracks opens of the dlm-monitor device.  dlm_monitor_unused stays 1
 * until the monitor device is opened for the first time; both are used
 * by dlm_user_daemon_available() to decide whether dlm_controld is
 * running. */
32 static atomic_t dlm_monitor_opened;
33 static int dlm_monitor_unused = 1;
/* 32-bit (compat) layouts of the userspace request/result structures
 * from <linux/dlm_device.h>.  Pointer-sized fields are 32 bits here so
 * a 32-bit userspace can talk to a 64-bit kernel; compat_input() and
 * compat_output() below translate between these and the native
 * structs. */
37 struct dlm_lock_params32 {
51 char lvb[DLM_USER_LVB_LEN];
55 struct dlm_write_request32 {
/* Union of per-command payloads, selected by the request's cmd field
 * (see compat_input). */
62 struct dlm_lock_params32 lock;
63 struct dlm_lspace_params lspace;
64 struct dlm_purge_params purge;
75 struct dlm_lock_result32 {
81 struct dlm_lksb32 lksb;
84 /* Offsets may be zero if no data is present */
/* Convert a 32-bit userspace write request (kb32) into the native
 * request layout (kb).  namelen is the number of trailing resource-name
 * bytes supplied by the caller (device_write computes it from the write
 * size). */
88 static void compat_input(struct dlm_write_request *kb,
89 struct dlm_write_request32 *kb32,
/* Fixed header fields copy one-for-one. */
92 kb->version[0] = kb32->version[0];
93 kb->version[1] = kb32->version[1];
94 kb->version[2] = kb32->version[2];
97 kb->is64bit = kb32->is64bit;
/* Which union member is valid depends on the command code. */
98 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
99 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
100 kb->i.lspace.flags = kb32->i.lspace.flags;
101 kb->i.lspace.minor = kb32->i.lspace.minor;
102 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
103 } else if (kb->cmd == DLM_USER_PURGE) {
104 kb->i.purge.nodeid = kb32->i.purge.nodeid;
105 kb->i.purge.pid = kb32->i.purge.pid;
/* All other commands carry lock parameters.  The 32-bit userspace
 * "pointer" fields are cookies echoed back to userspace later; they
 * are widened through (long) into __user pointers here. */
107 kb->i.lock.mode = kb32->i.lock.mode;
108 kb->i.lock.namelen = kb32->i.lock.namelen;
109 kb->i.lock.flags = kb32->i.lock.flags;
110 kb->i.lock.lkid = kb32->i.lock.lkid;
111 kb->i.lock.parent = kb32->i.lock.parent;
112 kb->i.lock.xid = kb32->i.lock.xid;
113 kb->i.lock.timeout = kb32->i.lock.timeout;
114 kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
115 kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
116 kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
117 kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
118 kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
119 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
120 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
/* Convert a native lock result (res) into the 32-bit layout (res32)
 * for delivery to a compat process via device_read.  Kernel-held
 * userspace cookies are narrowed back to 32 bits. */
124 static void compat_output(struct dlm_lock_result *res,
125 struct dlm_lock_result32 *res32)
/* Zero first so any padding/unset fields don't leak kernel memory to
 * userspace. */
127 memset(res32, 0, sizeof(*res32));
129 res32->version[0] = res->version[0];
130 res32->version[1] = res->version[1];
131 res32->version[2] = res->version[2];
133 res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
134 res32->user_astparam = (__u32)(__force long)res->user_astparam;
135 res32->user_lksb = (__u32)(__force long)res->user_lksb;
136 res32->bast_mode = res->bast_mode;
138 res32->lvb_offset = res->lvb_offset;
139 res32->length = res->length;
/* The lksb is copied field-by-field because the 32-bit lksb layout
 * differs (sb_lvbptr is a 32-bit cookie). */
141 res32->lksb.sb_status = res->lksb.sb_status;
142 res32->lksb.sb_flags = res->lksb.sb_flags;
143 res32->lksb.sb_lkid = res->lksb.sb_lkid;
144 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
148 /* Figure out if this lock is at the end of its life and no longer
149 available for the application to use. The lkb still exists until
150 the final ast is read. A lock becomes EOL in three situations:
151 1. a noqueue request fails with EAGAIN
152 2. an unlock completes with EUNLOCK
153 3. a cancel of a waiting request completes with ECANCEL/EDEADLK
154 An EOL lock needs to be removed from the process's list of locks.
155 And we can't allow any new operation on an EOL lock. This is
156 not related to the lifetime of the lkb struct which is managed
157 entirely by refcount. */
/* Return nonzero if a completion with this (mode, status) means the
 * lock is end-of-life (see the EOL comment above: failed noqueue
 * request, completed unlock, or completed cancel).  DLM_LOCK_IV as the
 * granted mode indicates the lock was never granted. */
159 static int lkb_is_endoflife(int mode, int status)
168 if (mode == DLM_LOCK_IV)
175 /* we could possibly check if the cancel of an orphan has resulted in the lkb
176 being removed and then remove that lkb from the orphans list and free it */
/* Queue a completion or blocking AST for delivery to the owning
 * userspace process (read back via device_read).  Called from lock
 * processing with a reference on the lkb held. */
178 void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
179 int status, uint32_t sbflags)
182 struct dlm_user_args *ua;
183 struct dlm_user_proc *proc;
/* Cheap unlocked check first; rechecked under ls_clear_proc_locks
 * below since the flags can change while we take the lock. */
186 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
189 ls = lkb->lkb_resource->res_ls;
190 spin_lock(&ls->ls_clear_proc_locks);
192 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
193 can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
194 lkb->ua so we can't try to use it. This second check is necessary
195 for cases where a completion ast is received for an operation that
196 began before clear_proc_locks did its cancel/unlock. */
198 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
201 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
/* A blocking AST with no registered bastaddr has nowhere to go. */
205 if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
/* Mark end-of-life so the final completion removes the lock from the
 * process's ownqueue below. */
208 if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
209 lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
211 spin_lock(&proc->asts_spin);
213 rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
215 case DLM_ENQUEUE_CALLBACK_FAILURE:
216 spin_unlock(&proc->asts_spin);
/* NEED_SCHED: first pending callback for this lkb; take a ref for the
 * proc->asts list and wake any reader blocked in device_read. */
219 case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
220 kref_get(&lkb->lkb_ref);
221 list_add_tail(&lkb->lkb_cb_list, &proc->asts);
222 wake_up_interruptible(&proc->wait);
/* SUCCESS: callback merged onto an already-queued lkb; no wakeup
 * needed. */
224 case DLM_ENQUEUE_CALLBACK_SUCCESS:
230 spin_unlock(&proc->asts_spin);
232 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
233 /* N.B. spin_lock locks_spin, not asts_spin */
234 spin_lock(&proc->locks_spin);
235 if (!list_empty(&lkb->lkb_ownqueue)) {
236 list_del_init(&lkb->lkb_ownqueue);
239 spin_unlock(&proc->locks_spin);
242 spin_unlock(&ls->ls_clear_proc_locks);
/* Handle a DLM_USER_LOCK request written to a lockspace device:
 * allocate the per-lock user args and issue a request, convert, or
 * orphan-adopt depending on params->flags.  On success the new lkid is
 * returned (via ua->lksb.sb_lkid). */
245 static int device_user_lock(struct dlm_user_proc *proc,
246 struct dlm_lock_params *params)
249 struct dlm_user_args *ua;
253 ls = dlm_find_lockspace_local(proc->lockspace);
/* A completion AST address and lksb are mandatory for user locks. */
257 if (!params->castaddr || !params->lksb) {
262 #ifdef CONFIG_DLM_DEPRECATED_API
/* Warn (once) that the per-lock timeout feature is on its way out. */
264 pr_warn_once("========================================================\n"
265 "WARNING: the lkb timeout feature is being deprecated and\n"
266 " will be removed in v6.2!\n"
267 "========================================================\n");
/* GFP_NOFS: allocated in a filesystem-adjacent path; avoid recursing
 * into reclaim that could re-enter the fs. */
270 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
274 ua->user_lksb = params->lksb;
275 ua->castparam = params->castparam;
276 ua->castaddr = params->castaddr;
277 ua->bastparam = params->bastparam;
278 ua->bastaddr = params->bastaddr;
279 ua->xid = params->xid;
281 if (params->flags & DLM_LKF_CONVERT) {
282 #ifdef CONFIG_DLM_DEPRECATED_API
283 error = dlm_user_convert(ls, ua,
284 params->mode, params->flags,
285 params->lkid, params->lvb,
286 (unsigned long) params->timeout);
288 error = dlm_user_convert(ls, ua,
289 params->mode, params->flags,
290 params->lkid, params->lvb);
292 } else if (params->flags & DLM_LKF_ORPHAN) {
/* Re-attach an orphaned lock left behind by a dead process. */
293 error = dlm_user_adopt_orphan(ls, ua,
294 params->mode, params->flags,
295 params->name, params->namelen,
300 #ifdef CONFIG_DLM_DEPRECATED_API
301 error = dlm_user_request(ls, ua,
302 params->mode, params->flags,
303 params->name, params->namelen,
304 (unsigned long) params->timeout);
306 error = dlm_user_request(ls, ua,
307 params->mode, params->flags,
308 params->name, params->namelen);
/* On success, report the new lock id back to device_write's caller. */
311 error = ua->lksb.sb_lkid;
314 dlm_put_lockspace(ls);
/* Handle a DLM_USER_UNLOCK request: cancel the lock if DLM_LKF_CANCEL
 * is set, otherwise unlock it.  A fresh dlm_user_args carries the
 * completion callback cookies for this operation. */
318 static int device_user_unlock(struct dlm_user_proc *proc,
319 struct dlm_lock_params *params)
322 struct dlm_user_args *ua;
325 ls = dlm_find_lockspace_local(proc->lockspace);
329 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
333 ua->user_lksb = params->lksb;
334 ua->castparam = params->castparam;
335 ua->castaddr = params->castaddr;
337 if (params->flags & DLM_LKF_CANCEL)
338 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
340 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
343 dlm_put_lockspace(ls);
/* Handle a DLM_USER_DEADLOCK request: resolve a userspace-detected
 * deadlock involving the given lkid. */
347 static int device_user_deadlock(struct dlm_user_proc *proc,
348 struct dlm_lock_params *params)
353 ls = dlm_find_lockspace_local(proc->lockspace);
357 error = dlm_user_deadlock(ls, params->flags, params->lkid);
359 dlm_put_lockspace(ls);
/* Register a misc device named "dlm_<name>" for the lockspace so
 * userspace can open it and issue lock operations.  Idempotent: a
 * second registration for the same ls is a no-op. */
363 static int dlm_device_register(struct dlm_ls *ls, char *name)
367 /* The device is already registered. This happens when the
368 lockspace is created multiple times from userspace. */
369 if (ls->ls_device.name)
/* prefix + '_' + name + NUL. */
373 len = strlen(name) + strlen(name_prefix) + 2;
374 ls->ls_device.name = kzalloc(len, GFP_NOFS);
375 if (!ls->ls_device.name)
378 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
380 ls->ls_device.fops = &device_fops;
381 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
383 error = misc_register(&ls->ls_device);
/* On failure, free the name and clear the pointer so a later
 * deregister (or retry) doesn't double-free. */
385 kfree(ls->ls_device.name);
386 /* this has to be set to NULL
387 * to avoid a double-free in dlm_device_deregister
389 ls->ls_device.name = NULL;
/* Tear down the lockspace's misc device, if one was ever registered
 * (ls_device.name doubles as the "registered" flag). */
395 int dlm_device_deregister(struct dlm_ls *ls)
397 /* The device is not registered. This happens when the lockspace
398 was never used from userspace, or when device_create_lockspace()
399 calls dlm_release_lockspace() after the register fails. */
400 if (!ls->ls_device.name)
403 misc_deregister(&ls->ls_device);
404 kfree(ls->ls_device.name);
/* Handle a DLM_USER_PURGE request: drop orphan locks belonging to the
 * given nodeid/pid in this lockspace. */
408 static int device_user_purge(struct dlm_user_proc *proc,
409 struct dlm_purge_params *params)
414 ls = dlm_find_lockspace_local(proc->lockspace);
418 error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
420 dlm_put_lockspace(ls);
/* Handle DLM_USER_CREATE_LOCKSPACE (control device only): create the
 * lockspace and register a misc device for it.  Requires
 * CAP_SYS_ADMIN.  On success returns the new device's minor number so
 * userspace can find it. */
424 static int device_create_lockspace(struct dlm_lspace_params *params)
426 dlm_lockspace_t *lockspace;
430 if (!capable(CAP_SYS_ADMIN))
433 error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name,
434 params->flags, DLM_USER_LVB_LEN, NULL,
435 NULL, NULL, &lockspace);
439 ls = dlm_find_lockspace_local(lockspace);
443 error = dlm_device_register(ls, params->name);
444 dlm_put_lockspace(ls);
/* If device registration failed, undo the lockspace creation. */
447 dlm_release_lockspace(lockspace, 0);
449 error = ls->ls_device.minor;
/* Handle DLM_USER_REMOVE_LOCKSPACE (control device only): look up the
 * lockspace by device minor and release it, optionally forcing the
 * free of remaining locks.  Requires CAP_SYS_ADMIN. */
454 static int device_remove_lockspace(struct dlm_lspace_params *params)
456 dlm_lockspace_t *lockspace;
458 int error, force = 0;
460 if (!capable(CAP_SYS_ADMIN))
463 ls = dlm_find_lockspace_device(params->minor);
467 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
/* Keep the handle but drop our find-reference before releasing, so
 * the release can see the refcount reach zero. */
470 lockspace = ls->ls_local_handle;
471 dlm_put_lockspace(ls);
473 /* The final dlm_release_lockspace waits for references to go to
474 zero, so all processes will need to close their device for the
475 ls before the release will proceed. release also calls the
476 device_deregister above. Converting a positive return value
477 from release to zero means that userspace won't know when its
478 release was the final one, but it shouldn't need to know. */
480 error = dlm_release_lockspace(lockspace, force);
486 /* Check the user's version matches ours */
/* Returns nonzero (mismatch) when the request's major differs from
 * ours, or the major matches but the request's minor is newer than the
 * kernel supports.  Patch level is not checked. */
487 static int check_version(struct dlm_write_request *req)
489 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
490 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
491 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
/* Log who sent the bad request to help debug userspace/kernel skew. */
493 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
494 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
496 task_pid_nr(current),
500 DLM_DEVICE_VERSION_MAJOR,
501 DLM_DEVICE_VERSION_MINOR,
502 DLM_DEVICE_VERSION_PATCH);
512 * dlm_user_request -> request_lock
513 * dlm_user_convert -> convert_lock
516 * dlm_user_unlock -> unlock_lock
517 * dlm_user_cancel -> cancel_lock
519 * device_create_lockspace
522 * device_remove_lockspace
523 * dlm_release_lockspace
526 /* a write to a lockspace device is a lock or unlock request, a write
527 to the control device is to create/remove a lockspace */
/* write(2) entry point shared by the control device and per-lockspace
 * devices.  Copies in a dlm_write_request (converting from the 32-bit
 * layout for compat callers), validates it, and dispatches on cmd.
 * On the control device file->private_data is NULL (see
 * ctl_device_open), so proc is NULL there and only the
 * create/remove-lockspace commands are allowed. */
529 static ssize_t device_write(struct file *file, const char __user *buf,
530 size_t count, loff_t *ppos)
532 struct dlm_user_proc *proc = file->private_data;
533 struct dlm_write_request *kbuf;
/* Minimum size check: the 32-bit request is the smaller layout. */
537 if (count < sizeof(struct dlm_write_request32))
539 if (count < sizeof(struct dlm_write_request))
544 * can't compare against COMPAT/dlm_write_request32 because
545 * we don't yet know if is64bit is zero
/* Upper bound: fixed request plus at most a max-length resource
 * name. */
547 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
/* _nul variant NUL-terminates the copy so name handling is safe. */
550 kbuf = memdup_user_nul(buf, count);
552 return PTR_ERR(kbuf);
554 if (check_version(kbuf)) {
/* Compat path: re-read the buffer as the 32-bit layout, allocate a
 * native request, and translate. */
560 if (!kbuf->is64bit) {
561 struct dlm_write_request32 *k32buf;
564 if (count > sizeof(struct dlm_write_request32))
565 namelen = count - sizeof(struct dlm_write_request32);
567 k32buf = (struct dlm_write_request32 *)kbuf;
569 /* add 1 after namelen so that the name string is terminated */
570 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
/* Remember this proc speaks the 32-bit ABI for later reads. */
578 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
580 compat_input(kbuf, k32buf, namelen);
585 /* do we really need this? can a write happen after a close? */
586 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
587 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
/* Dispatch: lock/unlock/deadlock/purge only on lockspace devices,
 * create/remove only on the control device. */
598 log_print("no locking on control device");
601 error = device_user_lock(proc, &kbuf->i.lock);
604 case DLM_USER_UNLOCK:
606 log_print("no locking on control device");
609 error = device_user_unlock(proc, &kbuf->i.lock);
612 case DLM_USER_DEADLOCK:
614 log_print("no locking on control device");
617 error = device_user_deadlock(proc, &kbuf->i.lock);
620 case DLM_USER_CREATE_LOCKSPACE:
622 log_print("create/remove only on control device");
625 error = device_create_lockspace(&kbuf->i.lspace);
628 case DLM_USER_REMOVE_LOCKSPACE:
630 log_print("create/remove only on control device");
633 error = device_remove_lockspace(&kbuf->i.lspace);
638 log_print("no locking on control device");
641 error = device_user_purge(proc, &kbuf->i.purge);
645 log_print("Unknown command passed to DLM device : %d\n",
654 /* Every process that opens the lockspace device has its own "proc" structure
655 hanging off the open file that's used to keep track of locks owned by the
656 process and asts that need to be delivered to the process. */
/* open(2) on a lockspace device: allocate the per-open dlm_user_proc
 * that tracks this process's locks and pending ASTs.  Holds a
 * lockspace reference (from the find here) until device_close. */
658 static int device_open(struct inode *inode, struct file *file)
660 struct dlm_user_proc *proc;
/* The device minor identifies which lockspace was opened. */
663 ls = dlm_find_lockspace_device(iminor(inode));
667 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
669 dlm_put_lockspace(ls);
673 proc->lockspace = ls->ls_local_handle;
674 INIT_LIST_HEAD(&proc->asts);
675 INIT_LIST_HEAD(&proc->locks);
676 INIT_LIST_HEAD(&proc->unlocking);
677 spin_lock_init(&proc->asts_spin);
678 spin_lock_init(&proc->locks_spin);
679 init_waitqueue_head(&proc->wait);
680 file->private_data = proc;
/* Final close of a lockspace device: mark the proc closing, cancel or
 * unlock every lock it still owns, and drop the references taken in
 * device_open. */
685 static int device_close(struct inode *inode, struct file *file)
687 struct dlm_user_proc *proc = file->private_data;
690 ls = dlm_find_lockspace_local(proc->lockspace);
/* CLOSING fences new lock/unlock writes and reads racing with the
 * cleanup below. */
694 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
696 dlm_clear_proc_locks(ls, proc);
698 /* at this point no more lkb's should exist for this lockspace,
699 so there's no chance of dlm_user_add_ast() being called and
700 looking for lkb->ua->proc */
703 file->private_data = NULL;
/* Two puts: one for the find above, one matching device_open. */
705 dlm_put_lockspace(ls);
706 dlm_put_lockspace(ls); /* for the find in device_open() */
708 /* FIXME: AUTOFREE: if this ls is no longer used do
709 device_remove_lockspace() */
/* Build a dlm_lock_result from the lock's user args and copy it (plus,
 * optionally, the LVB) to the userspace buffer.  compat selects the
 * 32-bit result layout; flags says whether this is a completion or
 * blocking AST. */
714 static int copy_result_to_user(struct dlm_user_args *ua, int compat,
715 uint32_t flags, int mode, int copy_lvb,
716 char __user *buf, size_t count)
719 struct dlm_lock_result32 result32;
721 struct dlm_lock_result result;
727 memset(&result, 0, sizeof(struct dlm_lock_result));
728 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
729 result.version[1] = DLM_DEVICE_VERSION_MINOR;
730 result.version[2] = DLM_DEVICE_VERSION_PATCH;
/* Copy the lksb up to (not including) sb_lvbptr; the LVB is handled
 * separately via lvb_offset below. */
731 memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
732 result.user_lksb = ua->user_lksb;
734 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
735 in a conversion unless the conversion is successful. See code
736 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
737 notes that a new blocking AST address and parameter are set even if
738 the conversion fails, so maybe we should just do that. */
/* Pick the blocking-AST or completion-AST callback cookies. */
740 if (flags & DLM_CB_BAST) {
741 result.user_astaddr = ua->bastaddr;
742 result.user_astparam = ua->bastparam;
743 result.bast_mode = mode;
745 result.user_astaddr = ua->castaddr;
746 result.user_astparam = ua->castparam;
751 len = sizeof(struct dlm_lock_result32);
754 len = sizeof(struct dlm_lock_result);
757 /* copy lvb to userspace if there is one, it's been updated, and
758 the user buffer has space for it */
760 if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
761 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
/* Tell userspace where the LVB landed in its buffer. */
767 result.lvb_offset = len;
768 len += DLM_USER_LVB_LEN;
/* Compat readers get the narrowed 32-bit result struct. */
775 compat_output(&result, &result32);
776 resultptr = &result32;
780 if (copy_to_user(buf, resultptr, struct_len))
/* Answer a version-probe read: fill a dlm_device_version with the
 * kernel's device ABI version and copy it out.  Returns the number of
 * bytes written. */
788 static int copy_version_to_user(char __user *buf, size_t count)
790 struct dlm_device_version ver;
792 memset(&ver, 0, sizeof(struct dlm_device_version));
793 ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
794 ver.version[1] = DLM_DEVICE_VERSION_MINOR;
795 ver.version[2] = DLM_DEVICE_VERSION_PATCH;
797 if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
799 return sizeof(struct dlm_device_version);
802 /* a read returns a single ast described in a struct dlm_lock_result */
804 static ssize_t device_read(struct file *file, char __user *buf, size_t count,
807 struct dlm_user_proc *proc = file->private_data;
809 DECLARE_WAITQUEUE(wait, current);
810 struct dlm_callback *cb;
811 int rv, copy_lvb = 0;
812 int old_mode, new_mode;
/* A read sized exactly like dlm_device_version is a version probe,
 * valid on any dlm device. */
814 if (count == sizeof(struct dlm_device_version)) {
815 rv = copy_version_to_user(buf, count);
820 log_print("non-version read from control device %zu", count);
/* The user buffer must hold at least one full result struct (32-bit
 * layout for compat readers). */
825 if (count < sizeof(struct dlm_lock_result32))
827 if (count < sizeof(struct dlm_lock_result))
833 /* do we really need this? can a read happen after a close? */
834 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
/* Block (unless O_NONBLOCK) until an AST is queued or a signal
 * arrives; dlm_user_add_ast() wakes proc->wait. */
837 spin_lock(&proc->asts_spin);
838 if (list_empty(&proc->asts)) {
839 if (file->f_flags & O_NONBLOCK) {
840 spin_unlock(&proc->asts_spin);
844 add_wait_queue(&proc->wait, &wait);
847 set_current_state(TASK_INTERRUPTIBLE);
848 if (list_empty(&proc->asts) && !signal_pending(current)) {
849 spin_unlock(&proc->asts_spin);
851 spin_lock(&proc->asts_spin);
854 set_current_state(TASK_RUNNING);
855 remove_wait_queue(&proc->wait, &wait);
857 if (signal_pending(current)) {
858 spin_unlock(&proc->asts_spin);
863 /* if we empty lkb_callbacks, we don't want to unlock the spinlock
864 without removing lkb_cb_list; so empty lkb_cb_list is always
865 consistent with empty lkb_callbacks */
867 lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
869 /* rem_lkb_callback sets a new lkb_last_cast */
870 old_mode = lkb->lkb_last_cast->mode;
872 rv = dlm_dequeue_lkb_callback(lkb, &cb);
874 case DLM_DEQUEUE_CALLBACK_EMPTY:
875 /* this shouldn't happen; lkb should have been removed from
876 * list when last item was dequeued
878 log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
879 list_del_init(&lkb->lkb_cb_list);
880 spin_unlock(&proc->asts_spin);
881 /* removes ref for proc->asts, may cause lkb to be freed */
/* LAST: that was the final queued callback; drop the lkb from the
 * asts list and clear the pending flag. */
885 case DLM_DEQUEUE_CALLBACK_LAST:
886 list_del_init(&lkb->lkb_cb_list);
887 lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
889 case DLM_DEQUEUE_CALLBACK_SUCCESS:
895 spin_unlock(&proc->asts_spin);
897 if (cb->flags & DLM_CB_BAST) {
898 trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
899 } else if (cb->flags & DLM_CB_CAST) {
/* Completion AST: decide from the lvb_operations table whether the
 * LVB changed across the mode transition and should be copied out. */
902 if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
903 dlm_lvb_operations[old_mode + 1][new_mode + 1])
906 lkb->lkb_lksb->sb_status = cb->sb_status;
907 lkb->lkb_lksb->sb_flags = cb->sb_flags;
908 trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
911 rv = copy_result_to_user(lkb->lkb_ua,
912 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
913 cb->flags, cb->mode, copy_lvb, buf, count);
915 kref_put(&cb->ref, dlm_release_callback);
917 /* removes ref for proc->asts, may cause lkb to be freed */
918 if (rv == DLM_DEQUEUE_CALLBACK_LAST)
/* poll(2)/select(2) on a lockspace device: readable when at least one
 * AST is queued for this proc. */
924 static __poll_t device_poll(struct file *file, poll_table *wait)
926 struct dlm_user_proc *proc = file->private_data;
928 poll_wait(file, &proc->wait, wait);
930 spin_lock(&proc->asts_spin);
931 if (!list_empty(&proc->asts)) {
932 spin_unlock(&proc->asts_spin);
933 return EPOLLIN | EPOLLRDNORM;
935 spin_unlock(&proc->asts_spin);
/* Return 1 if dlm_controld appears to be running: we have a nodeid
 * (configfs populated) and the monitor device is currently open (or
 * was never used, for old daemons that predate it). */
939 int dlm_user_daemon_available(void)
941 /* dlm_controld hasn't started (or, has started, but not
942 properly populated configfs) */
944 if (!dlm_our_nodeid())
947 /* This is to deal with versions of dlm_controld that don't
948 know about the monitor device. We assume that if the
949 dlm_controld was started (above), but the monitor device
950 was never opened, that it's an old version. dlm_controld
951 should open the monitor device before populating configfs. */
953 if (dlm_monitor_unused)
956 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
/* Control-device open: NULL private_data distinguishes the control
 * device from lockspace devices in device_write/device_read. */
959 static int ctl_device_open(struct inode *inode, struct file *file)
961 file->private_data = NULL;
965 static int ctl_device_close(struct inode *inode, struct file *file)
/* Monitor device: dlm_controld holds it open while running.  The open
 * count feeds dlm_user_daemon_available(); the last close means the
 * daemon died, so stop all lockspaces. */
970 static int monitor_device_open(struct inode *inode, struct file *file)
972 atomic_inc(&dlm_monitor_opened);
973 dlm_monitor_unused = 0;
977 static int monitor_device_close(struct inode *inode, struct file *file)
979 if (atomic_dec_and_test(&dlm_monitor_opened))
980 dlm_stop_lockspaces();
/* fops for per-lockspace devices (registered by dlm_device_register). */
984 static const struct file_operations device_fops = {
986 .release = device_close,
988 .write = device_write,
990 .owner = THIS_MODULE,
991 .llseek = noop_llseek,
/* fops for the dlm-control device: same write handler, but open leaves
 * private_data NULL so only lockspace create/remove are permitted. */
994 static const struct file_operations ctl_device_fops = {
995 .open = ctl_device_open,
996 .release = ctl_device_close,
998 .write = device_write,
999 .owner = THIS_MODULE,
1000 .llseek = noop_llseek,
1003 static struct miscdevice ctl_device = {
1004 .name = "dlm-control",
1005 .fops = &ctl_device_fops,
1006 .minor = MISC_DYNAMIC_MINOR,
/* dlm-monitor: held open by dlm_controld; see monitor_device_open/
 * close above. */
1009 static const struct file_operations monitor_device_fops = {
1010 .open = monitor_device_open,
1011 .release = monitor_device_close,
1012 .owner = THIS_MODULE,
1013 .llseek = noop_llseek,
1016 static struct miscdevice monitor_device = {
1017 .name = "dlm-monitor",
1018 .fops = &monitor_device_fops,
1019 .minor = MISC_DYNAMIC_MINOR,
/* Module init for the userspace interface: register the dlm-control
 * and dlm-monitor misc devices.  On monitor failure the control device
 * is rolled back so init is all-or-nothing. */
1022 int __init dlm_user_init(void)
1026 atomic_set(&dlm_monitor_opened, 0);
1028 error = misc_register(&ctl_device);
1030 log_print("misc_register failed for control device");
1034 error = misc_register(&monitor_device);
1036 log_print("misc_register failed for monitor device");
1037 misc_deregister(&ctl_device);
/* Module exit: unregister both global misc devices (per-lockspace
 * devices are removed via dlm_device_deregister when lockspaces go
 * away). */
1043 void dlm_user_exit(void)
1045 misc_deregister(&ctl_device);
1046 misc_deregister(&monitor_device);