// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY         NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT      (5*HZ)
#define NLMCLNT_POLL_TIMEOUT    (30*HZ)
#define NLMCLNT_MAX_RETRIES     3

static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int nlm_stat_to_errno(__be32 stat);
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int nlmclnt_cancel(struct nlm_host *, int, struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
        u32 cookie = atomic_inc_return(&nlm_cookie);

        memcpy(c->data, &cookie, 4);
        c->len = 4;
}

static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
        refcount_inc(&lockowner->count);
        return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
        if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
                return;
        list_del(&lockowner->list);
        spin_unlock(&lockowner->host->h_lock);
        nlmclnt_release_host(lockowner->host);
        kfree(lockowner);
}

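/*
 * Each NLM lock owner is identified to the server by a per-host 32-bit
 * "pid" (svid).  The two helpers below pick a value that is not currently
 * in use by scanning the host's h_lockowners list; they are called with
 * host->h_lock held.
 */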
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
        struct nlm_lockowner *lockowner;
        list_for_each_entry(lockowner, &host->h_lockowners, list) {
                if (lockowner->pid == pid)
                        return -EBUSY;
        }
        return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
        uint32_t res;
        do {
                res = host->h_pidcount++;
        } while (nlm_pidbusy(host, res) < 0);
        return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
        struct nlm_lockowner *lockowner;
        list_for_each_entry(lockowner, &host->h_lockowners, list) {
                if (lockowner->owner != owner)
                        continue;
                return nlm_get_lockowner(lockowner);
        }
        return NULL;
}

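/*
 * Find the lockowner for this (host, fl_owner) pair, allocating a new one
 * if needed.  The GFP_KERNEL allocation is done with h_lock dropped, so the
 * list is searched again afterwards in case another task raced us and
 * inserted one first.
 */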
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
        struct nlm_lockowner *res, *new = NULL;

        spin_lock(&host->h_lock);
        res = __nlm_find_lockowner(host, owner);
        if (res == NULL) {
                spin_unlock(&host->h_lock);
                new = kmalloc(sizeof(*new), GFP_KERNEL);
                spin_lock(&host->h_lock);
                res = __nlm_find_lockowner(host, owner);
                if (res == NULL && new != NULL) {
                        res = new;
                        refcount_set(&new->count, 1);
                        new->owner = owner;
                        new->pid = __nlm_alloc_pid(host);
                        new->host = nlm_get_host(host);
                        list_add(&new->list, &host->h_lockowners);
                        new = NULL;
                }
        }
        spin_unlock(&host->h_lock);
        kfree(new);
        return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_args *argp = &req->a_args;
        struct nlm_lock *lock = &argp->lock;
        char *nodename = req->a_host->h_rpcclnt->cl_nodename;

        nlmclnt_next_cookie(&argp->cookie);
        memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh));
        lock->caller = nodename;
        lock->oh.data = req->a_owner;
        lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
                                (unsigned int)fl->fl_u.nfs_fl.owner->pid,
                                nodename);
        lock->svid = fl->fl_u.nfs_fl.owner->pid;
        lock->fl.fl_start = fl->fl_start;
        lock->fl.fl_end = fl->fl_end;
        lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
        WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
        struct nlm_rqst *call;
        int status;
        const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

        call = nlm_alloc_call(host);
        if (call == NULL)
                return -ENOMEM;

        if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
                nlmclnt_ops->nlmclnt_alloc_call(data);

        nlmclnt_locks_init_private(fl, host);
        if (!fl->fl_u.nfs_fl.owner) {
                /* lockowner allocation has failed */
                nlmclnt_release_call(call);
                return -ENOMEM;
        }
        /* Set up the argument struct */
        nlmclnt_setlockargs(call, fl);
        call->a_callback_data = data;

        if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
                if (fl->fl_type != F_UNLCK) {
                        call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
                        status = nlmclnt_lock(call, fl);
                } else
                        status = nlmclnt_unlock(call, fl);
        } else if (IS_GETLK(cmd))
                status = nlmclnt_test(call, fl);
        else
                status = -EINVAL;
        fl->fl_ops->fl_release_private(fl);
        fl->fl_ops = NULL;

        dprintk("lockd: clnt proc returns %d\n", status);
        return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
        struct nlm_rqst *call;

        for (;;) {
                call = kzalloc(sizeof(*call), GFP_KERNEL);
                if (call != NULL) {
                        refcount_set(&call->a_count, 1);
                        locks_init_lock(&call->a_args.lock.fl);
                        locks_init_lock(&call->a_res.lock.fl);
                        call->a_host = nlm_get_host(host);
                        return call;
                }
                if (signalled())
                        break;
                printk("nlm_alloc_call: failed, waiting for memory\n");
                schedule_timeout_interruptible(5*HZ);
        }
        return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
        const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

        if (!refcount_dec_and_test(&call->a_count))
                return;
        if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
                nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
        nlmclnt_release_host(call->a_host);
        nlmclnt_release_lockargs(call);
        kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
        nlmclnt_release_call(data);
}

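/*
 * Sleep for up to NLMCLNT_GRACE_WAIT before retrying a request that was
 * rejected because the server is still in its grace period; returns -EINTR
 * if a signal arrived while waiting.
 */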
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
        DEFINE_WAIT(wait);
        int status = -EINTR;

        prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
        if (!signalled()) {
                schedule_timeout(NLMCLNT_GRACE_WAIT);
                try_to_freeze();
                if (!signalled())
                        status = 0;
        }
        finish_wait(queue, &wait);
        return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct nlm_args *argp = &req->a_args;
        struct nlm_res *resp = &req->a_res;
        struct rpc_message msg = {
                .rpc_argp = argp,
                .rpc_resp = resp,
                .rpc_cred = cred,
        };
        int status;

        dprintk("lockd: call procedure %d on %s\n",
                        (int)proc, host->h_name);

        do {
                if (host->h_reclaiming && !argp->reclaim)
                        goto in_grace_period;

                /* If we have no RPC client yet, create one. */
                if ((clnt = nlm_bind_host(host)) == NULL)
                        return -ENOLCK;
                msg.rpc_proc = &clnt->cl_procinfo[proc];

                /* Perform the RPC call. If an error occurs, try again */
                if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
                        dprintk("lockd: rpc_call returned error %d\n", -status);
                        switch (status) {
                        case -EPROTONOSUPPORT:
                                status = -EINVAL;
                                break;
                        case -ECONNREFUSED:
                        case -ETIMEDOUT:
                        case -ENOTCONN:
                                nlm_rebind_host(host);
                                status = -EAGAIN;
                                break;
                        case -ERESTARTSYS:
                                return signalled () ? -EINTR : status;
                        default:
                                break;
                        }
                        break;
                } else
                if (resp->status == nlm_lck_denied_grace_period) {
                        dprintk("lockd: server in grace period\n");
                        if (argp->reclaim) {
                                printk(KERN_WARNING
                                     "lockd: spurious grace period reject?!\n");
                                return -ENOLCK;
                        }
                } else {
                        if (!argp->reclaim) {
                                /* We appear to be out of the grace period */
                                wake_up_all(&host->h_gracewait);
                        }
                        dprintk("lockd: server returns status %d\n",
                                ntohl(resp->status));
                        return 0;       /* Okay, call complete */
                }

in_grace_period:
                /*
                 * The server has rebooted and appears to be in the grace
                 * period during which locks are only allowed to be
                 * reclaimed.
                 * We can only back off and try again later.
                 */
                status = nlm_wait_on_grace(&host->h_gracewait);
        } while (status == 0);

        return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct rpc_task_setup task_setup_data = {
                .rpc_message = msg,
                .callback_ops = tk_ops,
                .callback_data = req,
                .flags = RPC_TASK_ASYNC,
        };

        dprintk("lockd: call procedure %d on %s (async)\n",
                        (int)proc, host->h_name);

        /* If we have no RPC client yet, create one. */
        clnt = nlm_bind_host(host);
        if (clnt == NULL)
                goto out_err;
        msg->rpc_proc = &clnt->cl_procinfo[proc];
        task_setup_data.rpc_client = clnt;

        /* bootstrap and kick off the async RPC call */
        return rpc_run_task(&task_setup_data);
out_err:
        tk_ops->rpc_release(req);
        return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
        struct rpc_task *task;

        task = __nlm_async_call(req, proc, msg, tk_ops);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

378 * NLM asynchronous call.
380 int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
382 struct rpc_message msg = {
383 .rpc_argp = &req->a_args,
384 .rpc_resp = &req->a_res,
386 return nlm_do_async_call(req, proc, &msg, tk_ops);
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
        struct rpc_message msg = {
                .rpc_argp = &req->a_res,
        };
        return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
        struct rpc_message msg = {
                .rpc_argp = &req->a_args,
                .rpc_resp = &req->a_res,
                .rpc_cred = cred,
        };
        struct rpc_task *task;
        int err;

        task = __nlm_async_call(req, proc, &msg, tk_ops);
        if (IS_ERR(task))
                return PTR_ERR(task);
        err = rpc_wait_for_completion_task(task);
        rpc_put_task(task);
        return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
        int status;

        status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
        if (status < 0)
                goto out;

        switch (req->a_res.status) {
        case nlm_granted:
                fl->fl_type = F_UNLCK;
                break;
        case nlm_lck_denied:
                /*
                 * Report the conflicting lock back to the application.
                 */
                fl->fl_start = req->a_res.lock.fl.fl_start;
                fl->fl_end = req->a_res.lock.fl.fl_end;
                fl->fl_type = req->a_res.lock.fl.fl_type;
                fl->fl_pid = -req->a_res.lock.fl.fl_pid;
                break;
        default:
                status = nlm_stat_to_errno(req->a_res.status);
        }
out:
        nlmclnt_release_call(req);
        return status;
}

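/*
 * file_lock_operations for NLM client locks: a copied lock takes its own
 * reference on the lockowner and is tracked on the host's h_granted list
 * so that it can be reclaimed after a server reboot.
 */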
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
        spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
        new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
        new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
        list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
        spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
        spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
        list_del(&fl->fl_u.nfs_fl.list);
        spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
        nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
        .fl_copy_lock = nlmclnt_locks_copy_lock,
        .fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
        fl->fl_u.nfs_fl.state = 0;
        fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
        INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
        fl->fl_ops = &nlmclnt_lock_ops;
}

static int do_vfs_lock(struct file_lock *fl)
{
        return locks_lock_file_wait(fl->fl_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *                      Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A: Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
        const struct cred *cred = nfs_file_cred(fl->fl_file);
        struct nlm_host *host = req->a_host;
        struct nlm_res *resp = &req->a_res;
        struct nlm_wait *block = NULL;
        unsigned char fl_flags = fl->fl_flags;
        unsigned char fl_type;
        int status = -ENOLCK;

        if (nsm_monitor(host) < 0)
                goto out;
        req->a_args.state = nsm_local_state;

        fl->fl_flags |= FL_ACCESS;
        status = do_vfs_lock(fl);
        fl->fl_flags = fl_flags;
        if (status < 0)
                goto out;

        block = nlmclnt_prepare_block(host, fl);
again:
        /*
         * Initialise resp->status to a valid non-zero value,
         * since 0 == nlm_lck_granted
         */
        resp->status = nlm_lck_blocked;
        for (;;) {
                /* Reboot protection */
                fl->fl_u.nfs_fl.state = host->h_state;
                status = nlmclnt_call(cred, req, NLMPROC_LOCK);
                if (status < 0)
                        break;
                /* Did a reclaimer thread notify us of a server reboot? */
                if (resp->status == nlm_lck_denied_grace_period)
                        continue;
                if (resp->status != nlm_lck_blocked)
                        break;
                /* Wait on an NLM blocking lock */
                status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
                if (status < 0)
                        break;
                if (resp->status != nlm_lck_blocked)
                        break;
        }

        /* if we were interrupted while blocking, then cancel the lock request
         * and exit
         */
        if (resp->status == nlm_lck_blocked) {
                if (!req->a_args.block)
                        goto out_unlock;
                if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
                        goto out_unblock;
        }

        if (resp->status == nlm_granted) {
                down_read(&host->h_rwsem);
                /* Check whether or not the server has rebooted */
                if (fl->fl_u.nfs_fl.state != host->h_state) {
                        up_read(&host->h_rwsem);
                        goto again;
                }
                /* Ensure the resulting lock will get added to granted list */
                fl->fl_flags |= FL_SLEEP;
                if (do_vfs_lock(fl) < 0)
                        printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
                up_read(&host->h_rwsem);
                fl->fl_flags = fl_flags;
                status = 0;
        }
        if (status < 0)
                goto out_unlock;
        /*
         * EAGAIN doesn't make sense for sleeping locks, and in some
         * cases NLM_LCK_DENIED is returned for a permanent error. So
         * turn it into an ENOLCK.
         */
        if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
                status = -ENOLCK;
        else
                status = nlm_stat_to_errno(resp->status);
out_unblock:
        nlmclnt_finish_block(block);
out:
        nlmclnt_release_call(req);
        return status;
out_unlock:
        /* Fatal error: ensure that we remove the lock altogether */
        dprintk("lockd: lock attempt ended in fatal error.\n"
                "       Attempting to unlock.\n");
        nlmclnt_finish_block(block);
        fl_type = fl->fl_type;
        fl->fl_type = F_UNLCK;
        down_read(&host->h_rwsem);
        do_vfs_lock(fl);
        up_read(&host->h_rwsem);
        fl->fl_type = fl_type;
        fl->fl_flags = fl_flags;
        nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
        return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
                struct nlm_rqst *req)
{
        int status;

        memset(req, 0, sizeof(*req));
        locks_init_lock(&req->a_args.lock.fl);
        locks_init_lock(&req->a_res.lock.fl);
        req->a_host = host;

        /* Set up the argument struct */
        nlmclnt_setlockargs(req, fl);
        req->a_args.reclaim = 1;

        status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
        if (status >= 0 && req->a_res.status == nlm_granted)
                return 0;

        printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
                                "(errno %d, status %d)\n", fl->fl_pid,
                                status, ntohl(req->a_res.status));
        /*
         * FIXME: This is a serious failure. We can
         *
         *  a. Ignore the problem
         *  b. Send the owning process some signal (Linux doesn't have
         *     SIGLOST, though...)
         *  c. Retry the operation
         *
         * Until someone comes up with a simple implementation
         * for b or c, I'll choose option a.
         */
        return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_host *host = req->a_host;
        struct nlm_res *resp = &req->a_res;
        int status;
        unsigned char fl_flags = fl->fl_flags;

        /*
         * Note: the server is supposed to either grant us the unlock
         * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
         * case, we want to unlock.
         */
        fl->fl_flags |= FL_EXISTS;
        down_read(&host->h_rwsem);
        status = do_vfs_lock(fl);
        up_read(&host->h_rwsem);
        fl->fl_flags = fl_flags;
        if (status == -ENOENT) {
                status = 0;
                goto out;
        }

        refcount_inc(&req->a_count);
        status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
                        NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
        if (status < 0)
                goto out;

        if (resp->status == nlm_granted)
                goto out;

        if (resp->status != nlm_lck_denied_nolocks)
                printk("lockd: unexpected unlock status: %d\n",
                        ntohl(resp->status));
        /* What to do now? I'm out of my depth... */
        status = -ENOLCK;
out:
        nlmclnt_release_call(req);
        return status;
}

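/*
 * rpc_call_prepare for UNLOCK: give the filesystem a chance, via the
 * nlmclnt_unlock_prepare hook, to defer the RPC; otherwise start it now.
 */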
static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
        struct nlm_rqst *req = data;
        const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
        bool defer_call = false;

        if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
                defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

        if (!defer_call)
                rpc_call_start(task);
}

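/*
 * rpc_call_done for UNLOCK: rebind and restart the call on non-fatal RPC
 * errors, and delay and retry while the server reports
 * NLM_LCK_DENIED_GRACE_PERIOD.
 */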
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
        struct nlm_rqst *req = data;
        u32 status = ntohl(req->a_res.status);

        if (RPC_SIGNALLED(task))
                goto die;

        if (task->tk_status < 0) {
                dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
                switch (task->tk_status) {
                case -EACCES:
                case -EIO:
                        goto die;
                default:
                        goto retry_rebind;
                }
        }
        if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
                rpc_delay(task, NLMCLNT_GRACE_WAIT);
                goto retry_unlock;
        }
        if (status != NLM_LCK_GRANTED)
                printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
        return;
retry_rebind:
        nlm_rebind_host(req->a_host);
retry_unlock:
        rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
        .rpc_call_prepare = nlmclnt_unlock_prepare,
        .rpc_call_done = nlmclnt_unlock_callback,
        .rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
        struct nlm_rqst *req;
        int status;

        dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
                "       Attempting to cancel lock.\n");

        req = nlm_alloc_call(host);
        if (!req)
                return -ENOMEM;
        req->a_flags = RPC_TASK_ASYNC;

        nlmclnt_setlockargs(req, fl);
        req->a_args.block = block;

        refcount_inc(&req->a_count);
        status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
                        NLMPROC_CANCEL, &nlmclnt_cancel_ops);
        if (status == 0 && req->a_res.status == nlm_lck_denied)
                status = -ENOLCK;
        nlmclnt_release_call(req);
        return status;
}

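/*
 * rpc_call_done for CANCEL: on failure, rebind the host and retry the call,
 * but never more than NLMCLNT_MAX_RETRIES times.
 */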
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
        struct nlm_rqst *req = data;
        u32 status = ntohl(req->a_res.status);

        if (RPC_SIGNALLED(task))
                goto die;

        if (task->tk_status < 0) {
                dprintk("lockd: CANCEL call error %d, retrying.\n",
                                task->tk_status);
                goto retry_cancel;
        }

        dprintk("lockd: cancel status %u (task %u)\n",
                        status, task->tk_pid);

        switch (status) {
        case NLM_LCK_GRANTED:
        case NLM_LCK_DENIED_GRACE_PERIOD:
        case NLM_LCK_DENIED:
                /* Everything's good */
                break;
        case NLM_LCK_DENIED_NOLOCKS:
                dprintk("lockd: CANCEL failed (server has no locks)\n");
                goto retry_cancel;
        default:
                printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
                        status);
        }

die:
        return;

retry_cancel:
        /* Don't ever retry more than 3 times */
        if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
                goto die;
        nlm_rebind_host(req->a_host);
        rpc_restart_call(task);
        rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
        .rpc_call_done = nlmclnt_cancel_callback,
        .rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
        switch (ntohl(status)) {
        case NLM_LCK_GRANTED:
                return 0;
        case NLM_LCK_DENIED:
                return -EAGAIN;
        case NLM_LCK_DENIED_NOLOCKS:
        case NLM_LCK_DENIED_GRACE_PERIOD:
                return -ENOLCK;
        case NLM_LCK_BLOCKED:
                printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
                return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
        case NLM_DEADLCK:
                return -EDEADLK;
        case NLM_ROFS:
                return -EROFS;
        case NLM_STALE_FH:
                return -ESTALE;
        case NLM_FBIG:
                return -EOVERFLOW;
        case NLM_FAILED:
                return -ENOLCK;
#endif
        }
        printk(KERN_NOTICE "lockd: unexpected server status %d\n",
                 ntohl(status));
        return -ENOLCK;
}