/*
 * linux/fs/lockd/clntlock.c
 *
 * Lock handling for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/smp_lock.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT

/*
 * Local function prototypes
 */
static int reclaimer(void *ptr);

/*
 * The following functions handle blocking and granting from the
 * client perspective.
 */

/*
 * This is the representation of a blocked client lock.
 */
struct nlm_wait {
	struct list_head	b_list;		/* linked list */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	__be32			b_status;	/* grant callback status */
};

static LIST_HEAD(nlm_blocked);

/**
 * nlmclnt_init - Set up per-NFS mount point lockd data structures
 * @nlm_init: pointer to arguments structure
 *
 * Returns pointer to an appropriate nlm_host struct,
 * or an ERR_PTR value.
 */
struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
{
	struct nlm_host *host;
	u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
	int status;

	status = lockd_up();
	if (status < 0)
		return ERR_PTR(status);

	host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen,
				   nlm_init->protocol, nlm_version,
				   nlm_init->hostname);
	if (host == NULL) {
		lockd_down();
		return ERR_PTR(-ENOLCK);
	}

	return host;
}
EXPORT_SYMBOL_GPL(nlmclnt_init);

/**
 * nlmclnt_done - Release resources allocated by nlmclnt_init()
 * @host: nlm_host structure reserved by nlmclnt_init()
 */
void nlmclnt_done(struct nlm_host *host)
{
	nlm_release_host(host);
	lockd_down();
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
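
/*
 * Usage sketch, not part of the original file: roughly how an NFS client
 * might pair nlmclnt_init() with nlmclnt_done() at mount and unmount
 * time. The variable names and the surrounding error handling are
 * illustrative assumptions, not a copy of the in-tree NFS call site:
 *
 *	struct nlmclnt_initdata nlm_init = {
 *		.hostname	= server_name,
 *		.address	= (struct sockaddr *)&server_address,
 *		.addrlen	= server_addrlen,
 *		.protocol	= IPPROTO_TCP,
 *		.nfs_version	= 3,
 *	};
 *	struct nlm_host *host = nlmclnt_init(&nlm_init);
 *
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	... use host for NLM requests on this mount, then on unmount:
 *	nlmclnt_done(host);
 */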

/*
 * Queue up a lock for blocking so that the GRANTED request can see it
 */
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_wait *block;

	block = kmalloc(sizeof(*block), GFP_KERNEL);
	if (block != NULL) {
		block->b_host = host;
		block->b_lock = fl;
		init_waitqueue_head(&block->b_wait);
		block->b_status = nlm_lck_blocked;
		list_add(&block->b_list, &nlm_blocked);
	}
	return block;
}

void nlmclnt_finish_block(struct nlm_wait *block)
{
	if (block == NULL)
		return;
	list_del(&block->b_list);
	kfree(block);
}

/*
 * Block on a lock
 */
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
	long ret;

	/* A broken server might ask us to block even if we didn't
	 * request it. Just say no!
	 */
	if (block == NULL)
		return -EAGAIN;

	/* Go to sleep waiting for the GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	ret = wait_event_interruptible_timeout(block->b_wait,
			block->b_status != nlm_lck_blocked,
			timeout);
	if (ret < 0)
		return -ERESTARTSYS;
	req->a_res.status = block->b_status;
	return 0;
}
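
/*
 * Usage sketch, modelled loosely on nlmclnt_lock() in clntproc.c (the
 * exact loop there differs, so treat this as an assumption): the wait is
 * registered with nlmclnt_prepare_block() *before* the LOCK request goes
 * out, so a GRANTED callback that races with the server's reply is not
 * lost, and nlmclnt_block() polls with a short timeout while the lock
 * remains blocked:
 *
 *	block = nlmclnt_prepare_block(host, fl);
 *	for (;;) {
 *		status = nlmclnt_call(req, NLMPROC_LOCK);
 *		if (status < 0)
 *			break;
 *		if (req->a_res.status != nlm_lck_blocked)
 *			break;
 *		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
 *		if (status < 0)
 *			break;
 *	}
 *	nlmclnt_finish_block(block);
 */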

/*
 * The server lockd has called us back to tell us the lock was granted
 */
__be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
{
	const struct file_lock *fl = &lock->fl;
	const struct nfs_fh *fh = &lock->fh;
	struct nlm_wait *block;
	__be32 res = nlm_lck_denied;

	/*
	 * Look up blocked request based on arguments.
	 * Warning: must not use cookie to match it!
	 */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		struct file_lock *fl_blocked = block->b_lock;

		if (fl_blocked->fl_start != fl->fl_start)
			continue;
		if (fl_blocked->fl_end != fl->fl_end)
			continue;
		/*
		 * Careful! The NLM server will return the 32-bit "pid" that
		 * we put on the wire: in this case the lockowner "pid".
		 */
		if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
			continue;
		if (!nlm_cmp_addr(nlm_addr(block->b_host), addr))
			continue;
		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode), fh) != 0)
			continue;
		/* Alright, we found a lock. Set the return status
		 * and wake up the caller
		 */
		block->b_status = nlm_granted;
		wake_up(&block->b_wait);
		res = nlm_granted;
	}
	return res;
}
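
/*
 * Note on the caller, an assumption not spelled out in this file: the
 * NLM_GRANTED callback arrives at the client's own lockd service, whose
 * GRANTED procedures (svcproc.c/svc4proc.c) are expected to hand the
 * peer address and lock to nlmclnt_grant(). Matching deliberately uses
 * the lock range, lockowner pid, peer address and file handle rather
 * than the cookie, as the warning above requires.
 */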

/*
 * The following procedures deal with the recovery of locks after a
 * server crash.
 */

/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host)
{
	if (!host->h_reclaiming++) {
		nlm_get_host(host);
		__module_get(THIS_MODULE);
		if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0)
			module_put(THIS_MODULE);
	}
}
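
/*
 * Trigger sketch, an assumption since the caller lives in host.c rather
 * than this file: when statd delivers an SM_NOTIFY saying the peer
 * rebooted, nlm_host_rebooted() is expected to invoke nlmclnt_recovery()
 * for client-side hosts, and the reclaimer thread below then re-acquires
 * every lock on host->h_granted while the server is still in its grace
 * period.
 */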

static int
reclaimer(void *ptr)
{
	struct nlm_host *host = (struct nlm_host *) ptr;
	struct nlm_wait *block;
	struct file_lock *fl, *next;
	u32 nsmstate;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);

	down_write(&host->h_rwsem);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();	/* note: this cannot fail as lockd is already running */

	dprintk("lockd: reclaiming locks for host %s\n", host->h_name);

restart:
	nsmstate = host->h_nsmstate;

	/* Force a portmap getport - the peer's lockd will
	 * most likely end up on a different port.
	 */
	host->h_nextrebind = jiffies;
	nlm_rebind_host(host);

	/* First, reclaim all locks that have been granted. */
	list_splice_init(&host->h_granted, &host->h_reclaim);
	list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
		list_del_init(&fl->fl_u.nfs_fl.list);

		/* Why are we leaking memory here? --okir */
		if (signalled())
			continue;
		if (nlmclnt_reclaim(host, fl) != 0)
			continue;
		list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
		if (host->h_nsmstate != nsmstate) {
			/* Argh! The server rebooted again! */
			goto restart;
		}
	}

	host->h_reclaiming = 0;
	up_write(&host->h_rwsem);
	dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);

	/* Now, wake up all processes that sleep on a blocked lock */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (block->b_host == host) {
			block->b_status = nlm_lck_denied_grace_period;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	module_put_and_exit(0);
}