/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE 8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);

void
init_nfsv4_state(struct nfs_server *server)
{
        server->nfs4_state = NULL;
        INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
        if (server->mnt_path) {
                kfree(server->mnt_path);
                server->mnt_path = NULL;
        }
        if (server->nfs4_state) {
                nfs4_put_client(server->nfs4_state);
                server->nfs4_state = NULL;
        }
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
        struct nfs4_client *clp;

        if (nfs_callback_up() < 0)
                return NULL;
        if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
                nfs_callback_down();
                return NULL;
        }
        memset(clp, 0, sizeof(*clp));
        memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
        init_rwsem(&clp->cl_sem);
        INIT_LIST_HEAD(&clp->cl_delegations);
        INIT_LIST_HEAD(&clp->cl_state_owners);
        INIT_LIST_HEAD(&clp->cl_unused);
        spin_lock_init(&clp->cl_lock);
        atomic_set(&clp->cl_count, 1);
        INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
        INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
        INIT_LIST_HEAD(&clp->cl_superblocks);
        init_waitqueue_head(&clp->cl_waitq);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
        clp->cl_rpcclient = ERR_PTR(-EINVAL);
        clp->cl_boot_time = CURRENT_TIME;
        clp->cl_state = 1 << NFS4CLNT_OK;
        return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
        struct nfs4_state_owner *sp;

        while (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next,
                                struct nfs4_state_owner,
                                so_list);
                list_del(&sp->so_list);
                kfree(sp);
        }
        BUG_ON(!list_empty(&clp->cl_state_owners));
        if (clp->cl_cred)
                put_rpccred(clp->cl_cred);
        nfs_idmap_delete(clp);
        if (!IS_ERR(clp->cl_rpcclient))
                rpc_shutdown_client(clp->cl_rpcclient);
        kfree(clp);
        nfs_callback_down();
}

static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
        struct nfs4_client *clp;
        list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
                if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
                        atomic_inc(&clp->cl_count);
                        return clp;
                }
        }
        return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
        struct nfs4_client *clp;
        spin_lock(&state_spinlock);
        clp = __nfs4_find_client(addr);
        spin_unlock(&state_spinlock);
        return clp;
}

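/*
 * Look up an nfs4_client matching the given server address, or allocate
 * and register a new one.  The allocation is done with state_spinlock
 * dropped, so we recheck the list for a racing insertion before adding.
 */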
struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
        struct nfs4_client *clp, *new = NULL;

        spin_lock(&state_spinlock);
        for (;;) {
                clp = __nfs4_find_client(addr);
                if (clp != NULL)
                        break;
                clp = new;
                if (clp != NULL) {
                        list_add(&clp->cl_servers, &nfs4_clientid_list);
                        new = NULL;
                        break;
                }
                spin_unlock(&state_spinlock);
                new = nfs4_alloc_client(addr);
                spin_lock(&state_spinlock);
                if (new == NULL)
                        break;
        }
        spin_unlock(&state_spinlock);
        if (new)
                nfs4_free_client(new);
        return clp;
}

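/*
 * Drop a reference to the nfs4_client.  The last reference unhashes the
 * client, wakes up any waiters, stops lease renewal and frees the structure.
 */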
void
nfs4_put_client(struct nfs4_client *clp)
{
        if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
                return;
        list_del(&clp->cl_servers);
        spin_unlock(&state_spinlock);
        BUG_ON(!list_empty(&clp->cl_superblocks));
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
        nfs4_kill_renewd(clp);
        nfs4_free_client(clp);
}

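/*
 * Establish the client ID with the server (SETCLIENTID followed by
 * SETCLIENTID_CONFIRM) and, on success, schedule lease renewal.
 */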
static int __nfs4_init_client(struct nfs4_client *clp)
{
        int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
        if (status == 0)
                status = nfs4_proc_setclientid_confirm(clp);
        if (status == 0)
                nfs4_schedule_state_renewal(clp);
        return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
        return nfs4_map_errors(__nfs4_init_client(clp));
}

u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
        return clp->cl_lockowner_id++;
}

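/*
 * Try to recycle a state owner from the per-client pool of unused entries.
 * Caller must hold clp->cl_lock.
 */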
static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp = NULL;

        if (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
                atomic_inc(&sp->so_count);
                sp->so_cred = cred;
                list_move(&sp->so_list, &clp->cl_state_owners);
                clp->cl_nunused--;
        }
        return sp;
}

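/*
 * Find a state owner that matches the given credential and take a
 * reference to it.  Caller must hold clp->cl_lock.
 */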
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp, *res = NULL;

        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                if (sp->so_cred != cred)
                        continue;
                atomic_inc(&sp->so_count);
                /* Move to the head of the list */
                list_move(&sp->so_list, &clp->cl_state_owners);
                res = sp;
                break;
        }
        return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 *
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kmalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;
        init_MUTEX(&sp->so_sema);
        sp->so_seqid = 0; /* arbitrary */
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
        atomic_set(&sp->so_count, 1);
        return sp;
}

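/*
 * Unhash a state owner so it can no longer be found, e.g. after the
 * server returns NFS4ERR_BAD_SEQID (see nfs4_increment_seqid()).
 */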
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs4_client *clp = sp->so_client;
        spin_lock(&clp->cl_lock);
        list_del_init(&sp->so_list);
        spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs4_client *clp = server->nfs4_state;
        struct nfs4_state_owner *sp, *new;

        get_rpccred(cred);
        new = nfs4_alloc_state_owner();
        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(clp, cred);
        if (sp == NULL)
                sp = nfs4_client_grab_unused(clp, cred);
        if (sp == NULL && new != NULL) {
                list_add(&new->so_list, &clp->cl_state_owners);
                new->so_client = clp;
                new->so_id = nfs4_alloc_lockowner_id(clp);
                new->so_cred = cred;
                sp = new;
                new = NULL;
        }
        spin_unlock(&clp->cl_lock);
        if (new)
                kfree(new);
        if (sp != NULL)
                return sp;
        put_rpccred(cred);
        return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs4_client *clp = sp->so_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
                goto out_free;
        if (list_empty(&sp->so_list))
                goto out_free;
        list_move(&sp->so_list, &clp->cl_unused);
        clp->cl_nunused++;
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        cred = NULL;
        return;
out_free:
        list_del(&sp->so_list);
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kmalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
        state->state = 0;
        state->nreaders = 0;
        state->nwriters = 0;
        state->flags = 0;
        memset(state->stateid.data, 0, sizeof(state->stateid.data));
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        init_MUTEX(&state->lock_sema);
        spin_lock_init(&state->state_lock);
        return state;
}

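/*
 * Search the inode's open states for one owned by the given credential
 * that already covers the requested open mode.  On success, take a
 * reference and account for the extra reader and/or writer.
 * Caller must hold inode->i_lock.
 */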
static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        mode &= (FMODE_READ|FMODE_WRITE);
        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner->so_cred != cred)
                        continue;
                if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
                        continue;
                if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
                        continue;
                if ((state->state & mode) != mode)
                        continue;
                atomic_inc(&state->count);
                if (mode & FMODE_READ)
                        state->nreaders++;
                if (mode & FMODE_WRITE)
                        state->nwriters++;
                return state;
        }
        return NULL;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                /* Is this in the process of being freed? */
                if (state->nreaders == 0 && state->nwriters == 0)
                        continue;
                if (state->owner == owner) {
                        atomic_inc(&state->count);
                        return state;
                }
        }
        return NULL;
}

struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
        struct nfs4_state *state;

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state(inode, cred, mode);
        spin_unlock(&inode->i_lock);
        return state;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

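/*
 * Return the open state for this (inode, open_owner) pair, allocating a
 * new one if none exists yet.  The allocation happens with inode->i_lock
 * dropped, so we search again before inserting the new state.
 */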
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                /* Caller *must* be holding owner->so_sema */
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem and owner->so_sema!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
                return;
        if (!list_empty(&state->inode_states))
                list_del(&state->inode_states);
        spin_unlock(&inode->i_lock);
        list_del(&state->open_states);
        iput(inode);
        BUG_ON(state->state != 0);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Beware! Caller must be holding no references to clp->cl_sem
 * or owner->so_sema!
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;
        struct nfs4_client *clp = owner->so_client;
        int newstate;

        atomic_inc(&owner->so_count);
        down_read(&clp->cl_sem);
        down(&owner->so_sema);
        /* Protect against nfs4_find_state() */
        spin_lock(&inode->i_lock);
        if (mode & FMODE_READ)
                state->nreaders--;
        if (mode & FMODE_WRITE)
                state->nwriters--;
        if (state->nwriters == 0) {
                if (state->nreaders == 0)
                        list_del_init(&state->inode_states);
                /* See reclaim code */
                list_move_tail(&state->open_states, &owner->so_states);
        }
        spin_unlock(&inode->i_lock);
        newstate = 0;
        if (state->state != 0) {
                if (state->nreaders)
                        newstate |= FMODE_READ;
                if (state->nwriters)
                        newstate |= FMODE_WRITE;
                if (state->state == newstate)
                        goto out;
                if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
                        return;
        }
out:
        nfs4_put_open_state(state);
        up(&owner->so_sema);
        nfs4_put_state_owner(owner);
        up_read(&clp->cl_sem);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *pos;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (pos->ls_owner != fl_owner)
                        continue;
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        struct nfs4_client *clp = state->owner->so_client;

        lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
        lsp->ls_flags = 0;
        lsp->ls_seqid = 0;      /* arbitrary */
        memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_owner = fl_owner;
        spin_lock(&clp->cl_lock);
        lsp->ls_id = nfs4_alloc_lockowner_id(clp);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema and clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        new->ls_state = state;
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        kfree(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        kfree(lsp);
}

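/*
 * file_lock_operations callbacks: keep the nfs4_lock_state referenced from
 * fl_u.nfs4_fl.owner alive across lock copies, and release it when the
 * file_lock's private data is freed.
 */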
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

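/*
 * Attach a lock state to the file_lock and install the lock operations
 * that manage its lifetime.  Safe to call more than once: a file_lock
 * that already has fl_ops set is left untouched.
 */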
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        lsp = nfs4_get_lock_state(state, fl->fl_owner);
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;

        memcpy(dst, &state->stateid, sizeof(*dst));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;

        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

/*
 * Called with state->lock_sema and clp->cl_sem held.
 */
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
{
        if (status == NFS_OK || seqid_mutating_err(-status))
                lsp->ls_seqid++;
}

/*
 * Called with sp->so_sema and clp->cl_sem held.
 *
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments in nfs_fs.h:seqid_mutating_error()
 */
void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
{
        if (status == NFS_OK || seqid_mutating_err(-status))
                sp->so_seqid++;
        /* If the server returns BAD_SEQID, unhash state_owner here */
        if (status == -NFS4ERR_BAD_SEQID)
                nfs4_drop_state_owner(sp);
}

static int reclaimer(void *);
struct reclaimer_args {
        struct nfs4_client *clp;
        struct completion complete;
};

/*
 * State recovery routine
 */
void
nfs4_recover_state(void *data)
{
        struct nfs4_client *clp = (struct nfs4_client *)data;
        struct reclaimer_args args = {
                .clp = clp,
        };
        might_sleep();

        init_completion(&args.complete);

        if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
                goto out_failed_clear;
        wait_for_completion(&args.complete);
        return;
out_failed_clear:
        set_bit(NFS4CLNT_OK, &clp->cl_state);
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
        if (!clp)
                return;
        if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
                schedule_work(&clp->cl_recoverd);
}

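/*
 * Reclaim all POSIX locks associated with this open state using the given
 * recovery ops.  NFS4ERR_STALE_CLIENTID aborts the walk; other errors mean
 * the lock could not be reclaimed and the walk continues.
 */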
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
                if (!(fl->fl_flags & FL_POSIX))
                        continue;
                if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
                        continue;
                status = ops->recover_lock(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                default:
                        printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                        __FUNCTION__, status);
                case -NFS4ERR_EXPIRED:
                case -NFS4ERR_NO_GRACE:
                case -NFS4ERR_RECLAIM_BAD:
                case -NFS4ERR_RECLAIM_CONFLICT:
                        /* kill_proc(fl->fl_owner, SIGLOST, 1); */
                        break;
                case -NFS4ERR_STALE_CLIENTID:
                        goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (state->state == 0)
                        continue;
                status = ops->recover_open(sp, state);
                list_for_each_entry(lock, &state->lock_states, ls_locks)
                        lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
                if (status >= 0) {
                        status = nfs4_reclaim_locks(ops, state);
                        if (status < 0)
                                goto out_err;
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                        printk("%s: Lock reclaim failed!\n",
                                                        __FUNCTION__);
                        }
                        continue;
                }
                switch (status) {
                default:
                        printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                        __FUNCTION__, status);
                case -ENOENT:
                case -NFS4ERR_RECLAIM_BAD:
                case -NFS4ERR_RECLAIM_CONFLICT:
                        /*
                         * Open state on this file cannot be recovered
                         * All we can do is revert to using the zero stateid.
                         */
                        memset(state->stateid.data, 0,
                                sizeof(state->stateid.data));
                        /* Mark the file as being 'closed' */
                        state->state = 0;
                        break;
                case -NFS4ERR_EXPIRED:
                case -NFS4ERR_NO_GRACE:
                case -NFS4ERR_STALE_CLIENTID:
                        goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

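/*
 * Body of the state recovery thread.  Checks whether the lease is still
 * valid via RENEW, re-establishes the clientid if necessary, then walks
 * every state owner reclaiming open and lock state before reaping any
 * delegations that could not be reclaimed.  Runs with the BKL and
 * clp->cl_sem held for exclusive access to NFSv4 state.
 */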
static int reclaimer(void *ptr)
{
        struct reclaimer_args *args = (struct reclaimer_args *)ptr;
        struct nfs4_client *clp = args->clp;
        struct nfs4_state_owner *sp;
        struct nfs4_state_recovery_ops *ops;
        int status = 0;

        daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
        allow_signal(SIGKILL);

        atomic_inc(&clp->cl_count);
        complete(&args->complete);

        /* Ensure exclusive access to NFSv4 state */
        lock_kernel();
        down_write(&clp->cl_sem);
        /* Are there any NFS mounts out there? */
        if (list_empty(&clp->cl_superblocks))
                goto out;
restart_loop:
        status = nfs4_proc_renew(clp);
        switch (status) {
        case 0:
        case -NFS4ERR_CB_PATH_DOWN:
                goto out;
        case -NFS4ERR_STALE_CLIENTID:
        case -NFS4ERR_LEASE_MOVED:
                ops = &nfs4_reboot_recovery_ops;
                break;
        default:
                ops = &nfs4_network_partition_recovery_ops;
        }
        status = __nfs4_init_client(clp);
        if (status)
                goto out_error;
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        /* Note: list is protected by exclusive lock on cl->cl_sem */
        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                status = nfs4_reclaim_open_state(ops, sp);
                if (status < 0) {
                        if (status == -NFS4ERR_NO_GRACE) {
                                ops = &nfs4_network_partition_recovery_ops;
                                status = nfs4_reclaim_open_state(ops, sp);
                        }
                        if (status == -NFS4ERR_STALE_CLIENTID)
                                goto restart_loop;
                        if (status == -NFS4ERR_EXPIRED)
                                goto restart_loop;
                }
        }
        nfs_delegation_reap_unclaimed(clp);
out:
        set_bit(NFS4CLNT_OK, &clp->cl_state);
        up_write(&clp->cl_sem);
        unlock_kernel();
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
        if (status == -NFS4ERR_CB_PATH_DOWN)
                nfs_handle_cb_pathdown(clp);
        nfs4_put_client(clp);
        return 0;
out_error:
        printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
                        NIPQUAD(clp->cl_addr.s_addr), -status);
        goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */