NFSv4: Send RENEW requests to the server only when we're holding state
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/config.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"

static struct nfs_delegation *nfs_alloc_delegation(void)
{
        return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
        if (delegation->cred)
                put_rpccred(delegation->cred);
        kfree(delegation);
}

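/*
 * Reclaim all locks that were held under a delegation on behalf of the
 * given open context, now that the delegated state is being recalled.
 */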
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status;

        for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
                        continue;
                status = nfs4_lock_delegation_recall(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d.\n",
                                                __FUNCTION__, status);
                        case -NFS4ERR_EXPIRED:
                                /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                        case -NFS4ERR_STALE_CLIENTID:
                                nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs4_state);
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

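/*
 * Convert all delegated open state on this inode back into regular open
 * state held on the server, reclaiming any associated locks as well.
 */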
static void nfs_delegation_claim_opens(struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
        int err;

again:
        spin_lock(&inode->i_lock);
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
                get_nfs_open_context(ctx);
                spin_unlock(&inode->i_lock);
                err = nfs4_open_delegation_recall(ctx->dentry, state);
                if (err >= 0)
                        err = nfs_delegation_claim_locks(ctx, state);
                put_nfs_open_context(ctx);
                if (err != 0)
                        return;
                goto again;
        }
        spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation that has just been reclaimed from the server
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_delegation *delegation = NFS_I(inode)->delegation;
        struct rpc_cred *oldcred;

        if (delegation == NULL)
                return;
        memcpy(delegation->stateid.data, res->delegation.data,
                        sizeof(delegation->stateid.data));
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        oldcred = delegation->cred;
        delegation->cred = get_rpccred(cred);
        put_rpccred(oldcred);
        delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
        NFS_I(inode)->delegation_state = delegation->type;
        smp_wmb();
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int status = 0;

        /* Ensure we first revalidate the attributes and page cache! */
        if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
                __nfs_revalidate_inode(NFS_SERVER(inode), inode);

        delegation = nfs_alloc_delegation();
        if (delegation == NULL)
                return -ENOMEM;
        memcpy(delegation->stateid.data, res->delegation.data,
                        sizeof(delegation->stateid.data));
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        delegation->cred = get_rpccred(cred);
        delegation->inode = inode;

        spin_lock(&clp->cl_lock);
        if (nfsi->delegation == NULL) {
                list_add(&delegation->super_list, &clp->cl_delegations);
                nfsi->delegation = delegation;
                nfsi->delegation_state = delegation->type;
                delegation = NULL;
        } else {
                if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
                                        sizeof(delegation->stateid)) != 0 ||
                                delegation->type != nfsi->delegation->type) {
                        printk(KERN_ERR "%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
                                        __FUNCTION__, NIPQUAD(clp->cl_addr));
                        status = -EIO;
                }
        }
        spin_unlock(&clp->cl_lock);
        if (delegation != NULL)
                nfs_free_delegation(delegation);
        return status;
}

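/*
 * Issue the DELEGRETURN call for a delegation and free the local record
 */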
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
        int res = 0;

        __nfs_revalidate_inode(NFS_SERVER(inode), inode);

        res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
        nfs_free_delegation(delegation);
        return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
        filemap_fdatawrite(inode->i_mapping);
        nfs_wb_all(inode);
        filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
        struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int res = 0;

        nfs_msync_inode(inode);
        down_read(&clp->cl_sem);
        /* Guard against new delegated open calls */
        down_write(&nfsi->rwsem);
        spin_lock(&clp->cl_lock);
        delegation = nfsi->delegation;
        if (delegation != NULL) {
                list_del_init(&delegation->super_list);
                nfsi->delegation = NULL;
                nfsi->delegation_state = 0;
        }
        spin_unlock(&clp->cl_lock);
        nfs_delegation_claim_opens(inode);
        up_write(&nfsi->rwsem);
        up_read(&clp->cl_sem);
        nfs_msync_inode(inode);

        if (delegation != NULL)
                res = nfs_do_return_delegation(inode, delegation);
        return res;
}

/*
 * Return all delegations associated with a given super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
        struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
        struct nfs_delegation *delegation;
        struct inode *inode;

        if (clp == NULL)
                return;
restart:
        spin_lock(&clp->cl_lock);
        list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
                if (delegation->inode->i_sb != sb)
                        continue;
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_unlock(&clp->cl_lock);
                nfs_inode_return_delegation(inode);
                iput(inode);
                goto restart;
        }
        spin_unlock(&clp->cl_lock);
}

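/*
 * Worker thread: return every delegation held by a client whose lease has
 * expired. Started by nfs_expire_all_delegations() below.
 */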
int nfs_do_expire_all_delegations(void *ptr)
{
        struct nfs4_client *clp = ptr;
        struct nfs_delegation *delegation;
        struct inode *inode;
        int err = 0;

        allow_signal(SIGKILL);
restart:
        spin_lock(&clp->cl_lock);
        if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
                goto out;
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
                goto out;
        list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_unlock(&clp->cl_lock);
                err = nfs_inode_return_delegation(inode);
                iput(inode);
                if (!err)
                        goto restart;
        }
out:
        spin_unlock(&clp->cl_lock);
        nfs4_put_client(clp);
        module_put_and_exit(0);
}

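/*
 * Spawn the per-client "delegreturn" thread that expires all delegations
 */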
void nfs_expire_all_delegations(struct nfs4_client *clp)
{
        struct task_struct *task;

        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(nfs_do_expire_all_delegations, clp,
                        "%u.%u.%u.%u-delegreturn",
                        NIPQUAD(clp->cl_addr));
        if (!IS_ERR(task))
                return;
        nfs4_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
        struct nfs_delegation *delegation;
        struct inode *inode;

        if (clp == NULL)
                return;
restart:
        spin_lock(&clp->cl_lock);
        list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_unlock(&clp->cl_lock);
                nfs_inode_return_delegation(inode);
                iput(inode);
                goto restart;
        }
        spin_unlock(&clp->cl_lock);
}

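/*
 * Arguments handed to the asynchronous delegation recall thread
 */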
struct recall_threadargs {
        struct inode *inode;
        struct nfs4_client *clp;
        const nfs4_stateid *stateid;

        struct completion started;
        int result;
};

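/*
 * Thread that detaches the delegation matching the recalled stateid,
 * reclaims the opens and locks it covered, and returns it to the server.
 */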
static int recall_thread(void *data)
{
        struct recall_threadargs *args = (struct recall_threadargs *)data;
        struct inode *inode = igrab(args->inode);
        struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;

        daemonize("nfsv4-delegreturn");

        nfs_msync_inode(inode);
        down_read(&clp->cl_sem);
        down_write(&nfsi->rwsem);
        spin_lock(&clp->cl_lock);
        delegation = nfsi->delegation;
        if (delegation != NULL && memcmp(delegation->stateid.data,
                                args->stateid->data,
                                sizeof(delegation->stateid.data)) == 0) {
                list_del_init(&delegation->super_list);
                nfsi->delegation = NULL;
                nfsi->delegation_state = 0;
                args->result = 0;
        } else {
                delegation = NULL;
                args->result = -ENOENT;
        }
        spin_unlock(&clp->cl_lock);
        complete(&args->started);
        nfs_delegation_claim_opens(inode);
        up_write(&nfsi->rwsem);
        up_read(&clp->cl_sem);
        nfs_msync_inode(inode);

        if (delegation != NULL)
                nfs_do_return_delegation(inode, delegation);
        iput(inode);
        module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
        struct recall_threadargs data = {
                .inode = inode,
                .stateid = stateid,
        };
        int status;

        init_completion(&data.started);
        __module_get(THIS_MODULE);
        status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
        if (status < 0)
                goto out_module_put;
        wait_for_completion(&data.started);
        return data.result;
out_module_put:
        module_put(THIS_MODULE);
        return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
{
        struct nfs_delegation *delegation;
        struct inode *res = NULL;
        spin_lock(&clp->cl_lock);
        list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
                if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
                        res = igrab(delegation->inode);
                        break;
                }
        }
        spin_unlock(&clp->cl_lock);
        return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
{
        struct nfs_delegation *delegation;
        spin_lock(&clp->cl_lock);
        list_for_each_entry(delegation, &clp->cl_delegations, super_list)
                delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
        spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
{
        struct nfs_delegation *delegation, *n;
        LIST_HEAD(head);
        spin_lock(&clp->cl_lock);
        list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
                if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
                        continue;
                list_move(&delegation->super_list, &head);
                NFS_I(delegation->inode)->delegation = NULL;
                NFS_I(delegation->inode)->delegation_state = 0;
        }
        spin_unlock(&clp->cl_lock);
        while (!list_empty(&head)) {
                delegation = list_entry(head.next, struct nfs_delegation, super_list);
                list_del(&delegation->super_list);
                nfs_free_delegation(delegation);
        }
}