NFSv4.1: CB_RECALL_SLOT must schedule a sequence op after updating targets
1/*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/mm.h>
39#include <linux/delay.h>
40#include <linux/errno.h>
41#include <linux/string.h>
42#include <linux/ratelimit.h>
43#include <linux/printk.h>
44#include <linux/slab.h>
45#include <linux/sunrpc/clnt.h>
46#include <linux/nfs.h>
47#include <linux/nfs4.h>
48#include <linux/nfs_fs.h>
49#include <linux/nfs_page.h>
50#include <linux/nfs_mount.h>
51#include <linux/namei.h>
52#include <linux/mount.h>
53#include <linux/module.h>
54#include <linux/nfs_idmap.h>
55#include <linux/sunrpc/bc_xprt.h>
56#include <linux/xattr.h>
57#include <linux/utsname.h>
58#include <linux/freezer.h>
59
60#include "nfs4_fs.h"
61#include "delegation.h"
62#include "internal.h"
63#include "iostat.h"
64#include "callback.h"
65#include "pnfs.h"
66#include "netns.h"
67
68#define NFSDBG_FACILITY NFSDBG_PROC
69
70#define NFS4_POLL_RETRY_MIN (HZ/10)
71#define NFS4_POLL_RETRY_MAX (15*HZ)
72
73#define NFS4_MAX_LOOP_ON_RECOVER (10)
74
75struct nfs4_opendata;
76static int _nfs4_proc_open(struct nfs4_opendata *data);
77static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
78static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
79static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
80static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
81static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
82static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
83static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
84 struct nfs_fattr *fattr, struct iattr *sattr,
85 struct nfs4_state *state);
86#ifdef CONFIG_NFS_V4_1
87static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
88static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
89#endif
90/* Prevent leaks of NFSv4 errors into userland */
91static int nfs4_map_errors(int err)
92{
93 if (err >= -1000)
94 return err;
95 switch (err) {
96 case -NFS4ERR_RESOURCE:
97 return -EREMOTEIO;
98 case -NFS4ERR_WRONGSEC:
99 return -EPERM;
100 case -NFS4ERR_BADOWNER:
101 case -NFS4ERR_BADNAME:
102 return -EINVAL;
103 case -NFS4ERR_SHARE_DENIED:
104 return -EACCES;
105 case -NFS4ERR_MINOR_VERS_MISMATCH:
106 return -EPROTONOSUPPORT;
107 case -NFS4ERR_ACCESS:
108 return -EACCES;
109 default:
110 dprintk("%s could not handle NFSv4 error %d\n",
111 __func__, -err);
112 break;
113 }
114 return -EIO;
115}
116
117/*
118 * This is our standard bitmap for GETATTR requests.
119 */
120const u32 nfs4_fattr_bitmap[3] = {
121 FATTR4_WORD0_TYPE
122 | FATTR4_WORD0_CHANGE
123 | FATTR4_WORD0_SIZE
124 | FATTR4_WORD0_FSID
125 | FATTR4_WORD0_FILEID,
126 FATTR4_WORD1_MODE
127 | FATTR4_WORD1_NUMLINKS
128 | FATTR4_WORD1_OWNER
129 | FATTR4_WORD1_OWNER_GROUP
130 | FATTR4_WORD1_RAWDEV
131 | FATTR4_WORD1_SPACE_USED
132 | FATTR4_WORD1_TIME_ACCESS
133 | FATTR4_WORD1_TIME_METADATA
134 | FATTR4_WORD1_TIME_MODIFY
135};
136
137static const u32 nfs4_pnfs_open_bitmap[3] = {
138 FATTR4_WORD0_TYPE
139 | FATTR4_WORD0_CHANGE
140 | FATTR4_WORD0_SIZE
141 | FATTR4_WORD0_FSID
142 | FATTR4_WORD0_FILEID,
143 FATTR4_WORD1_MODE
144 | FATTR4_WORD1_NUMLINKS
145 | FATTR4_WORD1_OWNER
146 | FATTR4_WORD1_OWNER_GROUP
147 | FATTR4_WORD1_RAWDEV
148 | FATTR4_WORD1_SPACE_USED
149 | FATTR4_WORD1_TIME_ACCESS
150 | FATTR4_WORD1_TIME_METADATA
151 | FATTR4_WORD1_TIME_MODIFY,
152 FATTR4_WORD2_MDSTHRESHOLD
153};
154
155static const u32 nfs4_open_noattr_bitmap[3] = {
156 FATTR4_WORD0_TYPE
157 | FATTR4_WORD0_CHANGE
158 | FATTR4_WORD0_FILEID,
159};
160
161const u32 nfs4_statfs_bitmap[2] = {
162 FATTR4_WORD0_FILES_AVAIL
163 | FATTR4_WORD0_FILES_FREE
164 | FATTR4_WORD0_FILES_TOTAL,
165 FATTR4_WORD1_SPACE_AVAIL
166 | FATTR4_WORD1_SPACE_FREE
167 | FATTR4_WORD1_SPACE_TOTAL
168};
169
170const u32 nfs4_pathconf_bitmap[2] = {
171 FATTR4_WORD0_MAXLINK
172 | FATTR4_WORD0_MAXNAME,
173 0
174};
175
176const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
177 | FATTR4_WORD0_MAXREAD
178 | FATTR4_WORD0_MAXWRITE
179 | FATTR4_WORD0_LEASE_TIME,
180 FATTR4_WORD1_TIME_DELTA
181 | FATTR4_WORD1_FS_LAYOUT_TYPES,
182 FATTR4_WORD2_LAYOUT_BLKSIZE
183};
184
185const u32 nfs4_fs_locations_bitmap[2] = {
186 FATTR4_WORD0_TYPE
187 | FATTR4_WORD0_CHANGE
188 | FATTR4_WORD0_SIZE
189 | FATTR4_WORD0_FSID
190 | FATTR4_WORD0_FILEID
191 | FATTR4_WORD0_FS_LOCATIONS,
192 FATTR4_WORD1_MODE
193 | FATTR4_WORD1_NUMLINKS
194 | FATTR4_WORD1_OWNER
195 | FATTR4_WORD1_OWNER_GROUP
196 | FATTR4_WORD1_RAWDEV
197 | FATTR4_WORD1_SPACE_USED
198 | FATTR4_WORD1_TIME_ACCESS
199 | FATTR4_WORD1_TIME_METADATA
200 | FATTR4_WORD1_TIME_MODIFY
201 | FATTR4_WORD1_MOUNTED_ON_FILEID
202};
203
204static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
205 struct nfs4_readdir_arg *readdir)
206{
207 __be32 *start, *p;
208
209 if (cookie > 2) {
210 readdir->cookie = cookie;
211 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
212 return;
213 }
214
215 readdir->cookie = 0;
216 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
217 if (cookie == 2)
218 return;
219
220 /*
221 * NFSv4 servers do not return entries for '.' and '..'
222 * Therefore, we fake these entries here. We let '.'
223 * have cookie 0 and '..' have cookie 1. Note that
224 * when talking to the server, we always send cookie 0
225 * instead of 1 or 2.
226 */
227 start = p = kmap_atomic(*readdir->pages);
228
229 if (cookie == 0) {
230 *p++ = xdr_one; /* next */
231 *p++ = xdr_zero; /* cookie, first word */
232 *p++ = xdr_one; /* cookie, second word */
233 *p++ = xdr_one; /* entry len */
234 memcpy(p, ".\0\0\0", 4); /* entry */
235 p++;
236 *p++ = xdr_one; /* bitmap length */
237 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
238 *p++ = htonl(8); /* attribute buffer length */
239 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
240 }
241
242 *p++ = xdr_one; /* next */
243 *p++ = xdr_zero; /* cookie, first word */
244 *p++ = xdr_two; /* cookie, second word */
245 *p++ = xdr_two; /* entry len */
246 memcpy(p, "..\0\0", 4); /* entry */
247 p++;
248 *p++ = xdr_one; /* bitmap length */
249 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
250 *p++ = htonl(8); /* attribute buffer length */
251 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
252
253 readdir->pgbase = (char *)p - (char *)start;
254 readdir->count -= readdir->pgbase;
255 kunmap_atomic(start);
256}
257
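/*
 * Wait (killably) for the state manager to finish running, then report
 * any error left over from client initialisation.
 */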
258static int nfs4_wait_clnt_recover(struct nfs_client *clp)
259{
260 int res;
261
262 might_sleep();
263
264 res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
265 nfs_wait_bit_killable, TASK_KILLABLE);
266 if (res)
267 return res;
268
269 if (clp->cl_cons_state < 0)
270 return clp->cl_cons_state;
271 return 0;
272}
273
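/*
 * Sleep before retrying an operation. The delay doubles on every call,
 * but is clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] before
 * sleeping; returns -ERESTARTSYS if a fatal signal arrived meanwhile.
 */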
274static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
275{
276 int res = 0;
277
278 might_sleep();
279
280 if (*timeout <= 0)
281 *timeout = NFS4_POLL_RETRY_MIN;
282 if (*timeout > NFS4_POLL_RETRY_MAX)
283 *timeout = NFS4_POLL_RETRY_MAX;
284 freezable_schedule_timeout_killable(*timeout);
285 if (fatal_signal_pending(current))
286 res = -ERESTARTSYS;
287 *timeout <<= 1;
288 return res;
289}
290
291/* This is the error handling routine for processes that are allowed
292 * to sleep.
293 */
294static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
295{
296 struct nfs_client *clp = server->nfs_client;
297 struct nfs4_state *state = exception->state;
298 struct inode *inode = exception->inode;
299 int ret = errorcode;
300
301 exception->retry = 0;
302 switch(errorcode) {
303 case 0:
304 return 0;
305 case -NFS4ERR_OPENMODE:
306 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
307 nfs4_inode_return_delegation(inode);
308 exception->retry = 1;
309 return 0;
310 }
311 if (state == NULL)
312 break;
313 nfs4_schedule_stateid_recovery(server, state);
314 goto wait_on_recovery;
315 case -NFS4ERR_DELEG_REVOKED:
316 case -NFS4ERR_ADMIN_REVOKED:
317 case -NFS4ERR_BAD_STATEID:
318 if (state == NULL)
319 break;
320 nfs_remove_bad_delegation(state->inode);
321 nfs4_schedule_stateid_recovery(server, state);
322 goto wait_on_recovery;
323 case -NFS4ERR_EXPIRED:
324 if (state != NULL)
325 nfs4_schedule_stateid_recovery(server, state);
326 case -NFS4ERR_STALE_STATEID:
327 case -NFS4ERR_STALE_CLIENTID:
328 nfs4_schedule_lease_recovery(clp);
329 goto wait_on_recovery;
330#if defined(CONFIG_NFS_V4_1)
331 case -NFS4ERR_BADSESSION:
332 case -NFS4ERR_BADSLOT:
333 case -NFS4ERR_BAD_HIGH_SLOT:
334 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
335 case -NFS4ERR_DEADSESSION:
336 case -NFS4ERR_SEQ_FALSE_RETRY:
337 case -NFS4ERR_SEQ_MISORDERED:
338 dprintk("%s ERROR: %d Reset session\n", __func__,
339 errorcode);
340 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
341 goto wait_on_recovery;
342#endif /* defined(CONFIG_NFS_V4_1) */
343 case -NFS4ERR_FILE_OPEN:
344 if (exception->timeout > HZ) {
345 /* We have retried a decent amount, time to
346 * fail
347 */
348 ret = -EBUSY;
349 break;
350 }
351 case -NFS4ERR_GRACE:
352 case -NFS4ERR_DELAY:
353 case -EKEYEXPIRED:
354 ret = nfs4_delay(server->client, &exception->timeout);
355 if (ret != 0)
356 break;
357 case -NFS4ERR_RETRY_UNCACHED_REP:
358 case -NFS4ERR_OLD_STATEID:
359 exception->retry = 1;
360 break;
361 case -NFS4ERR_BADOWNER:
362 /* The following works around a Linux server bug! */
363 case -NFS4ERR_BADNAME:
364 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
365 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
366 exception->retry = 1;
367 printk(KERN_WARNING "NFS: v4 server %s "
368 "does not accept raw "
369 "uid/gids. "
370 "Reenabling the idmapper.\n",
371 server->nfs_client->cl_hostname);
372 }
373 }
374 /* We failed to handle the error */
375 return nfs4_map_errors(ret);
376wait_on_recovery:
377 ret = nfs4_wait_clnt_recover(clp);
378 if (ret == 0)
379 exception->retry = 1;
380 return ret;
381}
382
383
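/*
 * Record a successful lease renewal, taking care never to move
 * cl_last_renewal backwards in time.
 */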
384static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
385{
386 spin_lock(&clp->cl_lock);
387 if (time_before(clp->cl_last_renewal,timestamp))
388 clp->cl_last_renewal = timestamp;
389 spin_unlock(&clp->cl_lock);
390}
391
392static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
393{
394 do_renew_lease(server->nfs_client, timestamp);
395}
396
397#if defined(CONFIG_NFS_V4_1)
398
399/*
400 * nfs4_shrink_slot_table - free retired slots from the slot table
401 */
402static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
403{
404 struct nfs4_slot **p;
405 if (newsize >= tbl->max_slots)
406 return;
407
408 p = &tbl->slots;
409 while (newsize--)
410 p = &(*p)->next;
411 while (*p) {
412 struct nfs4_slot *slot = *p;
413
414 *p = slot->next;
415 kfree(slot);
416 tbl->max_slots--;
417 }
418}
419
420/*
421 * nfs4_free_slot - free a slot and efficiently update slot table.
422 *
423 * freeing a slot is trivially done by clearing its respective bit
424 * in the bitmap.
425 * If the freed slotid equals highest_used_slotid we want to update it
426 * so that the server would be able to size down the slot table if needed,
427 * otherwise we know that the highest_used_slotid is still in use.
428 * When updating highest_used_slotid there may be "holes" in the bitmap
429 * so we need to scan down from highest_used_slotid to 0 looking for the now
430 * highest slotid in use.
431 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
432 *
433 * Must be called while holding tbl->slot_tbl_lock
434 */
435static void
436nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
437{
438 u32 slotid = slot->slot_nr;
439
440 /* clear used bit in bitmap */
441 __clear_bit(slotid, tbl->used_slots);
442
443 /* update highest_used_slotid when it is freed */
444 if (slotid == tbl->highest_used_slotid) {
445 u32 new_max = find_last_bit(tbl->used_slots, slotid);
446 if (new_max < slotid)
447 tbl->highest_used_slotid = new_max;
448 else
449 tbl->highest_used_slotid = NFS4_NO_SLOT;
450 }
451 dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
452 slotid, tbl->highest_used_slotid);
453}
454
455bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
456{
457 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
458 return true;
459}
460
461/*
462 * Signal state manager thread if session fore channel is drained
463 */
464static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
465{
466 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
467 rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
468 nfs4_set_task_privileged, NULL);
469 return;
470 }
471
472 if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
473 return;
474
475 dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
476 complete(&ses->fc_slot_table.complete);
477}
478
479/*
480 * Signal state manager thread if session back channel is drained
481 */
482void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
483{
484 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
485 ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
486 return;
487 dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
488 complete(&ses->bc_slot_table.complete);
489}
490
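/*
 * Return the slot used by a completed SEQUENCE op to the fore channel
 * slot table, and let the state manager know if that completes a drain.
 */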
491static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
492{
493 struct nfs4_session *session;
494 struct nfs4_slot_table *tbl;
495
496 if (!res->sr_slot) {
 497 /* no slot was consumed after all, so there is
 498 * nothing to free here */
499 dprintk("%s: No slot\n", __func__);
500 return;
501 }
502 tbl = res->sr_slot->table;
503 session = tbl->session;
504
505 spin_lock(&tbl->slot_tbl_lock);
506 nfs4_free_slot(tbl, res->sr_slot);
507 nfs4_check_drain_fc_complete(session);
508 spin_unlock(&tbl->slot_tbl_lock);
509 res->sr_slot = NULL;
510}
511
512/* Update the client's idea of target_highest_slotid */
513static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
514 u32 target_highest_slotid)
515{
516 unsigned int max_slotid, i;
517
518 if (tbl->target_highest_slotid == target_highest_slotid)
519 return;
520 tbl->target_highest_slotid = target_highest_slotid;
521 tbl->generation++;
522
523 max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, tbl->target_highest_slotid);
524 for (i = tbl->max_slotid + 1; i <= max_slotid; i++)
525 rpc_wake_up_next(&tbl->slot_tbl_waitq);
526 tbl->max_slotid = max_slotid;
527}
528
529void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
530 u32 target_highest_slotid)
531{
532 spin_lock(&tbl->slot_tbl_lock);
533 nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
534 spin_unlock(&tbl->slot_tbl_lock);
535}
536
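/*
 * Record the highest slotid the server has acknowledged; if the server
 * shrank its slot table, retire the slots that are no longer usable.
 * Caller must hold tbl->slot_tbl_lock.
 */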
537static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl,
538 u32 highest_slotid)
539{
540 if (tbl->server_highest_slotid == highest_slotid)
541 return;
542 if (tbl->highest_used_slotid > highest_slotid)
543 return;
544 /* Deallocate slots */
545 nfs4_shrink_slot_table(tbl, highest_slotid + 1);
546 tbl->server_highest_slotid = highest_slotid;
547}
548
549static void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
550 struct nfs4_slot *slot,
551 struct nfs4_sequence_res *res)
552{
553 spin_lock(&tbl->slot_tbl_lock);
554 if (tbl->generation != slot->generation)
555 goto out;
556 nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid);
557 nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid);
558out:
559 spin_unlock(&tbl->slot_tbl_lock);
560}
561
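/*
 * Process the SEQUENCE result of an NFSv4.1 compound: on success bump
 * the slot's sequence number, renew the lease and update the target
 * slotid; on NFS4ERR_DELAY restart the RPC after a delay. The slot is
 * freed unless the call is being retried.
 */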
562static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
563{
564 struct nfs4_session *session;
565 struct nfs4_slot *slot;
566 unsigned long timestamp;
567 struct nfs_client *clp;
568
569 /*
570 * sr_status remains 1 if an RPC level error occurred. The server
 571 * may or may not have processed the sequence operation.
572 * Proceed as if the server received and processed the sequence
573 * operation.
574 */
575 if (res->sr_status == 1)
576 res->sr_status = NFS_OK;
577
578 /* don't increment the sequence number if the task wasn't sent */
579 if (!RPC_WAS_SENT(task))
580 goto out;
581
582 slot = res->sr_slot;
583 session = slot->table->session;
584
585 /* Check the SEQUENCE operation status */
586 switch (res->sr_status) {
587 case 0:
588 /* Update the slot's sequence and clientid lease timer */
589 ++slot->seq_nr;
590 timestamp = slot->renewal_time;
591 clp = session->clp;
592 do_renew_lease(clp, timestamp);
593 /* Check sequence flags */
594 if (res->sr_status_flags != 0)
595 nfs4_schedule_lease_recovery(clp);
596 nfs41_update_target_slotid(slot->table, slot, res);
597 break;
598 case -NFS4ERR_DELAY:
599 /* The server detected a resend of the RPC call and
600 * returned NFS4ERR_DELAY as per Section 2.10.6.2
601 * of RFC5661.
602 */
603 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
604 __func__,
605 slot->slot_nr,
606 slot->seq_nr);
607 goto out_retry;
608 default:
609 /* Just update the slot sequence no. */
610 ++slot->seq_nr;
611 }
612out:
613 /* The session may be reset by one of the error handlers. */
614 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
615 nfs41_sequence_free_slot(res);
616 return 1;
617out_retry:
618 if (!rpc_restart_call(task))
619 goto out;
620 rpc_delay(task, NFS4_POLL_RETRY_MAX);
621 return 0;
622}
623
624static int nfs4_sequence_done(struct rpc_task *task,
625 struct nfs4_sequence_res *res)
626{
627 if (res->sr_slot == NULL)
628 return 1;
629 return nfs41_sequence_done(task, res);
630}
631
632static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
633 u32 slotid, u32 seq_init, gfp_t gfp_mask)
634{
635 struct nfs4_slot *slot;
636
637 slot = kzalloc(sizeof(*slot), gfp_mask);
638 if (slot) {
639 slot->table = tbl;
640 slot->slot_nr = slotid;
641 slot->seq_nr = seq_init;
642 }
643 return slot;
644}
645
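/*
 * Walk the slot list looking for the slot with number 'slotid',
 * allocating new slots (and growing tbl->max_slots) as needed until it
 * is found or an allocation fails.
 */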
646static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
647 u32 slotid, u32 seq_init, gfp_t gfp_mask)
648{
649 struct nfs4_slot **p, *slot;
650
651 p = &tbl->slots;
652 for (;;) {
653 if (*p == NULL) {
654 *p = nfs4_new_slot(tbl, tbl->max_slots,
655 seq_init, gfp_mask);
656 if (*p == NULL)
657 break;
658 tbl->max_slots++;
659 }
660 slot = *p;
661 if (slot->slot_nr == slotid)
662 return slot;
663 p = &slot->next;
664 }
665 return NULL;
666}
667
668/*
669 * nfs4_alloc_slot - efficiently look for a free slot
670 *
671 * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
 672 * If found, we mark the slot as used and update the highest_used_slotid;
 673 * the caller then uses the returned slot to set up the sequence
 674 * operation args.
 675 * Note: must be called while holding the slot_tbl_lock.
676 */
677static struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
678{
679 struct nfs4_slot *ret = NULL;
680 u32 slotid;
681
682 dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
683 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
684 tbl->max_slotid + 1);
685 slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
686 if (slotid > tbl->max_slotid)
687 goto out;
688 ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
689 if (ret == NULL)
690 goto out;
691 __set_bit(slotid, tbl->used_slots);
692 if (slotid > tbl->highest_used_slotid ||
693 tbl->highest_used_slotid == NFS4_NO_SLOT)
694 tbl->highest_used_slotid = slotid;
695 ret->renewal_time = jiffies;
696 ret->generation = tbl->generation;
697
698out:
699 dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
700 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
701 ret ? ret->slot_nr : -1);
702 return ret;
703}
704
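/*
 * Initialise the sequence arguments/results for a new compound;
 * sa_cache_this asks the server to cache the reply in the session's
 * reply cache.
 */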
705static void nfs41_init_sequence(struct nfs4_sequence_args *args,
706 struct nfs4_sequence_res *res, int cache_reply)
707{
708 args->sa_slot = NULL;
709 args->sa_cache_this = 0;
710 if (cache_reply)
711 args->sa_cache_this = 1;
712 res->sr_slot = NULL;
713}
714
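/*
 * Reserve a fore channel slot for this RPC. The task is put to sleep on
 * the slot table waitqueue (and -EAGAIN returned) if the session is
 * draining, if other tasks are already queued ahead of it, or if no
 * slot is currently free.
 */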
715int nfs41_setup_sequence(struct nfs4_session *session,
716 struct nfs4_sequence_args *args,
717 struct nfs4_sequence_res *res,
718 struct rpc_task *task)
719{
720 struct nfs4_slot *slot;
721 struct nfs4_slot_table *tbl;
722
723 dprintk("--> %s\n", __func__);
724 /* slot already allocated? */
725 if (res->sr_slot != NULL)
726 return 0;
727
728 tbl = &session->fc_slot_table;
729
730 spin_lock(&tbl->slot_tbl_lock);
731 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
732 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
733 /* The state manager will wait until the slot table is empty */
734 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
735 spin_unlock(&tbl->slot_tbl_lock);
736 dprintk("%s session is draining\n", __func__);
737 return -EAGAIN;
738 }
739
740 if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
741 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
742 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
743 spin_unlock(&tbl->slot_tbl_lock);
744 dprintk("%s enforce FIFO order\n", __func__);
745 return -EAGAIN;
746 }
747
748 slot = nfs4_alloc_slot(tbl);
749 if (slot == NULL) {
750 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
751 spin_unlock(&tbl->slot_tbl_lock);
752 dprintk("<-- %s: no free slots\n", __func__);
753 return -EAGAIN;
754 }
755 spin_unlock(&tbl->slot_tbl_lock);
756
757 rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
758
759 args->sa_slot = slot;
760
761 dprintk("<-- %s slotid=%d seqid=%d\n", __func__,
762 slot->slot_nr, slot->seq_nr);
763
764 res->sr_slot = slot;
765 res->sr_status_flags = 0;
766 /*
767 * sr_status is only set in decode_sequence, and so will remain
768 * set to 1 if an rpc level failure occurs.
769 */
770 res->sr_status = 1;
771 return 0;
772}
773EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
774
775int nfs4_setup_sequence(const struct nfs_server *server,
776 struct nfs4_sequence_args *args,
777 struct nfs4_sequence_res *res,
778 struct rpc_task *task)
779{
780 struct nfs4_session *session = nfs4_get_session(server);
781 int ret = 0;
782
783 if (session == NULL)
784 goto out;
785
786 dprintk("--> %s clp %p session %p sr_slot %d\n",
787 __func__, session->clp, session, res->sr_slot ?
788 res->sr_slot->slot_nr : -1);
789
790 ret = nfs41_setup_sequence(session, args, res, task);
791out:
792 dprintk("<-- %s status=%d\n", __func__, ret);
793 return ret;
794}
795
796struct nfs41_call_sync_data {
797 const struct nfs_server *seq_server;
798 struct nfs4_sequence_args *seq_args;
799 struct nfs4_sequence_res *seq_res;
800};
801
802static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
803{
804 struct nfs41_call_sync_data *data = calldata;
805
806 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
807
808 if (nfs4_setup_sequence(data->seq_server, data->seq_args,
809 data->seq_res, task))
810 return;
811 rpc_call_start(task);
812}
813
814static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
815{
816 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
817 nfs41_call_sync_prepare(task, calldata);
818}
819
820static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
821{
822 struct nfs41_call_sync_data *data = calldata;
823
824 nfs41_sequence_done(task, data->seq_res);
825}
826
827static const struct rpc_call_ops nfs41_call_sync_ops = {
828 .rpc_call_prepare = nfs41_call_sync_prepare,
829 .rpc_call_done = nfs41_call_sync_done,
830};
831
832static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
833 .rpc_call_prepare = nfs41_call_priv_sync_prepare,
834 .rpc_call_done = nfs41_call_sync_done,
835};
836
837static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
838 struct nfs_server *server,
839 struct rpc_message *msg,
840 struct nfs4_sequence_args *args,
841 struct nfs4_sequence_res *res,
842 int privileged)
843{
844 int ret;
845 struct rpc_task *task;
846 struct nfs41_call_sync_data data = {
847 .seq_server = server,
848 .seq_args = args,
849 .seq_res = res,
850 };
851 struct rpc_task_setup task_setup = {
852 .rpc_client = clnt,
853 .rpc_message = msg,
854 .callback_ops = &nfs41_call_sync_ops,
855 .callback_data = &data
856 };
857
858 if (privileged)
859 task_setup.callback_ops = &nfs41_call_priv_sync_ops;
860 task = rpc_run_task(&task_setup);
861 if (IS_ERR(task))
862 ret = PTR_ERR(task);
863 else {
864 ret = task->tk_status;
865 rpc_put_task(task);
866 }
867 return ret;
868}
869
870int _nfs4_call_sync_session(struct rpc_clnt *clnt,
871 struct nfs_server *server,
872 struct rpc_message *msg,
873 struct nfs4_sequence_args *args,
874 struct nfs4_sequence_res *res,
875 int cache_reply)
876{
877 nfs41_init_sequence(args, res, cache_reply);
878 return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
879}
880
881#else
882static inline
883void nfs41_init_sequence(struct nfs4_sequence_args *args,
884 struct nfs4_sequence_res *res, int cache_reply)
885{
886}
887
888static int nfs4_sequence_done(struct rpc_task *task,
889 struct nfs4_sequence_res *res)
890{
891 return 1;
892}
893#endif /* CONFIG_NFS_V4_1 */
894
895int _nfs4_call_sync(struct rpc_clnt *clnt,
896 struct nfs_server *server,
897 struct rpc_message *msg,
898 struct nfs4_sequence_args *args,
899 struct nfs4_sequence_res *res,
900 int cache_reply)
901{
902 nfs41_init_sequence(args, res, cache_reply);
903 return rpc_call_sync(clnt, msg, 0);
904}
905
906static inline
907int nfs4_call_sync(struct rpc_clnt *clnt,
908 struct nfs_server *server,
909 struct rpc_message *msg,
910 struct nfs4_sequence_args *args,
911 struct nfs4_sequence_res *res,
912 int cache_reply)
913{
914 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
915 args, res, cache_reply);
916}
917
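/*
 * Apply the change_info returned by a directory-modifying operation:
 * invalidate cached directory attributes and data, and force a lookup
 * revalidation unless the change was atomic and matches our cached
 * version.
 */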
918static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
919{
920 struct nfs_inode *nfsi = NFS_I(dir);
921
922 spin_lock(&dir->i_lock);
923 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
924 if (!cinfo->atomic || cinfo->before != dir->i_version)
925 nfs_force_lookup_revalidate(dir);
926 dir->i_version = cinfo->after;
927 spin_unlock(&dir->i_lock);
928}
929
930struct nfs4_opendata {
931 struct kref kref;
932 struct nfs_openargs o_arg;
933 struct nfs_openres o_res;
934 struct nfs_open_confirmargs c_arg;
935 struct nfs_open_confirmres c_res;
936 struct nfs4_string owner_name;
937 struct nfs4_string group_name;
938 struct nfs_fattr f_attr;
939 struct dentry *dir;
940 struct dentry *dentry;
941 struct nfs4_state_owner *owner;
942 struct nfs4_state *state;
943 struct iattr attrs;
944 unsigned long timestamp;
945 unsigned int rpc_done : 1;
946 int rpc_status;
947 int cancelled;
948};
949
950
951static void nfs4_init_opendata_res(struct nfs4_opendata *p)
952{
953 p->o_res.f_attr = &p->f_attr;
954 p->o_res.seqid = p->o_arg.seqid;
955 p->c_res.seqid = p->c_arg.seqid;
956 p->o_res.server = p->o_arg.server;
957 p->o_res.access_request = p->o_arg.access;
958 nfs_fattr_init(&p->f_attr);
959 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
960}
961
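/*
 * Allocate and initialise the nfs4_opendata that carries an OPEN
 * compound's arguments and results: pins the dentry, its parent and the
 * superblock, takes a reference on the state owner, and fills in a
 * create verifier when attributes are supplied.
 */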
962static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
963 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
964 const struct iattr *attrs,
965 gfp_t gfp_mask)
966{
967 struct dentry *parent = dget_parent(dentry);
968 struct inode *dir = parent->d_inode;
969 struct nfs_server *server = NFS_SERVER(dir);
970 struct nfs4_opendata *p;
971
972 p = kzalloc(sizeof(*p), gfp_mask);
973 if (p == NULL)
974 goto err;
975 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
976 if (p->o_arg.seqid == NULL)
977 goto err_free;
978 nfs_sb_active(dentry->d_sb);
979 p->dentry = dget(dentry);
980 p->dir = parent;
981 p->owner = sp;
982 atomic_inc(&sp->so_count);
983 p->o_arg.fh = NFS_FH(dir);
984 p->o_arg.open_flags = flags;
985 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
986 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
987 * will return permission denied for all bits until close */
988 if (!(flags & O_EXCL)) {
989 /* ask server to check for all possible rights as results
990 * are cached */
991 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
992 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
993 }
994 p->o_arg.clientid = server->nfs_client->cl_clientid;
995 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
996 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
997 p->o_arg.name = &dentry->d_name;
998 p->o_arg.server = server;
999 p->o_arg.bitmask = server->attr_bitmask;
1000 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1001 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
1002 if (attrs != NULL && attrs->ia_valid != 0) {
1003 __be32 verf[2];
1004
1005 p->o_arg.u.attrs = &p->attrs;
1006 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1007
1008 verf[0] = jiffies;
1009 verf[1] = current->pid;
1010 memcpy(p->o_arg.u.verifier.data, verf,
1011 sizeof(p->o_arg.u.verifier.data));
1012 }
1013 p->c_arg.fh = &p->o_res.fh;
1014 p->c_arg.stateid = &p->o_res.stateid;
1015 p->c_arg.seqid = p->o_arg.seqid;
1016 nfs4_init_opendata_res(p);
1017 kref_init(&p->kref);
1018 return p;
1019err_free:
1020 kfree(p);
1021err:
1022 dput(parent);
1023 return NULL;
1024}
1025
1026static void nfs4_opendata_free(struct kref *kref)
1027{
1028 struct nfs4_opendata *p = container_of(kref,
1029 struct nfs4_opendata, kref);
1030 struct super_block *sb = p->dentry->d_sb;
1031
1032 nfs_free_seqid(p->o_arg.seqid);
1033 if (p->state != NULL)
1034 nfs4_put_open_state(p->state);
1035 nfs4_put_state_owner(p->owner);
1036 dput(p->dir);
1037 dput(p->dentry);
1038 nfs_sb_deactive(sb);
1039 nfs_fattr_free_names(&p->f_attr);
1040 kfree(p);
1041}
1042
1043static void nfs4_opendata_put(struct nfs4_opendata *p)
1044{
1045 if (p != NULL)
1046 kref_put(&p->kref, nfs4_opendata_free);
1047}
1048
1049static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1050{
1051 int ret;
1052
1053 ret = rpc_wait_for_completion_task(task);
1054 return ret;
1055}
1056
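/*
 * Return non-zero if existing open state already covers the requested
 * open mode, so that the OPEN call can be elided. O_EXCL and O_TRUNC
 * always force a trip to the server.
 */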
1057static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1058{
1059 int ret = 0;
1060
1061 if (open_mode & (O_EXCL|O_TRUNC))
1062 goto out;
1063 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1064 case FMODE_READ:
1065 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1066 && state->n_rdonly != 0;
1067 break;
1068 case FMODE_WRITE:
1069 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1070 && state->n_wronly != 0;
1071 break;
1072 case FMODE_READ|FMODE_WRITE:
1073 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1074 && state->n_rdwr != 0;
1075 }
1076out:
1077 return ret;
1078}
1079
1080static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
1081{
1082 if (delegation == NULL)
1083 return 0;
1084 if ((delegation->type & fmode) != fmode)
1085 return 0;
1086 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1087 return 0;
1088 nfs_mark_delegation_referenced(delegation);
1089 return 1;
1090}
1091
1092static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1093{
1094 switch (fmode) {
1095 case FMODE_WRITE:
1096 state->n_wronly++;
1097 break;
1098 case FMODE_READ:
1099 state->n_rdonly++;
1100 break;
1101 case FMODE_READ|FMODE_WRITE:
1102 state->n_rdwr++;
1103 }
1104 nfs4_state_set_mode_locked(state, state->state | fmode);
1105}
1106
1107static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1108{
1109 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1110 nfs4_stateid_copy(&state->stateid, stateid);
1111 nfs4_stateid_copy(&state->open_stateid, stateid);
1112 switch (fmode) {
1113 case FMODE_READ:
1114 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1115 break;
1116 case FMODE_WRITE:
1117 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1118 break;
1119 case FMODE_READ|FMODE_WRITE:
1120 set_bit(NFS_O_RDWR_STATE, &state->flags);
1121 }
1122}
1123
1124static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1125{
1126 write_seqlock(&state->seqlock);
1127 nfs_set_open_stateid_locked(state, stateid, fmode);
1128 write_sequnlock(&state->seqlock);
1129}
1130
1131static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1132{
1133 /*
1134 * Protect the call to nfs4_state_set_mode_locked and
1135 * serialise the stateid update
1136 */
1137 write_seqlock(&state->seqlock);
1138 if (deleg_stateid != NULL) {
1139 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1140 set_bit(NFS_DELEGATED_STATE, &state->flags);
1141 }
1142 if (open_stateid != NULL)
1143 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1144 write_sequnlock(&state->seqlock);
1145 spin_lock(&state->owner->so_lock);
1146 update_open_stateflags(state, fmode);
1147 spin_unlock(&state->owner->so_lock);
1148}
1149
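/*
 * Record the open stateid returned by the server and, if a matching
 * delegation is still held on the inode, the delegation stateid as
 * well. Returns 1 if any stateid was updated.
 */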
1150static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1151{
1152 struct nfs_inode *nfsi = NFS_I(state->inode);
1153 struct nfs_delegation *deleg_cur;
1154 int ret = 0;
1155
1156 fmode &= (FMODE_READ|FMODE_WRITE);
1157
1158 rcu_read_lock();
1159 deleg_cur = rcu_dereference(nfsi->delegation);
1160 if (deleg_cur == NULL)
1161 goto no_delegation;
1162
1163 spin_lock(&deleg_cur->lock);
1164 if (nfsi->delegation != deleg_cur ||
1165 (deleg_cur->type & fmode) != fmode)
1166 goto no_delegation_unlock;
1167
1168 if (delegation == NULL)
1169 delegation = &deleg_cur->stateid;
1170 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1171 goto no_delegation_unlock;
1172
1173 nfs_mark_delegation_referenced(deleg_cur);
1174 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1175 ret = 1;
1176no_delegation_unlock:
1177 spin_unlock(&deleg_cur->lock);
1178no_delegation:
1179 rcu_read_unlock();
1180
1181 if (!ret && open_stateid != NULL) {
1182 __update_open_stateid(state, open_stateid, NULL, fmode);
1183 ret = 1;
1184 }
1185
1186 return ret;
1187}
1188
1189
1190static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1191{
1192 struct nfs_delegation *delegation;
1193
1194 rcu_read_lock();
1195 delegation = rcu_dereference(NFS_I(inode)->delegation);
1196 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1197 rcu_read_unlock();
1198 return;
1199 }
1200 rcu_read_unlock();
1201 nfs4_inode_return_delegation(inode);
1202}
1203
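/*
 * Try to satisfy an open from the cached open state or from a
 * delegation held on the inode, avoiding an on-the-wire OPEN.
 * Returns ERR_PTR(-EAGAIN) if the OPEN still has to be sent.
 */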
1204static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1205{
1206 struct nfs4_state *state = opendata->state;
1207 struct nfs_inode *nfsi = NFS_I(state->inode);
1208 struct nfs_delegation *delegation;
1209 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
1210 fmode_t fmode = opendata->o_arg.fmode;
1211 nfs4_stateid stateid;
1212 int ret = -EAGAIN;
1213
1214 for (;;) {
1215 if (can_open_cached(state, fmode, open_mode)) {
1216 spin_lock(&state->owner->so_lock);
1217 if (can_open_cached(state, fmode, open_mode)) {
1218 update_open_stateflags(state, fmode);
1219 spin_unlock(&state->owner->so_lock);
1220 goto out_return_state;
1221 }
1222 spin_unlock(&state->owner->so_lock);
1223 }
1224 rcu_read_lock();
1225 delegation = rcu_dereference(nfsi->delegation);
1226 if (!can_open_delegated(delegation, fmode)) {
1227 rcu_read_unlock();
1228 break;
1229 }
1230 /* Save the delegation */
1231 nfs4_stateid_copy(&stateid, &delegation->stateid);
1232 rcu_read_unlock();
1233 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1234 if (ret != 0)
1235 goto out;
1236 ret = -EAGAIN;
1237
1238 /* Try to update the stateid using the delegation */
1239 if (update_open_stateid(state, NULL, &stateid, fmode))
1240 goto out_return_state;
1241 }
1242out:
1243 return ERR_PTR(ret);
1244out_return_state:
1245 atomic_inc(&state->count);
1246 return state;
1247}
1248
1249static void
1250nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1251{
1252 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1253 struct nfs_delegation *delegation;
1254 int delegation_flags = 0;
1255
1256 rcu_read_lock();
1257 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1258 if (delegation)
1259 delegation_flags = delegation->flags;
1260 rcu_read_unlock();
1261 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1262 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1263 "returning a delegation for "
1264 "OPEN(CLAIM_DELEGATE_CUR)\n",
1265 clp->cl_hostname);
1266 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1267 nfs_inode_set_delegation(state->inode,
1268 data->owner->so_cred,
1269 &data->o_res);
1270 else
1271 nfs_inode_reclaim_delegation(state->inode,
1272 data->owner->so_cred,
1273 &data->o_res);
1274}
1275
1276/*
1277 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1278 * and update the nfs4_state.
1279 */
1280static struct nfs4_state *
1281_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1282{
1283 struct inode *inode = data->state->inode;
1284 struct nfs4_state *state = data->state;
1285 int ret;
1286
1287 if (!data->rpc_done) {
1288 ret = data->rpc_status;
1289 goto err;
1290 }
1291
1292 ret = -ESTALE;
1293 if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
1294 !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
1295 !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
1296 goto err;
1297
1298 ret = -ENOMEM;
1299 state = nfs4_get_open_state(inode, data->owner);
1300 if (state == NULL)
1301 goto err;
1302
1303 ret = nfs_refresh_inode(inode, &data->f_attr);
1304 if (ret)
1305 goto err;
1306
1307 if (data->o_res.delegation_type != 0)
1308 nfs4_opendata_check_deleg(data, state);
1309 update_open_stateid(state, &data->o_res.stateid, NULL,
1310 data->o_arg.fmode);
1311
1312 return state;
1313err:
1314 return ERR_PTR(ret);
1315
1316}
1317
1318static struct nfs4_state *
1319_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1320{
1321 struct inode *inode;
1322 struct nfs4_state *state = NULL;
1323 int ret;
1324
1325 if (!data->rpc_done) {
1326 state = nfs4_try_open_cached(data);
1327 goto out;
1328 }
1329
1330 ret = -EAGAIN;
1331 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1332 goto err;
1333 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1334 ret = PTR_ERR(inode);
1335 if (IS_ERR(inode))
1336 goto err;
1337 ret = -ENOMEM;
1338 state = nfs4_get_open_state(inode, data->owner);
1339 if (state == NULL)
1340 goto err_put_inode;
1341 if (data->o_res.delegation_type != 0)
1342 nfs4_opendata_check_deleg(data, state);
1343 update_open_stateid(state, &data->o_res.stateid, NULL,
1344 data->o_arg.fmode);
1345 iput(inode);
1346out:
1347 return state;
1348err_put_inode:
1349 iput(inode);
1350err:
1351 return ERR_PTR(ret);
1352}
1353
1354static struct nfs4_state *
1355nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1356{
1357 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1358 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1359 return _nfs4_opendata_to_nfs4_state(data);
1360}
1361
1362static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1363{
1364 struct nfs_inode *nfsi = NFS_I(state->inode);
1365 struct nfs_open_context *ctx;
1366
1367 spin_lock(&state->inode->i_lock);
1368 list_for_each_entry(ctx, &nfsi->open_files, list) {
1369 if (ctx->state != state)
1370 continue;
1371 get_nfs_open_context(ctx);
1372 spin_unlock(&state->inode->i_lock);
1373 return ctx;
1374 }
1375 spin_unlock(&state->inode->i_lock);
1376 return ERR_PTR(-ENOENT);
1377}
1378
1379static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1380{
1381 struct nfs4_opendata *opendata;
1382
1383 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1384 if (opendata == NULL)
1385 return ERR_PTR(-ENOMEM);
1386 opendata->state = state;
1387 atomic_inc(&state->count);
1388 return opendata;
1389}
1390
1391static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1392{
1393 struct nfs4_state *newstate;
1394 int ret;
1395
1396 opendata->o_arg.open_flags = 0;
1397 opendata->o_arg.fmode = fmode;
1398 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1399 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1400 nfs4_init_opendata_res(opendata);
1401 ret = _nfs4_recover_proc_open(opendata);
1402 if (ret != 0)
1403 return ret;
1404 newstate = nfs4_opendata_to_nfs4_state(opendata);
1405 if (IS_ERR(newstate))
1406 return PTR_ERR(newstate);
1407 nfs4_close_state(newstate, fmode);
1408 *res = newstate;
1409 return 0;
1410}
1411
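/*
 * Re-establish open state by replaying an OPEN for each share mode
 * (read/write, write-only, read-only) that still has users, then make
 * sure the current stateid matches the recovered open stateid.
 */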
1412static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1413{
1414 struct nfs4_state *newstate;
1415 int ret;
1416
1417 /* memory barrier prior to reading state->n_* */
1418 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1419 smp_rmb();
1420 if (state->n_rdwr != 0) {
1421 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1422 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1423 if (ret != 0)
1424 return ret;
1425 if (newstate != state)
1426 return -ESTALE;
1427 }
1428 if (state->n_wronly != 0) {
1429 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1430 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1431 if (ret != 0)
1432 return ret;
1433 if (newstate != state)
1434 return -ESTALE;
1435 }
1436 if (state->n_rdonly != 0) {
1437 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1438 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1439 if (ret != 0)
1440 return ret;
1441 if (newstate != state)
1442 return -ESTALE;
1443 }
1444 /*
1445 * We may have performed cached opens for all three recoveries.
1446 * Check if we need to update the current stateid.
1447 */
1448 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1449 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1450 write_seqlock(&state->seqlock);
1451 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1452 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1453 write_sequnlock(&state->seqlock);
1454 }
1455 return 0;
1456}
1457
1458/*
1459 * OPEN_RECLAIM:
1460 * reclaim state on the server after a reboot.
1461 */
1462static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1463{
1464 struct nfs_delegation *delegation;
1465 struct nfs4_opendata *opendata;
1466 fmode_t delegation_type = 0;
1467 int status;
1468
1469 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1470 if (IS_ERR(opendata))
1471 return PTR_ERR(opendata);
1472 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1473 opendata->o_arg.fh = NFS_FH(state->inode);
1474 rcu_read_lock();
1475 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1476 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1477 delegation_type = delegation->type;
1478 rcu_read_unlock();
1479 opendata->o_arg.u.delegation_type = delegation_type;
1480 status = nfs4_open_recover(opendata, state);
1481 nfs4_opendata_put(opendata);
1482 return status;
1483}
1484
1485static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1486{
1487 struct nfs_server *server = NFS_SERVER(state->inode);
1488 struct nfs4_exception exception = { };
1489 int err;
1490 do {
1491 err = _nfs4_do_open_reclaim(ctx, state);
1492 if (err != -NFS4ERR_DELAY)
1493 break;
1494 nfs4_handle_exception(server, err, &exception);
1495 } while (exception.retry);
1496 return err;
1497}
1498
1499static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1500{
1501 struct nfs_open_context *ctx;
1502 int ret;
1503
1504 ctx = nfs4_state_find_open_context(state);
1505 if (IS_ERR(ctx))
1506 return PTR_ERR(ctx);
1507 ret = nfs4_do_open_reclaim(ctx, state);
1508 put_nfs_open_context(ctx);
1509 return ret;
1510}
1511
1512static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1513{
1514 struct nfs4_opendata *opendata;
1515 int ret;
1516
1517 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1518 if (IS_ERR(opendata))
1519 return PTR_ERR(opendata);
1520 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1521 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1522 ret = nfs4_open_recover(opendata, state);
1523 nfs4_opendata_put(opendata);
1524 return ret;
1525}
1526
1527int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1528{
1529 struct nfs4_exception exception = { };
1530 struct nfs_server *server = NFS_SERVER(state->inode);
1531 int err;
1532 do {
1533 err = _nfs4_open_delegation_recall(ctx, state, stateid);
1534 switch (err) {
1535 case 0:
1536 case -ENOENT:
1537 case -ESTALE:
1538 goto out;
1539 case -NFS4ERR_BADSESSION:
1540 case -NFS4ERR_BADSLOT:
1541 case -NFS4ERR_BAD_HIGH_SLOT:
1542 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1543 case -NFS4ERR_DEADSESSION:
1544 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1545 goto out;
1546 case -NFS4ERR_STALE_CLIENTID:
1547 case -NFS4ERR_STALE_STATEID:
1548 case -NFS4ERR_EXPIRED:
1549 /* Don't recall a delegation if it was lost */
1550 nfs4_schedule_lease_recovery(server->nfs_client);
1551 goto out;
1552 case -ERESTARTSYS:
1553 /*
1554 * The show must go on: exit, but mark the
1555 * stateid as needing recovery.
1556 */
1557 case -NFS4ERR_DELEG_REVOKED:
1558 case -NFS4ERR_ADMIN_REVOKED:
1559 case -NFS4ERR_BAD_STATEID:
1560 nfs_inode_find_state_and_recover(state->inode,
1561 stateid);
1562 nfs4_schedule_stateid_recovery(server, state);
1563 case -EKEYEXPIRED:
1564 /*
1565 * User RPCSEC_GSS context has expired.
1566 * We cannot recover this stateid now, so
1567 * skip it and allow recovery thread to
1568 * proceed.
1569 */
1570 case -ENOMEM:
1571 err = 0;
1572 goto out;
1573 }
1574 err = nfs4_handle_exception(server, err, &exception);
1575 } while (exception.retry);
1576out:
1577 return err;
1578}
1579
1580static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1581{
1582 struct nfs4_opendata *data = calldata;
1583
1584 data->rpc_status = task->tk_status;
1585 if (data->rpc_status == 0) {
1586 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1587 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1588 renew_lease(data->o_res.server, data->timestamp);
1589 data->rpc_done = 1;
1590 }
1591}
1592
1593static void nfs4_open_confirm_release(void *calldata)
1594{
1595 struct nfs4_opendata *data = calldata;
1596 struct nfs4_state *state = NULL;
1597
1598 /* If this request hasn't been cancelled, do nothing */
1599 if (data->cancelled == 0)
1600 goto out_free;
1601 /* In case of error, no cleanup! */
1602 if (!data->rpc_done)
1603 goto out_free;
1604 state = nfs4_opendata_to_nfs4_state(data);
1605 if (!IS_ERR(state))
1606 nfs4_close_state(state, data->o_arg.fmode);
1607out_free:
1608 nfs4_opendata_put(data);
1609}
1610
1611static const struct rpc_call_ops nfs4_open_confirm_ops = {
1612 .rpc_call_done = nfs4_open_confirm_done,
1613 .rpc_release = nfs4_open_confirm_release,
1614};
1615
1616/*
1617 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1618 */
1619static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1620{
1621 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1622 struct rpc_task *task;
1623 struct rpc_message msg = {
1624 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1625 .rpc_argp = &data->c_arg,
1626 .rpc_resp = &data->c_res,
1627 .rpc_cred = data->owner->so_cred,
1628 };
1629 struct rpc_task_setup task_setup_data = {
1630 .rpc_client = server->client,
1631 .rpc_message = &msg,
1632 .callback_ops = &nfs4_open_confirm_ops,
1633 .callback_data = data,
1634 .workqueue = nfsiod_workqueue,
1635 .flags = RPC_TASK_ASYNC,
1636 };
1637 int status;
1638
1639 kref_get(&data->kref);
1640 data->rpc_done = 0;
1641 data->rpc_status = 0;
1642 data->timestamp = jiffies;
1643 task = rpc_run_task(&task_setup_data);
1644 if (IS_ERR(task))
1645 return PTR_ERR(task);
1646 status = nfs4_wait_for_completion_rpc_task(task);
1647 if (status != 0) {
1648 data->cancelled = 1;
1649 smp_wmb();
1650 } else
1651 status = data->rpc_status;
1652 rpc_put_task(task);
1653 return status;
1654}
1655
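/*
 * rpc_call_prepare for OPEN: wait for the open seqid, skip the call
 * entirely if a cached open or delegation suffices, switch to
 * OPEN_NOATTR for CLAIM_PREVIOUS reclaims, and reserve a session slot
 * before starting the RPC.
 */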
1656static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1657{
1658 struct nfs4_opendata *data = calldata;
1659 struct nfs4_state_owner *sp = data->owner;
1660
1661 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1662 return;
1663 /*
1664 * Check if we still need to send an OPEN call, or if we can use
1665 * a delegation instead.
1666 */
1667 if (data->state != NULL) {
1668 struct nfs_delegation *delegation;
1669
1670 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1671 goto out_no_action;
1672 rcu_read_lock();
1673 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1674 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1675 can_open_delegated(delegation, data->o_arg.fmode))
1676 goto unlock_no_action;
1677 rcu_read_unlock();
1678 }
1679 /* Update client id. */
1680 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1681 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1682 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1683 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
1684 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1685 }
1686 data->timestamp = jiffies;
1687 if (nfs4_setup_sequence(data->o_arg.server,
1688 &data->o_arg.seq_args,
1689 &data->o_res.seq_res,
1690 task) != 0)
1691 nfs_release_seqid(data->o_arg.seqid);
1692 else
1693 rpc_call_start(task);
1694 return;
1695unlock_no_action:
1696 rcu_read_unlock();
1697out_no_action:
1698 task->tk_action = NULL;
1699
1700}
1701
1702static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1703{
1704 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1705 nfs4_open_prepare(task, calldata);
1706}
1707
1708static void nfs4_open_done(struct rpc_task *task, void *calldata)
1709{
1710 struct nfs4_opendata *data = calldata;
1711
1712 data->rpc_status = task->tk_status;
1713
1714 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1715 return;
1716
1717 if (task->tk_status == 0) {
1718 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1719 switch (data->o_res.f_attr->mode & S_IFMT) {
1720 case S_IFREG:
1721 break;
1722 case S_IFLNK:
1723 data->rpc_status = -ELOOP;
1724 break;
1725 case S_IFDIR:
1726 data->rpc_status = -EISDIR;
1727 break;
1728 default:
1729 data->rpc_status = -ENOTDIR;
1730 }
1731 }
1732 renew_lease(data->o_res.server, data->timestamp);
1733 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1734 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1735 }
1736 data->rpc_done = 1;
1737}
1738
1739static void nfs4_open_release(void *calldata)
1740{
1741 struct nfs4_opendata *data = calldata;
1742 struct nfs4_state *state = NULL;
1743
1744 /* If this request hasn't been cancelled, do nothing */
1745 if (data->cancelled == 0)
1746 goto out_free;
1747 /* In case of error, no cleanup! */
1748 if (data->rpc_status != 0 || !data->rpc_done)
1749 goto out_free;
1750 /* In case we need an open_confirm, no cleanup! */
1751 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1752 goto out_free;
1753 state = nfs4_opendata_to_nfs4_state(data);
1754 if (!IS_ERR(state))
1755 nfs4_close_state(state, data->o_arg.fmode);
1756out_free:
1757 nfs4_opendata_put(data);
1758}
1759
1760static const struct rpc_call_ops nfs4_open_ops = {
1761 .rpc_call_prepare = nfs4_open_prepare,
1762 .rpc_call_done = nfs4_open_done,
1763 .rpc_release = nfs4_open_release,
1764};
1765
1766static const struct rpc_call_ops nfs4_recover_open_ops = {
1767 .rpc_call_prepare = nfs4_recover_open_prepare,
1768 .rpc_call_done = nfs4_open_done,
1769 .rpc_release = nfs4_open_release,
1770};
1771
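/*
 * Send the OPEN compound asynchronously and wait for completion. If the
 * wait is interrupted, the request is flagged as cancelled so that the
 * release callback cleans up any state the server may have created.
 */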
1772static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1773{
1774 struct inode *dir = data->dir->d_inode;
1775 struct nfs_server *server = NFS_SERVER(dir);
1776 struct nfs_openargs *o_arg = &data->o_arg;
1777 struct nfs_openres *o_res = &data->o_res;
1778 struct rpc_task *task;
1779 struct rpc_message msg = {
1780 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1781 .rpc_argp = o_arg,
1782 .rpc_resp = o_res,
1783 .rpc_cred = data->owner->so_cred,
1784 };
1785 struct rpc_task_setup task_setup_data = {
1786 .rpc_client = server->client,
1787 .rpc_message = &msg,
1788 .callback_ops = &nfs4_open_ops,
1789 .callback_data = data,
1790 .workqueue = nfsiod_workqueue,
1791 .flags = RPC_TASK_ASYNC,
1792 };
1793 int status;
1794
1795 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1796 kref_get(&data->kref);
1797 data->rpc_done = 0;
1798 data->rpc_status = 0;
1799 data->cancelled = 0;
1800 if (isrecover)
1801 task_setup_data.callback_ops = &nfs4_recover_open_ops;
1802 task = rpc_run_task(&task_setup_data);
1803 if (IS_ERR(task))
1804 return PTR_ERR(task);
1805 status = nfs4_wait_for_completion_rpc_task(task);
1806 if (status != 0) {
1807 data->cancelled = 1;
1808 smp_wmb();
1809 } else
1810 status = data->rpc_status;
1811 rpc_put_task(task);
1812
1813 return status;
1814}
1815
1816static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1817{
1818 struct inode *dir = data->dir->d_inode;
1819 struct nfs_openres *o_res = &data->o_res;
1820 int status;
1821
1822 status = nfs4_run_open_task(data, 1);
1823 if (status != 0 || !data->rpc_done)
1824 return status;
1825
1826 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1827
1828 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1829 status = _nfs4_proc_open_confirm(data);
1830 if (status != 0)
1831 return status;
1832 }
1833
1834 return status;
1835}
1836
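/*
 * Use the ACCESS results piggy-backed on the OPEN reply to check that
 * the opener really has the rights implied by fmode; if not, close the
 * freshly opened state and return -EACCES.
 */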
1837static int nfs4_opendata_access(struct rpc_cred *cred,
1838 struct nfs4_opendata *opendata,
1839 struct nfs4_state *state, fmode_t fmode)
1840{
1841 struct nfs_access_entry cache;
1842 u32 mask;
1843
1844 /* access call failed or for some reason the server doesn't
1845 * support any access modes -- defer access call until later */
1846 if (opendata->o_res.access_supported == 0)
1847 return 0;
1848
1849 mask = 0;
1850 /* don't check MAY_WRITE - a newly created file may not have
1851 * write mode bits, but POSIX allows the creating process to write */
1852 if (fmode & FMODE_READ)
1853 mask |= MAY_READ;
1854 if (fmode & FMODE_EXEC)
1855 mask |= MAY_EXEC;
1856
1857 cache.cred = cred;
1858 cache.jiffies = jiffies;
1859 nfs_access_set_mask(&cache, opendata->o_res.access_result);
1860 nfs_access_add_cache(state->inode, &cache);
1861
1862 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
1863 return 0;
1864
1865 /* even though OPEN succeeded, access is denied. Close the file */
1866 nfs4_close_state(state, fmode);
1867 return -EACCES;
1868}
1869
1870/*
1871 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1872 */
1873static int _nfs4_proc_open(struct nfs4_opendata *data)
1874{
1875 struct inode *dir = data->dir->d_inode;
1876 struct nfs_server *server = NFS_SERVER(dir);
1877 struct nfs_openargs *o_arg = &data->o_arg;
1878 struct nfs_openres *o_res = &data->o_res;
1879 int status;
1880
1881 status = nfs4_run_open_task(data, 0);
1882 if (!data->rpc_done)
1883 return status;
1884 if (status != 0) {
1885 if (status == -NFS4ERR_BADNAME &&
1886 !(o_arg->open_flags & O_CREAT))
1887 return -ENOENT;
1888 return status;
1889 }
1890
1891 nfs_fattr_map_and_free_names(server, &data->f_attr);
1892
1893 if (o_arg->open_flags & O_CREAT)
1894 update_changeattr(dir, &o_res->cinfo);
1895 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1896 server->caps &= ~NFS_CAP_POSIX_LOCK;
1897	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1898 status = _nfs4_proc_open_confirm(data);
1899 if (status != 0)
1900 return status;
1901 }
1902 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1903 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1904 return 0;
1905}
1906
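/*
 * Wait for any lease recovery to complete.  If the lease is still marked
 * as expired after waiting, kick the state manager and retry a bounded
 * number of times before giving up with -EIO.
 */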
1907static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1908{
1909 unsigned int loop;
1910 int ret;
1911
1912 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1913 ret = nfs4_wait_clnt_recover(clp);
1914 if (ret != 0)
1915 break;
1916 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1917		    !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
1918 break;
1919 nfs4_schedule_state_manager(clp);
1920 ret = -EIO;
1921 }
1922 return ret;
1923}
1924
1925static int nfs4_recover_expired_lease(struct nfs_server *server)
1926{
1927 return nfs4_client_recover_expired_lease(server->nfs_client);
1928}
1929
1930/*
1931 * OPEN_EXPIRED:
1932 * reclaim state on the server after a network partition.
1933 * Assumes caller holds the appropriate lock
1934 */
1935static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1936{
1937 struct nfs4_opendata *opendata;
1938 int ret;
1939
1940 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1941 if (IS_ERR(opendata))
1942 return PTR_ERR(opendata);
1943 ret = nfs4_open_recover(opendata, state);
1944 if (ret == -ESTALE)
1945 d_drop(ctx->dentry);
1946 nfs4_opendata_put(opendata);
1947 return ret;
1948}
1949
1950static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1951{
1952 struct nfs_server *server = NFS_SERVER(state->inode);
1953 struct nfs4_exception exception = { };
1954 int err;
1955
1956 do {
1957 err = _nfs4_open_expired(ctx, state);
1958 switch (err) {
1959 default:
1960 goto out;
1961 case -NFS4ERR_GRACE:
1962 case -NFS4ERR_DELAY:
1963 nfs4_handle_exception(server, err, &exception);
1964 err = 0;
1965 }
1966 } while (exception.retry);
1967out:
1968 return err;
1969}
1970
1971static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1972{
1973 struct nfs_open_context *ctx;
1974 int ret;
1975
1976 ctx = nfs4_state_find_open_context(state);
1977 if (IS_ERR(ctx))
1978 return PTR_ERR(ctx);
1979 ret = nfs4_do_open_expired(ctx, state);
1980 put_nfs_open_context(ctx);
1981 return ret;
1982}
1983
1984#if defined(CONFIG_NFS_V4_1)
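/**
 * nfs41_clear_delegation_stateid - revalidate the delegation stateid
 *
 * @state: NFSv4 state for an inode
 *
 * If the server no longer recognizes the delegation stateid, free it
 * (unless the server reported NFS4ERR_BAD_STATEID), discard the bad
 * delegation and fall back to the open stateid.
 */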
1985static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
1986{
1987 struct nfs_server *server = NFS_SERVER(state->inode);
1988 nfs4_stateid *stateid = &state->stateid;
1989 int status;
1990
1991 /* If a state reset has been done, test_stateid is unneeded */
1992 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1993 return;
1994
1995 status = nfs41_test_stateid(server, stateid);
1996 if (status != NFS_OK) {
1997 /* Free the stateid unless the server explicitly
1998 * informs us the stateid is unrecognized. */
1999 if (status != -NFS4ERR_BAD_STATEID)
2000 nfs41_free_stateid(server, stateid);
2001 nfs_remove_bad_delegation(state->inode);
2002
2003 write_seqlock(&state->seqlock);
2004 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2005 write_sequnlock(&state->seqlock);
2006 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2007 }
2008}
2009
2010/**
2011 * nfs41_check_open_stateid - possibly free an open stateid
2012 *
2013 * @state: NFSv4 state for an inode
2014 *
2015 * Returns NFS_OK if recovery for this stateid is now finished.
2016 * Otherwise a negative NFS4ERR value is returned.
2017 */
2018static int nfs41_check_open_stateid(struct nfs4_state *state)
2019{
2020 struct nfs_server *server = NFS_SERVER(state->inode);
2021 nfs4_stateid *stateid = &state->open_stateid;
2022 int status;
2023
2024 /* If a state reset has been done, test_stateid is unneeded */
2025 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2026 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2027 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2028 return -NFS4ERR_BAD_STATEID;
2029
2030 status = nfs41_test_stateid(server, stateid);
2031 if (status != NFS_OK) {
2032 /* Free the stateid unless the server explicitly
2033 * informs us the stateid is unrecognized. */
2034 if (status != -NFS4ERR_BAD_STATEID)
2035 nfs41_free_stateid(server, stateid);
2036
2037 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2038 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2039 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2040 }
2041 return status;
2042}
2043
2044static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2045{
2046 int status;
2047
2048 nfs41_clear_delegation_stateid(state);
2049 status = nfs41_check_open_stateid(state);
2050 if (status != NFS_OK)
2051 status = nfs4_open_expired(sp, state);
2052 return status;
2053}
2054#endif
2055
2056/*
2057 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
2058 * fields corresponding to the attributes that were used to store the verifier.
2059 * Make sure we clobber those fields in the later setattr call.
2060 */
2061static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
2062{
2063 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2064 !(sattr->ia_valid & ATTR_ATIME_SET))
2065 sattr->ia_valid |= ATTR_ATIME;
2066
2067 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2068 !(sattr->ia_valid & ATTR_MTIME_SET))
2069 sattr->ia_valid |= ATTR_MTIME;
2070}
2071
2072/*
2073 * Returns a referenced nfs4_state
2074 */
2075static int _nfs4_do_open(struct inode *dir,
2076 struct dentry *dentry,
2077 fmode_t fmode,
2078 int flags,
2079 struct iattr *sattr,
2080 struct rpc_cred *cred,
2081 struct nfs4_state **res,
2082 struct nfs4_threshold **ctx_th)
2083{
2084 struct nfs4_state_owner *sp;
2085 struct nfs4_state *state = NULL;
2086 struct nfs_server *server = NFS_SERVER(dir);
2087 struct nfs4_opendata *opendata;
2088 int status;
2089
2090 /* Protect against reboot recovery conflicts */
2091 status = -ENOMEM;
2092 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2093 if (sp == NULL) {
2094 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2095 goto out_err;
2096 }
2097 status = nfs4_recover_expired_lease(server);
2098 if (status != 0)
2099 goto err_put_state_owner;
2100 if (dentry->d_inode != NULL)
2101 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
2102 status = -ENOMEM;
2103 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
2104 if (opendata == NULL)
2105 goto err_put_state_owner;
2106
2107 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2108 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2109 if (!opendata->f_attr.mdsthreshold)
2110 goto err_opendata_put;
2111 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2112 }
2113 if (dentry->d_inode != NULL)
2114 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
2115
2116 status = _nfs4_proc_open(opendata);
2117 if (status != 0)
2118 goto err_opendata_put;
2119
2120 state = nfs4_opendata_to_nfs4_state(opendata);
2121 status = PTR_ERR(state);
2122 if (IS_ERR(state))
2123 goto err_opendata_put;
2124 if (server->caps & NFS_CAP_POSIX_LOCK)
2125 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2126
2127 status = nfs4_opendata_access(cred, opendata, state, fmode);
2128 if (status != 0)
2129 goto err_opendata_put;
2130
2131 if (opendata->o_arg.open_flags & O_EXCL) {
2132 nfs4_exclusive_attrset(opendata, sattr);
2133
2134 nfs_fattr_init(opendata->o_res.f_attr);
2135 status = nfs4_do_setattr(state->inode, cred,
2136 opendata->o_res.f_attr, sattr,
2137 state);
2138 if (status == 0)
2139 nfs_setattr_update_inode(state->inode, sattr);
2140 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
2141 }
2142
2143 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
2144 *ctx_th = opendata->f_attr.mdsthreshold;
2145 else
2146 kfree(opendata->f_attr.mdsthreshold);
2147 opendata->f_attr.mdsthreshold = NULL;
2148
2149 nfs4_opendata_put(opendata);
2150 nfs4_put_state_owner(sp);
2151 *res = state;
2152 return 0;
2153err_opendata_put:
2154 kfree(opendata->f_attr.mdsthreshold);
2155 nfs4_opendata_put(opendata);
2156err_put_state_owner:
2157 nfs4_put_state_owner(sp);
2158out_err:
2159 *res = NULL;
2160 return status;
2161}
2162
2163
2164static struct nfs4_state *nfs4_do_open(struct inode *dir,
2165 struct dentry *dentry,
2166 fmode_t fmode,
2167 int flags,
2168 struct iattr *sattr,
2169 struct rpc_cred *cred,
2170 struct nfs4_threshold **ctx_th)
2171{
2172 struct nfs4_exception exception = { };
2173 struct nfs4_state *res;
2174 int status;
2175
2176 fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC;
2177 do {
2178 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
2179 &res, ctx_th);
2180 if (status == 0)
2181 break;
2182 /* NOTE: BAD_SEQID means the server and client disagree about the
2183 * book-keeping w.r.t. state-changing operations
2184 * (OPEN/CLOSE/LOCK/LOCKU...)
2185 * It is actually a sign of a bug on the client or on the server.
2186 *
2187 * If we receive a BAD_SEQID error in the particular case of
2188 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2189 * have unhashed the old state_owner for us, and that we can
2190 * therefore safely retry using a new one. We should still warn
2191 * the user though...
2192 */
2193 if (status == -NFS4ERR_BAD_SEQID) {
2194			pr_warn_ratelimited("NFS: v4 server %s"
2195 " returned a bad sequence-id error!\n",
2196 NFS_SERVER(dir)->nfs_client->cl_hostname);
2197 exception.retry = 1;
2198 continue;
2199 }
2200 /*
2201 * BAD_STATEID on OPEN means that the server cancelled our
2202 * state before it received the OPEN_CONFIRM.
2203 * Recover by retrying the request as per the discussion
2204 * on Page 181 of RFC3530.
2205 */
2206 if (status == -NFS4ERR_BAD_STATEID) {
2207 exception.retry = 1;
2208 continue;
2209 }
2210 if (status == -EAGAIN) {
2211 /* We must have found a delegation */
2212 exception.retry = 1;
2213 continue;
2214 }
2215 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
2216 status, &exception));
2217 } while (exception.retry);
2218 return res;
2219}
2220
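/*
 * Choose the stateid for the SETATTR: use an open/lock stateid when open
 * state is available, otherwise a write delegation stateid, and fall back
 * to the zero stateid.  A successful call made with open state also
 * renews the lease.
 */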
2221static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2222 struct nfs_fattr *fattr, struct iattr *sattr,
2223 struct nfs4_state *state)
2224{
2225 struct nfs_server *server = NFS_SERVER(inode);
2226 struct nfs_setattrargs arg = {
2227 .fh = NFS_FH(inode),
2228 .iap = sattr,
2229 .server = server,
2230 .bitmask = server->attr_bitmask,
2231 };
2232 struct nfs_setattrres res = {
2233 .fattr = fattr,
2234 .server = server,
2235 };
2236 struct rpc_message msg = {
2237 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2238 .rpc_argp = &arg,
2239 .rpc_resp = &res,
2240 .rpc_cred = cred,
2241 };
2242 unsigned long timestamp = jiffies;
2243 int status;
2244
2245 nfs_fattr_init(fattr);
2246
2247 if (state != NULL) {
2248 struct nfs_lockowner lockowner = {
2249 .l_owner = current->files,
2250 .l_pid = current->tgid,
2251 };
2252 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2253 &lockowner);
2254 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
2255 FMODE_WRITE)) {
2256 /* Use that stateid */
2257 } else
2258 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2259
2260 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2261 if (status == 0 && state != NULL)
2262 renew_lease(server, timestamp);
2263 return status;
2264}
2265
2266static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2267 struct nfs_fattr *fattr, struct iattr *sattr,
2268 struct nfs4_state *state)
2269{
2270 struct nfs_server *server = NFS_SERVER(inode);
2271 struct nfs4_exception exception = {
2272 .state = state,
2273 .inode = inode,
2274 };
2275 int err;
2276 do {
2277 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
2278 switch (err) {
2279 case -NFS4ERR_OPENMODE:
2280 if (state && !(state->state & FMODE_WRITE)) {
2281 err = -EBADF;
2282 if (sattr->ia_valid & ATTR_OPEN)
2283 err = -EACCES;
2284 goto out;
2285 }
2286 }
2287 err = nfs4_handle_exception(server, err, &exception);
2288 } while (exception.retry);
2289out:
2290 return err;
2291}
2292
2293struct nfs4_closedata {
2294 struct inode *inode;
2295 struct nfs4_state *state;
2296 struct nfs_closeargs arg;
2297 struct nfs_closeres res;
2298 struct nfs_fattr fattr;
2299 unsigned long timestamp;
2300 bool roc;
2301 u32 roc_barrier;
2302};
2303
2304static void nfs4_free_closedata(void *data)
2305{
2306 struct nfs4_closedata *calldata = data;
2307 struct nfs4_state_owner *sp = calldata->state->owner;
2308 struct super_block *sb = calldata->state->inode->i_sb;
2309
2310 if (calldata->roc)
2311 pnfs_roc_release(calldata->state->inode);
2312 nfs4_put_open_state(calldata->state);
2313 nfs_free_seqid(calldata->arg.seqid);
2314 nfs4_put_state_owner(sp);
2315 nfs_sb_deactive_async(sb);
2316 kfree(calldata);
2317}
2318
2319static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2320 fmode_t fmode)
2321{
2322 spin_lock(&state->owner->so_lock);
2323 if (!(fmode & FMODE_READ))
2324 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2325 if (!(fmode & FMODE_WRITE))
2326 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2327 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2328 spin_unlock(&state->owner->so_lock);
2329}
2330
2331static void nfs4_close_done(struct rpc_task *task, void *data)
2332{
2333 struct nfs4_closedata *calldata = data;
2334 struct nfs4_state *state = calldata->state;
2335 struct nfs_server *server = NFS_SERVER(calldata->inode);
2336
2337 dprintk("%s: begin!\n", __func__);
2338 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2339 return;
2340	/* We are done with the inode, and in the process of freeing the
2341	 * state_owner.  Keep the calldata around so we can process errors.
2342 */
2343 switch (task->tk_status) {
2344 case 0:
2345 if (calldata->roc)
2346 pnfs_roc_set_barrier(state->inode,
2347 calldata->roc_barrier);
2348 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2349 renew_lease(server, calldata->timestamp);
2350 nfs4_close_clear_stateid_flags(state,
2351 calldata->arg.fmode);
2352 break;
2353 case -NFS4ERR_STALE_STATEID:
2354 case -NFS4ERR_OLD_STATEID:
2355 case -NFS4ERR_BAD_STATEID:
2356 case -NFS4ERR_EXPIRED:
2357 if (calldata->arg.fmode == 0)
2358 break;
2359 default:
2360 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2361 rpc_restart_call_prepare(task);
2362 }
2363 nfs_release_seqid(calldata->arg.seqid);
2364 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2365 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2366}
2367
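/*
 * Work out whether an OPEN_DOWNGRADE or a full CLOSE is needed, based on
 * the share modes still in use on this state.  If nothing needs to change
 * on the server, the RPC is skipped entirely.
 */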
2368static void nfs4_close_prepare(struct rpc_task *task, void *data)
2369{
2370 struct nfs4_closedata *calldata = data;
2371 struct nfs4_state *state = calldata->state;
2372 struct inode *inode = calldata->inode;
2373 int call_close = 0;
2374
2375 dprintk("%s: begin!\n", __func__);
2376 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2377 return;
2378
2379 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2380 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2381 spin_lock(&state->owner->so_lock);
2382 /* Calculate the change in open mode */
2383 if (state->n_rdwr == 0) {
2384 if (state->n_rdonly == 0) {
2385 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2386 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2387 calldata->arg.fmode &= ~FMODE_READ;
2388 }
2389 if (state->n_wronly == 0) {
2390 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2391 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2392 calldata->arg.fmode &= ~FMODE_WRITE;
2393 }
2394 }
2395 spin_unlock(&state->owner->so_lock);
2396
2397 if (!call_close) {
2398 /* Note: exit _without_ calling nfs4_close_done */
2399 task->tk_action = NULL;
2400 goto out;
2401 }
2402
2403 if (calldata->arg.fmode == 0) {
2404 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2405 if (calldata->roc &&
2406 pnfs_roc_drain(inode, &calldata->roc_barrier, task))
2407 goto out;
2408 }
2409
2410 nfs_fattr_init(calldata->res.fattr);
2411 calldata->timestamp = jiffies;
2412 if (nfs4_setup_sequence(NFS_SERVER(inode),
2413 &calldata->arg.seq_args,
2414 &calldata->res.seq_res,
2415 task) != 0)
2416 nfs_release_seqid(calldata->arg.seqid);
2417 else
2418 rpc_call_start(task);
2419out:
2420 dprintk("%s: done!\n", __func__);
2421}
2422
2423static const struct rpc_call_ops nfs4_close_ops = {
2424 .rpc_call_prepare = nfs4_close_prepare,
2425 .rpc_call_done = nfs4_close_done,
2426 .rpc_release = nfs4_free_closedata,
2427};
2428
2429/*
2430 * It is possible for data to be read/written from a mem-mapped file
2431 * after the sys_close call (which hits the vfs layer as a flush).
2432 * This means that we can't safely call nfsv4 close on a file until
2433 * the inode is cleared. This in turn means that we are not good
2434 * NFSv4 citizens - we do not tell the server to update the file's
2435 * share state even when we are done with one of the three share
2436 * stateids in the inode.
2437 *
2438 * NOTE: Caller must be holding the sp->so_owner semaphore!
2439 */
2440int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2441{
2442 struct nfs_server *server = NFS_SERVER(state->inode);
2443 struct nfs4_closedata *calldata;
2444 struct nfs4_state_owner *sp = state->owner;
2445 struct rpc_task *task;
2446 struct rpc_message msg = {
2447 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2448 .rpc_cred = state->owner->so_cred,
2449 };
2450 struct rpc_task_setup task_setup_data = {
2451 .rpc_client = server->client,
2452 .rpc_message = &msg,
2453 .callback_ops = &nfs4_close_ops,
2454 .workqueue = nfsiod_workqueue,
2455 .flags = RPC_TASK_ASYNC,
2456 };
2457 int status = -ENOMEM;
2458
2459 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2460 if (calldata == NULL)
2461 goto out;
2462 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2463 calldata->inode = state->inode;
2464 calldata->state = state;
2465 calldata->arg.fh = NFS_FH(state->inode);
2466 calldata->arg.stateid = &state->open_stateid;
2467 /* Serialization for the sequence id */
2468 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2469 if (calldata->arg.seqid == NULL)
2470 goto out_free_calldata;
2471 calldata->arg.fmode = 0;
2472 calldata->arg.bitmask = server->cache_consistency_bitmask;
2473 calldata->res.fattr = &calldata->fattr;
2474 calldata->res.seqid = calldata->arg.seqid;
2475 calldata->res.server = server;
2476 calldata->roc = pnfs_roc(state->inode);
2477 nfs_sb_active(calldata->inode->i_sb);
2478
2479 msg.rpc_argp = &calldata->arg;
2480 msg.rpc_resp = &calldata->res;
2481 task_setup_data.callback_data = calldata;
2482 task = rpc_run_task(&task_setup_data);
2483 if (IS_ERR(task))
2484 return PTR_ERR(task);
2485 status = 0;
2486 if (wait)
2487 status = rpc_wait_for_completion_task(task);
2488 rpc_put_task(task);
2489 return status;
2490out_free_calldata:
2491 kfree(calldata);
2492out:
2493 nfs4_put_open_state(state);
2494 nfs4_put_state_owner(sp);
2495 return status;
2496}
2497
2498static struct inode *
2499nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2500{
2501 struct nfs4_state *state;
2502
2503 /* Protect against concurrent sillydeletes */
2504 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
2505 ctx->cred, &ctx->mdsthreshold);
2506 if (IS_ERR(state))
2507 return ERR_CAST(state);
2508 ctx->state = state;
2509 return igrab(state->inode);
2510}
2511
2512static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2513{
2514 if (ctx->state == NULL)
2515 return;
2516 if (is_sync)
2517 nfs4_close_sync(ctx->state, ctx->mode);
2518 else
2519 nfs4_close_state(ctx->state, ctx->mode);
2520}
2521
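/*
 * Probe the attributes supported by the server and translate the result
 * into NFS_CAP_* flags and the cache consistency bitmask used by later
 * requests.
 */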
2522static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2523{
2524 struct nfs4_server_caps_arg args = {
2525 .fhandle = fhandle,
2526 };
2527 struct nfs4_server_caps_res res = {};
2528 struct rpc_message msg = {
2529 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2530 .rpc_argp = &args,
2531 .rpc_resp = &res,
2532 };
2533 int status;
2534
2535 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2536 if (status == 0) {
2537 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2538 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2539 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2540 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2541 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2542 NFS_CAP_CTIME|NFS_CAP_MTIME);
2543 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2544 server->caps |= NFS_CAP_ACLS;
2545 if (res.has_links != 0)
2546 server->caps |= NFS_CAP_HARDLINKS;
2547 if (res.has_symlinks != 0)
2548 server->caps |= NFS_CAP_SYMLINKS;
2549 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2550 server->caps |= NFS_CAP_FILEID;
2551 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2552 server->caps |= NFS_CAP_MODE;
2553 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2554 server->caps |= NFS_CAP_NLINK;
2555 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2556 server->caps |= NFS_CAP_OWNER;
2557 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2558 server->caps |= NFS_CAP_OWNER_GROUP;
2559 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2560 server->caps |= NFS_CAP_ATIME;
2561 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2562 server->caps |= NFS_CAP_CTIME;
2563 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2564 server->caps |= NFS_CAP_MTIME;
2565
2566 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2567 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2568 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2569 server->acl_bitmask = res.acl_bitmask;
2570 server->fh_expire_type = res.fh_expire_type;
2571 }
2572
2573 return status;
2574}
2575
2576int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2577{
2578 struct nfs4_exception exception = { };
2579 int err;
2580 do {
2581 err = nfs4_handle_exception(server,
2582 _nfs4_server_capabilities(server, fhandle),
2583 &exception);
2584 } while (exception.retry);
2585 return err;
2586}
2587
2588static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2589 struct nfs_fsinfo *info)
2590{
2591 struct nfs4_lookup_root_arg args = {
2592 .bitmask = nfs4_fattr_bitmap,
2593 };
2594 struct nfs4_lookup_res res = {
2595 .server = server,
2596 .fattr = info->fattr,
2597 .fh = fhandle,
2598 };
2599 struct rpc_message msg = {
2600 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2601 .rpc_argp = &args,
2602 .rpc_resp = &res,
2603 };
2604
2605 nfs_fattr_init(info->fattr);
2606 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2607}
2608
2609static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2610 struct nfs_fsinfo *info)
2611{
2612 struct nfs4_exception exception = { };
2613 int err;
2614 do {
2615 err = _nfs4_lookup_root(server, fhandle, info);
2616 switch (err) {
2617 case 0:
2618 case -NFS4ERR_WRONGSEC:
2619 goto out;
2620 default:
2621 err = nfs4_handle_exception(server, err, &exception);
2622 }
2623 } while (exception.retry);
2624out:
2625 return err;
2626}
2627
2628static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2629 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2630{
2631 struct rpc_auth *auth;
2632 int ret;
2633
2634 auth = rpcauth_create(flavor, server->client);
2635 if (IS_ERR(auth)) {
2636 ret = -EIO;
2637 goto out;
2638 }
2639 ret = nfs4_lookup_root(server, fhandle, info);
2640out:
2641 return ret;
2642}
2643
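/*
 * Walk the registered auth flavors (skipping AUTH_UNIX, which has already
 * been tried) until the root lookup succeeds with one of them.
 */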
2644static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2645 struct nfs_fsinfo *info)
2646{
2647 int i, len, status = 0;
2648 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2649
2650 len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array));
2651 if (len < 0)
2652 return len;
2653
2654 for (i = 0; i < len; i++) {
2655 /* AUTH_UNIX is the default flavor if none was specified,
2656 * thus has already been tried. */
2657 if (flav_array[i] == RPC_AUTH_UNIX)
2658 continue;
2659
2660 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2661 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2662 continue;
2663 break;
2664 }
2665 /*
2666	 * -EACCES could mean that the user doesn't have the correct permissions
2667 * to access the mount. It could also mean that we tried to mount
2668 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2669 * existing mount programs don't handle -EACCES very well so it should
2670 * be mapped to -EPERM instead.
2671 */
2672 if (status == -EACCES)
2673 status = -EPERM;
2674 return status;
2675}
2676
2677/*
2678 * get the file handle for the "/" directory on the server
2679 */
2680int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
2681 struct nfs_fsinfo *info)
2682{
2683 int minor_version = server->nfs_client->cl_minorversion;
2684 int status = nfs4_lookup_root(server, fhandle, info);
2685 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2686 /*
2687 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2688 * by nfs4_map_errors() as this function exits.
2689 */
2690 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2691 if (status == 0)
2692 status = nfs4_server_capabilities(server, fhandle);
2693 if (status == 0)
2694 status = nfs4_do_fsinfo(server, fhandle, info);
2695 return nfs4_map_errors(status);
2696}
2697
2698static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
2699 struct nfs_fsinfo *info)
2700{
2701 int error;
2702 struct nfs_fattr *fattr = info->fattr;
2703
2704 error = nfs4_server_capabilities(server, mntfh);
2705 if (error < 0) {
2706 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
2707 return error;
2708 }
2709
2710 error = nfs4_proc_getattr(server, mntfh, fattr);
2711 if (error < 0) {
2712 dprintk("nfs4_get_root: getattr error = %d\n", -error);
2713 return error;
2714 }
2715
2716 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
2717 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
2718 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
2719
2720 return error;
2721}
2722
2723/*
2724 * Get locations and (maybe) other attributes of a referral.
2725 * Note that we'll actually follow the referral later when
2726 * we detect an fsid mismatch during inode revalidation.
2727 */
2728static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2729 const struct qstr *name, struct nfs_fattr *fattr,
2730 struct nfs_fh *fhandle)
2731{
2732 int status = -ENOMEM;
2733 struct page *page = NULL;
2734 struct nfs4_fs_locations *locations = NULL;
2735
2736 page = alloc_page(GFP_KERNEL);
2737 if (page == NULL)
2738 goto out;
2739 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2740 if (locations == NULL)
2741 goto out;
2742
2743 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2744 if (status != 0)
2745 goto out;
2746 /* Make sure server returned a different fsid for the referral */
2747 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2748 dprintk("%s: server did not return a different fsid for"
2749 " a referral at %s\n", __func__, name->name);
2750 status = -EIO;
2751 goto out;
2752 }
2753 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2754 nfs_fixup_referral_attributes(&locations->fattr);
2755
2756 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2757 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2758 memset(fhandle, 0, sizeof(struct nfs_fh));
2759out:
2760 if (page)
2761 __free_page(page);
2762 kfree(locations);
2763 return status;
2764}
2765
2766static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2767{
2768 struct nfs4_getattr_arg args = {
2769 .fh = fhandle,
2770 .bitmask = server->attr_bitmask,
2771 };
2772 struct nfs4_getattr_res res = {
2773 .fattr = fattr,
2774 .server = server,
2775 };
2776 struct rpc_message msg = {
2777 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2778 .rpc_argp = &args,
2779 .rpc_resp = &res,
2780 };
2781
2782 nfs_fattr_init(fattr);
2783 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2784}
2785
2786static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2787{
2788 struct nfs4_exception exception = { };
2789 int err;
2790 do {
2791 err = nfs4_handle_exception(server,
2792 _nfs4_proc_getattr(server, fhandle, fattr),
2793 &exception);
2794 } while (exception.retry);
2795 return err;
2796}
2797
2798/*
2799 * The file is not closed if it is opened due to a request to change
2800 * the size of the file. The open call will not be needed once the
2801 * VFS layer lookup-intents are implemented.
2802 *
2803 * Close is called when the inode is destroyed.
2804 * If we haven't opened the file for O_WRONLY, we need to do so in
2805 * the size-change case in order to obtain a stateid.
2806 *
2807 * Got race?
2808 * Because OPEN is always done by name in nfsv4, it is
2809 * possible that we opened a different file by the same
2810 * name. We can recognize this race condition, but we
2811 * can't do anything about it besides returning an error.
2812 *
2813 * This will be fixed with VFS changes (lookup-intent).
2814 */
2815static int
2816nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2817 struct iattr *sattr)
2818{
2819 struct inode *inode = dentry->d_inode;
2820 struct rpc_cred *cred = NULL;
2821 struct nfs4_state *state = NULL;
2822 int status;
2823
2824 if (pnfs_ld_layoutret_on_setattr(inode))
2825 pnfs_return_layout(inode);
2826
2827 nfs_fattr_init(fattr);
2828
2829 /* Deal with open(O_TRUNC) */
2830 if (sattr->ia_valid & ATTR_OPEN)
2831 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2832
2833 /* Optimization: if the end result is no change, don't RPC */
2834 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
2835 return 0;
2836
2837	/* Search for an existing file opened for writing */
2838 if (sattr->ia_valid & ATTR_FILE) {
2839 struct nfs_open_context *ctx;
2840
2841 ctx = nfs_file_open_context(sattr->ia_file);
2842 if (ctx) {
2843 cred = ctx->cred;
2844 state = ctx->state;
2845 }
2846 }
2847
2848 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2849 if (status == 0)
2850 nfs_setattr_update_inode(inode, sattr);
2851 return status;
2852}
2853
2854static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2855 const struct qstr *name, struct nfs_fh *fhandle,
2856 struct nfs_fattr *fattr)
2857{
2858 struct nfs_server *server = NFS_SERVER(dir);
2859 int status;
2860 struct nfs4_lookup_arg args = {
2861 .bitmask = server->attr_bitmask,
2862 .dir_fh = NFS_FH(dir),
2863 .name = name,
2864 };
2865 struct nfs4_lookup_res res = {
2866 .server = server,
2867 .fattr = fattr,
2868 .fh = fhandle,
2869 };
2870 struct rpc_message msg = {
2871 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2872 .rpc_argp = &args,
2873 .rpc_resp = &res,
2874 };
2875
2876 nfs_fattr_init(fattr);
2877
2878 dprintk("NFS call lookup %s\n", name->name);
2879 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2880 dprintk("NFS reply lookup: %d\n", status);
2881 return status;
2882}
2883
2884static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2885{
2886 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2887 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2888 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2889 fattr->nlink = 2;
2890}
2891
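/*
 * Common LOOKUP helper: handles referrals (NFS4ERR_MOVED) and security
 * negotiation (NFS4ERR_WRONGSEC).  On success, *clnt may be replaced with
 * a new rpc_clnt that the caller is responsible for shutting down.
 */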
2892static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2893 struct qstr *name, struct nfs_fh *fhandle,
2894 struct nfs_fattr *fattr)
2895{
2896 struct nfs4_exception exception = { };
2897 struct rpc_clnt *client = *clnt;
2898 int err;
2899 do {
2900 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2901 switch (err) {
2902 case -NFS4ERR_BADNAME:
2903 err = -ENOENT;
2904 goto out;
2905 case -NFS4ERR_MOVED:
2906 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2907 goto out;
2908 case -NFS4ERR_WRONGSEC:
2909 err = -EPERM;
2910 if (client != *clnt)
2911 goto out;
2912
2913 client = nfs4_create_sec_client(client, dir, name);
2914 if (IS_ERR(client))
2915 return PTR_ERR(client);
2916
2917 exception.retry = 1;
2918 break;
2919 default:
2920 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2921 }
2922 } while (exception.retry);
2923
2924out:
2925 if (err == 0)
2926 *clnt = client;
2927 else if (client != *clnt)
2928 rpc_shutdown_client(client);
2929
2930 return err;
2931}
2932
2933static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
2934 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2935{
2936 int status;
2937 struct rpc_clnt *client = NFS_CLIENT(dir);
2938
2939 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2940 if (client != NFS_CLIENT(dir)) {
2941 rpc_shutdown_client(client);
2942 nfs_fixup_secinfo_attributes(fattr);
2943 }
2944 return status;
2945}
2946
2947struct rpc_clnt *
2948nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2949 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2950{
2951 int status;
2952 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2953
2954 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2955 if (status < 0) {
2956 rpc_shutdown_client(client);
2957 return ERR_PTR(status);
2958 }
2959 return client;
2960}
2961
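/*
 * Translate the generic MAY_* bits into the NFSv4 ACCESS bits (directories
 * use the LOOKUP/DELETE variants) and record the server's reply in the
 * access entry.
 */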
2962static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2963{
2964 struct nfs_server *server = NFS_SERVER(inode);
2965 struct nfs4_accessargs args = {
2966 .fh = NFS_FH(inode),
2967 .bitmask = server->cache_consistency_bitmask,
2968 };
2969 struct nfs4_accessres res = {
2970 .server = server,
2971 };
2972 struct rpc_message msg = {
2973 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2974 .rpc_argp = &args,
2975 .rpc_resp = &res,
2976 .rpc_cred = entry->cred,
2977 };
2978 int mode = entry->mask;
2979 int status;
2980
2981 /*
2982 * Determine which access bits we want to ask for...
2983 */
2984 if (mode & MAY_READ)
2985 args.access |= NFS4_ACCESS_READ;
2986 if (S_ISDIR(inode->i_mode)) {
2987 if (mode & MAY_WRITE)
2988 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2989 if (mode & MAY_EXEC)
2990 args.access |= NFS4_ACCESS_LOOKUP;
2991 } else {
2992 if (mode & MAY_WRITE)
2993 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2994 if (mode & MAY_EXEC)
2995 args.access |= NFS4_ACCESS_EXECUTE;
2996 }
2997
2998 res.fattr = nfs_alloc_fattr();
2999 if (res.fattr == NULL)
3000 return -ENOMEM;
3001
3002 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3003 if (!status) {
3004 nfs_access_set_mask(entry, res.access);
3005 nfs_refresh_inode(inode, res.fattr);
3006 }
3007 nfs_free_fattr(res.fattr);
3008 return status;
3009}
3010
3011static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3012{
3013 struct nfs4_exception exception = { };
3014 int err;
3015 do {
3016 err = nfs4_handle_exception(NFS_SERVER(inode),
3017 _nfs4_proc_access(inode, entry),
3018 &exception);
3019 } while (exception.retry);
3020 return err;
3021}
3022
3023/*
3024 * TODO: For the time being, we don't try to get any attributes
3025 * along with any of the zero-copy operations READ, READDIR,
3026 * READLINK, WRITE.
3027 *
3028 * In the case of the first three, we want to put the GETATTR
3029 * after the read-type operation -- this is because it is hard
3030 * to predict the length of a GETATTR response in v4, and thus
3031 * align the READ data correctly. This means that the GETATTR
3032 * may end up partially falling into the page cache, and we should
3033 * shift it into the 'tail' of the xdr_buf before processing.
3034 * To do this efficiently, we need to know the total length
3035 * of data received, which doesn't seem to be available outside
3036 * of the RPC layer.
3037 *
3038 * In the case of WRITE, we also want to put the GETATTR after
3039 * the operation -- in this case because we want to make sure
3040 * we get the post-operation mtime and size.
3041 *
3042 * Both of these changes to the XDR layer would in fact be quite
3043 * minor, but I decided to leave them for a subsequent patch.
3044 */
3045static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3046 unsigned int pgbase, unsigned int pglen)
3047{
3048 struct nfs4_readlink args = {
3049 .fh = NFS_FH(inode),
3050 .pgbase = pgbase,
3051 .pglen = pglen,
3052 .pages = &page,
3053 };
3054 struct nfs4_readlink_res res;
3055 struct rpc_message msg = {
3056 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3057 .rpc_argp = &args,
3058 .rpc_resp = &res,
3059 };
3060
3061 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3062}
3063
3064static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3065 unsigned int pgbase, unsigned int pglen)
3066{
3067 struct nfs4_exception exception = { };
3068 int err;
3069 do {
3070 err = nfs4_handle_exception(NFS_SERVER(inode),
3071 _nfs4_proc_readlink(inode, page, pgbase, pglen),
3072 &exception);
3073 } while (exception.retry);
3074 return err;
3075}
3076
3077/*
3078 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3079 */
3080static int
3081nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3082 int flags)
3083{
3084 struct nfs_open_context *ctx;
3085 struct nfs4_state *state;
3086 int status = 0;
3087
3088 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3089 if (IS_ERR(ctx))
3090 return PTR_ERR(ctx);
3091
3092 sattr->ia_mode &= ~current_umask();
3093 state = nfs4_do_open(dir, dentry, ctx->mode,
3094 flags, sattr, ctx->cred,
3095 &ctx->mdsthreshold);
3096 d_drop(dentry);
3097 if (IS_ERR(state)) {
3098 status = PTR_ERR(state);
3099 goto out;
3100 }
3101 d_add(dentry, igrab(state->inode));
3102 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
3103 ctx->state = state;
3104out:
3105 put_nfs_open_context(ctx);
3106 return status;
3107}
3108
3109static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3110{
3111 struct nfs_server *server = NFS_SERVER(dir);
3112 struct nfs_removeargs args = {
3113 .fh = NFS_FH(dir),
3114 .name = *name,
3115 };
3116 struct nfs_removeres res = {
3117 .server = server,
3118 };
3119 struct rpc_message msg = {
3120 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3121 .rpc_argp = &args,
3122 .rpc_resp = &res,
3123 };
3124 int status;
3125
3126 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3127 if (status == 0)
3128 update_changeattr(dir, &res.cinfo);
3129 return status;
3130}
3131
3132static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3133{
3134 struct nfs4_exception exception = { };
3135 int err;
3136 do {
3137 err = nfs4_handle_exception(NFS_SERVER(dir),
3138 _nfs4_proc_remove(dir, name),
3139 &exception);
3140 } while (exception.retry);
3141 return err;
3142}
3143
3144static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3145{
3146 struct nfs_server *server = NFS_SERVER(dir);
3147 struct nfs_removeargs *args = msg->rpc_argp;
3148 struct nfs_removeres *res = msg->rpc_resp;
3149
3150 res->server = server;
3151 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3152 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
3153}
3154
3155static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3156{
3157 if (nfs4_setup_sequence(NFS_SERVER(data->dir),
3158 &data->args.seq_args,
3159 &data->res.seq_res,
3160 task))
3161 return;
3162 rpc_call_start(task);
3163}
3164
3165static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3166{
3167 struct nfs_removeres *res = task->tk_msg.rpc_resp;
3168
3169 if (!nfs4_sequence_done(task, &res->seq_res))
3170 return 0;
3171 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
3172 return 0;
3173 update_changeattr(dir, &res->cinfo);
3174 return 1;
3175}
3176
3177static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3178{
3179 struct nfs_server *server = NFS_SERVER(dir);
3180 struct nfs_renameargs *arg = msg->rpc_argp;
3181 struct nfs_renameres *res = msg->rpc_resp;
3182
3183 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3184 res->server = server;
3185 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
3186}
3187
3188static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3189{
3190 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3191 &data->args.seq_args,
3192 &data->res.seq_res,
3193 task))
3194 return;
3195 rpc_call_start(task);
3196}
3197
3198static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3199 struct inode *new_dir)
3200{
3201 struct nfs_renameres *res = task->tk_msg.rpc_resp;
3202
3203 if (!nfs4_sequence_done(task, &res->seq_res))
3204 return 0;
3205 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
3206 return 0;
3207
3208 update_changeattr(old_dir, &res->old_cinfo);
3209 update_changeattr(new_dir, &res->new_cinfo);
3210 return 1;
3211}
3212
3213static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3214 struct inode *new_dir, struct qstr *new_name)
3215{
3216 struct nfs_server *server = NFS_SERVER(old_dir);
3217 struct nfs_renameargs arg = {
3218 .old_dir = NFS_FH(old_dir),
3219 .new_dir = NFS_FH(new_dir),
3220 .old_name = old_name,
3221 .new_name = new_name,
3222 };
3223 struct nfs_renameres res = {
3224 .server = server,
3225 };
3226 struct rpc_message msg = {
3227 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
3228 .rpc_argp = &arg,
3229 .rpc_resp = &res,
3230 };
3231	int status;
3232
3233 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3234 if (!status) {
3235 update_changeattr(old_dir, &res.old_cinfo);
3236 update_changeattr(new_dir, &res.new_cinfo);
3237 }
3238 return status;
3239}
3240
3241static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3242 struct inode *new_dir, struct qstr *new_name)
3243{
3244 struct nfs4_exception exception = { };
3245 int err;
3246 do {
3247 err = nfs4_handle_exception(NFS_SERVER(old_dir),
3248 _nfs4_proc_rename(old_dir, old_name,
3249 new_dir, new_name),
3250 &exception);
3251 } while (exception.retry);
3252 return err;
3253}
3254
3255static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3256{
3257 struct nfs_server *server = NFS_SERVER(inode);
3258 struct nfs4_link_arg arg = {
3259 .fh = NFS_FH(inode),
3260 .dir_fh = NFS_FH(dir),
3261 .name = name,
3262 .bitmask = server->attr_bitmask,
3263 };
3264 struct nfs4_link_res res = {
3265 .server = server,
3266 };
3267 struct rpc_message msg = {
3268 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3269 .rpc_argp = &arg,
3270 .rpc_resp = &res,
3271 };
3272 int status = -ENOMEM;
3273
3274 res.fattr = nfs_alloc_fattr();
3275 if (res.fattr == NULL)
3276 goto out;
3277
3278 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3279 if (!status) {
3280 update_changeattr(dir, &res.cinfo);
3281 nfs_post_op_update_inode(inode, res.fattr);
3282 }
3283out:
3284 nfs_free_fattr(res.fattr);
3285 return status;
3286}
3287
3288static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3289{
3290 struct nfs4_exception exception = { };
3291 int err;
3292 do {
3293 err = nfs4_handle_exception(NFS_SERVER(inode),
3294 _nfs4_proc_link(inode, dir, name),
3295 &exception);
3296 } while (exception.retry);
3297 return err;
3298}
3299
3300struct nfs4_createdata {
3301 struct rpc_message msg;
3302 struct nfs4_create_arg arg;
3303 struct nfs4_create_res res;
3304 struct nfs_fh fh;
3305 struct nfs_fattr fattr;
3306};
3307
3308static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3309 struct qstr *name, struct iattr *sattr, u32 ftype)
3310{
3311 struct nfs4_createdata *data;
3312
3313 data = kzalloc(sizeof(*data), GFP_KERNEL);
3314 if (data != NULL) {
3315 struct nfs_server *server = NFS_SERVER(dir);
3316
3317 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3318 data->msg.rpc_argp = &data->arg;
3319 data->msg.rpc_resp = &data->res;
3320 data->arg.dir_fh = NFS_FH(dir);
3321 data->arg.server = server;
3322 data->arg.name = name;
3323 data->arg.attrs = sattr;
3324 data->arg.ftype = ftype;
3325 data->arg.bitmask = server->attr_bitmask;
3326 data->res.server = server;
3327 data->res.fh = &data->fh;
3328 data->res.fattr = &data->fattr;
3329 nfs_fattr_init(data->res.fattr);
3330 }
3331 return data;
3332}
3333
3334static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3335{
3336 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3337 &data->arg.seq_args, &data->res.seq_res, 1);
3338 if (status == 0) {
3339 update_changeattr(dir, &data->res.dir_cinfo);
3340 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3341 }
3342 return status;
3343}
3344
3345static void nfs4_free_createdata(struct nfs4_createdata *data)
3346{
3347 kfree(data);
3348}
3349
3350static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3351 struct page *page, unsigned int len, struct iattr *sattr)
3352{
3353 struct nfs4_createdata *data;
3354 int status = -ENAMETOOLONG;
3355
3356 if (len > NFS4_MAXPATHLEN)
3357 goto out;
3358
3359 status = -ENOMEM;
3360 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3361 if (data == NULL)
3362 goto out;
3363
3364 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3365 data->arg.u.symlink.pages = &page;
3366 data->arg.u.symlink.len = len;
3367
3368 status = nfs4_do_create(dir, dentry, data);
3369
3370 nfs4_free_createdata(data);
3371out:
3372 return status;
3373}
3374
3375static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3376 struct page *page, unsigned int len, struct iattr *sattr)
3377{
3378 struct nfs4_exception exception = { };
3379 int err;
3380 do {
3381 err = nfs4_handle_exception(NFS_SERVER(dir),
3382 _nfs4_proc_symlink(dir, dentry, page,
3383 len, sattr),
3384 &exception);
3385 } while (exception.retry);
3386 return err;
3387}
3388
3389static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3390 struct iattr *sattr)
3391{
3392 struct nfs4_createdata *data;
3393 int status = -ENOMEM;
3394
3395 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3396 if (data == NULL)
3397 goto out;
3398
3399 status = nfs4_do_create(dir, dentry, data);
3400
3401 nfs4_free_createdata(data);
3402out:
3403 return status;
3404}
3405
3406static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3407 struct iattr *sattr)
3408{
3409 struct nfs4_exception exception = { };
3410 int err;
3411
3412 sattr->ia_mode &= ~current_umask();
3413 do {
3414 err = nfs4_handle_exception(NFS_SERVER(dir),
3415 _nfs4_proc_mkdir(dir, dentry, sattr),
3416 &exception);
3417 } while (exception.retry);
3418 return err;
3419}
3420
3421static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3422 u64 cookie, struct page **pages, unsigned int count, int plus)
3423{
3424 struct inode *dir = dentry->d_inode;
3425 struct nfs4_readdir_arg args = {
3426 .fh = NFS_FH(dir),
3427 .pages = pages,
3428 .pgbase = 0,
3429 .count = count,
3430 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3431 .plus = plus,
3432 };
3433 struct nfs4_readdir_res res;
3434 struct rpc_message msg = {
3435 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3436 .rpc_argp = &args,
3437 .rpc_resp = &res,
3438 .rpc_cred = cred,
3439 };
3440 int status;
3441
3442 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3443 dentry->d_parent->d_name.name,
3444 dentry->d_name.name,
3445 (unsigned long long)cookie);
3446 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3447 res.pgbase = args.pgbase;
3448 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3449 if (status >= 0) {
3450 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3451 status += args.pgbase;
3452 }
3453
3454 nfs_invalidate_atime(dir);
3455
3456 dprintk("%s: returns %d\n", __func__, status);
3457 return status;
3458}
3459
3460static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3461 u64 cookie, struct page **pages, unsigned int count, int plus)
3462{
3463 struct nfs4_exception exception = { };
3464 int err;
3465 do {
3466 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3467 _nfs4_proc_readdir(dentry, cred, cookie,
3468 pages, count, plus),
3469 &exception);
3470 } while (exception.retry);
3471 return err;
3472}
3473
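/*
 * Translate the S_IF* type encoded in the mode into the matching NF4 file
 * type (FIFO, BLK, CHR or SOCK) before issuing the CREATE call.
 */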
3474static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3475 struct iattr *sattr, dev_t rdev)
3476{
3477 struct nfs4_createdata *data;
3478 int mode = sattr->ia_mode;
3479 int status = -ENOMEM;
3480
3481 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3482 if (data == NULL)
3483 goto out;
3484
3485 if (S_ISFIFO(mode))
3486 data->arg.ftype = NF4FIFO;
3487 else if (S_ISBLK(mode)) {
3488 data->arg.ftype = NF4BLK;
3489 data->arg.u.device.specdata1 = MAJOR(rdev);
3490 data->arg.u.device.specdata2 = MINOR(rdev);
3491	} else if (S_ISCHR(mode)) {
3493 data->arg.ftype = NF4CHR;
3494 data->arg.u.device.specdata1 = MAJOR(rdev);
3495 data->arg.u.device.specdata2 = MINOR(rdev);
3496 } else if (!S_ISSOCK(mode)) {
3497 status = -EINVAL;
3498 goto out_free;
3499 }
3500
3501 status = nfs4_do_create(dir, dentry, data);
3502out_free:
3503 nfs4_free_createdata(data);
3504out:
3505 return status;
3506}
3507
3508static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3509 struct iattr *sattr, dev_t rdev)
3510{
3511 struct nfs4_exception exception = { };
3512 int err;
3513
3514 sattr->ia_mode &= ~current_umask();
3515 do {
3516 err = nfs4_handle_exception(NFS_SERVER(dir),
3517 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3518 &exception);
3519 } while (exception.retry);
3520 return err;
3521}
3522
3523static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3524 struct nfs_fsstat *fsstat)
3525{
3526 struct nfs4_statfs_arg args = {
3527 .fh = fhandle,
3528 .bitmask = server->attr_bitmask,
3529 };
3530 struct nfs4_statfs_res res = {
3531 .fsstat = fsstat,
3532 };
3533 struct rpc_message msg = {
3534 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3535 .rpc_argp = &args,
3536 .rpc_resp = &res,
3537 };
3538
3539 nfs_fattr_init(fsstat->fattr);
3540 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3541}
3542
3543static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3544{
3545 struct nfs4_exception exception = { };
3546 int err;
3547 do {
3548 err = nfs4_handle_exception(server,
3549 _nfs4_proc_statfs(server, fhandle, fsstat),
3550 &exception);
3551 } while (exception.retry);
3552 return err;
3553}
3554
3555static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3556 struct nfs_fsinfo *fsinfo)
3557{
3558 struct nfs4_fsinfo_arg args = {
3559 .fh = fhandle,
3560 .bitmask = server->attr_bitmask,
3561 };
3562 struct nfs4_fsinfo_res res = {
3563 .fsinfo = fsinfo,
3564 };
3565 struct rpc_message msg = {
3566 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3567 .rpc_argp = &args,
3568 .rpc_resp = &res,
3569 };
3570
3571 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3572}
3573
3574static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3575{
3576 struct nfs4_exception exception = { };
3577 int err;
3578
3579 do {
3580 err = nfs4_handle_exception(server,
3581 _nfs4_do_fsinfo(server, fhandle, fsinfo),
3582 &exception);
3583 } while (exception.retry);
3584 return err;
3585}
3586
3587static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3588{
3589 int error;
3590
3591 nfs_fattr_init(fsinfo->fattr);
3592 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
3593 if (error == 0) {
3594 /* block layout checks this! */
3595 server->pnfs_blksize = fsinfo->blksize;
3596 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
3597 }
3598
3599 return error;
3600}
3601
3602static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3603 struct nfs_pathconf *pathconf)
3604{
3605 struct nfs4_pathconf_arg args = {
3606 .fh = fhandle,
3607 .bitmask = server->attr_bitmask,
3608 };
3609 struct nfs4_pathconf_res res = {
3610 .pathconf = pathconf,
3611 };
3612 struct rpc_message msg = {
3613 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3614 .rpc_argp = &args,
3615 .rpc_resp = &res,
3616 };
3617
3618 /* None of the pathconf attributes are mandatory to implement */
3619 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3620 memset(pathconf, 0, sizeof(*pathconf));
3621 return 0;
3622 }
3623
3624 nfs_fattr_init(pathconf->fattr);
3625 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3626}
3627
3628static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3629 struct nfs_pathconf *pathconf)
3630{
3631 struct nfs4_exception exception = { };
3632 int err;
3633
3634 do {
3635 err = nfs4_handle_exception(server,
3636 _nfs4_proc_pathconf(server, fhandle, pathconf),
3637 &exception);
3638 } while (exception.retry);
3639 return err;
3640}
3641
3642void __nfs4_read_done_cb(struct nfs_read_data *data)
3643{
3644 nfs_invalidate_atime(data->header->inode);
3645}
3646
3647static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3648{
3649 struct nfs_server *server = NFS_SERVER(data->header->inode);
3650
3651 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3652 rpc_restart_call_prepare(task);
3653 return -EAGAIN;
3654 }
3655
3656 __nfs4_read_done_cb(data);
3657 if (task->tk_status > 0)
3658 renew_lease(server, data->timestamp);
3659 return 0;
3660}
3661
3662static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3663{
3664
3665 dprintk("--> %s\n", __func__);
3666
3667 if (!nfs4_sequence_done(task, &data->res.seq_res))
3668 return -EAGAIN;
3669
3670 return data->read_done_cb ? data->read_done_cb(task, data) :
3671 nfs4_read_done_cb(task, data);
3672}
3673
3674static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3675{
3676 data->timestamp = jiffies;
3677 data->read_done_cb = nfs4_read_done_cb;
3678 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3679 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3680}
3681
3682static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3683{
3684 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3685 &data->args.seq_args,
3686 &data->res.seq_res,
3687 task))
3688 return;
3689 rpc_call_start(task);
3690}
3691
3692static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3693{
3694 struct inode *inode = data->header->inode;
3695
3696 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3697 rpc_restart_call_prepare(task);
3698 return -EAGAIN;
3699 }
3700 if (task->tk_status >= 0) {
3701 renew_lease(NFS_SERVER(inode), data->timestamp);
3702 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3703 }
3704 return 0;
3705}
3706
3707static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3708{
3709 if (!nfs4_sequence_done(task, &data->res.seq_res))
3710 return -EAGAIN;
3711 return data->write_done_cb ? data->write_done_cb(task, data) :
3712 nfs4_write_done_cb(task, data);
3713}
3714
3715static
3716bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
3717{
3718 const struct nfs_pgio_header *hdr = data->header;
3719
3720 /* Don't request attributes for pNFS or O_DIRECT writes */
3721 if (data->ds_clp != NULL || hdr->dreq != NULL)
3722 return false;
3723 /* Otherwise, request attributes if and only if we don't hold
3724 * a delegation
3725 */
3726 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
3727}
3728
3729static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3730{
3731 struct nfs_server *server = NFS_SERVER(data->header->inode);
3732
3733 if (!nfs4_write_need_cache_consistency_data(data)) {
3734 data->args.bitmask = NULL;
3735 data->res.fattr = NULL;
3736 } else
3737 data->args.bitmask = server->cache_consistency_bitmask;
3738
3739 if (!data->write_done_cb)
3740 data->write_done_cb = nfs4_write_done_cb;
3741 data->res.server = server;
3742 data->timestamp = jiffies;
3743
3744 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3745 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3746}
3747
3748static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3749{
3750 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3751 &data->args.seq_args,
3752 &data->res.seq_res,
3753 task))
3754 return;
3755 rpc_call_start(task);
3756}
3757
3758static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
3759{
3760 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3761 &data->args.seq_args,
3762 &data->res.seq_res,
3763 task))
3764 return;
3765 rpc_call_start(task);
3766}
3767
3768static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
3769{
3770 struct inode *inode = data->inode;
3771
3772 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3773 rpc_restart_call_prepare(task);
3774 return -EAGAIN;
3775 }
3776 return 0;
3777}
3778
3779static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
3780{
3781 if (!nfs4_sequence_done(task, &data->res.seq_res))
3782 return -EAGAIN;
3783 return data->commit_done_cb(task, data);
3784}
3785
3786static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
3787{
3788 struct nfs_server *server = NFS_SERVER(data->inode);
3789
3790 if (data->commit_done_cb == NULL)
3791 data->commit_done_cb = nfs4_commit_done_cb;
3792 data->res.server = server;
3793 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3794 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3795}
3796
3797struct nfs4_renewdata {
3798 struct nfs_client *client;
3799 unsigned long timestamp;
3800};
3801
3802/*
3803 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3804 * standalone procedure for queueing an asynchronous RENEW.
3805 */
3806static void nfs4_renew_release(void *calldata)
3807{
3808 struct nfs4_renewdata *data = calldata;
3809 struct nfs_client *clp = data->client;
3810
3811 if (atomic_read(&clp->cl_count) > 1)
3812 nfs4_schedule_state_renewal(clp);
3813 nfs_put_client(clp);
3814 kfree(data);
3815}
3816
3817static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3818{
3819 struct nfs4_renewdata *data = calldata;
3820 struct nfs_client *clp = data->client;
3821 unsigned long timestamp = data->timestamp;
3822
3823 if (task->tk_status < 0) {
3824 /* Unless we're shutting down, schedule state recovery! */
3825 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3826 return;
3827 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3828 nfs4_schedule_lease_recovery(clp);
3829 return;
3830 }
3831 nfs4_schedule_path_down_recovery(clp);
3832 }
3833 do_renew_lease(clp, timestamp);
3834}
3835
3836static const struct rpc_call_ops nfs4_renew_ops = {
3837 .rpc_call_done = nfs4_renew_done,
3838 .rpc_release = nfs4_renew_release,
3839};
3840
3841static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3842{
3843 struct rpc_message msg = {
3844 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3845 .rpc_argp = clp,
3846 .rpc_cred = cred,
3847 };
3848 struct nfs4_renewdata *data;
3849
3850 if (renew_flags == 0)
3851 return 0;
3852 if (!atomic_inc_not_zero(&clp->cl_count))
3853 return -EIO;
3854 data = kmalloc(sizeof(*data), GFP_NOFS);
3855 if (data == NULL)
3856 return -ENOMEM;
3857 data->client = clp;
3858 data->timestamp = jiffies;
3859 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3860 &nfs4_renew_ops, data);
3861}
3862
3863static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3864{
3865 struct rpc_message msg = {
3866 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3867 .rpc_argp = clp,
3868 .rpc_cred = cred,
3869 };
3870 unsigned long now = jiffies;
3871 int status;
3872
3873 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3874 if (status < 0)
3875 return status;
3876 do_renew_lease(clp, now);
3877 return 0;
3878}
3879
3880static inline int nfs4_server_supports_acls(struct nfs_server *server)
3881{
3882 return (server->caps & NFS_CAP_ACLS)
3883 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3884 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3885}
3886
3887/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
3888 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
3889 * the stack.
3890 */
3891#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
3892
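/*
 * Copy an ACL buffer into freshly allocated pages. Returns the number of
 * pages filled, or -ENOMEM after freeing any pages already allocated.
 */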
3893static int buf_to_pages_noslab(const void *buf, size_t buflen,
3894 struct page **pages, unsigned int *pgbase)
3895{
3896 struct page *newpage, **spages;
3897 int rc = 0;
3898 size_t len;
3899 spages = pages;
3900
3901 do {
3902 len = min_t(size_t, PAGE_SIZE, buflen);
3903 newpage = alloc_page(GFP_KERNEL);
3904
3905 if (newpage == NULL)
3906 goto unwind;
3907 memcpy(page_address(newpage), buf, len);
3908 buf += len;
3909 buflen -= len;
3910 *pages++ = newpage;
3911 rc++;
3912 } while (buflen != 0);
3913
3914 return rc;
3915
3916unwind:
3917 for (; rc > 0; rc--)
3918 __free_page(spages[rc-1]);
3919 return -ENOMEM;
3920}
3921
3922struct nfs4_cached_acl {
3923 int cached;
3924 size_t len;
3925 char data[0];
3926};
3927
3928static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3929{
3930 struct nfs_inode *nfsi = NFS_I(inode);
3931
3932 spin_lock(&inode->i_lock);
3933 kfree(nfsi->nfs4_acl);
3934 nfsi->nfs4_acl = acl;
3935 spin_unlock(&inode->i_lock);
3936}
3937
3938static void nfs4_zap_acl_attr(struct inode *inode)
3939{
3940 nfs4_set_cached_acl(inode, NULL);
3941}
3942
3943static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3944{
3945 struct nfs_inode *nfsi = NFS_I(inode);
3946 struct nfs4_cached_acl *acl;
3947 int ret = -ENOENT;
3948
3949 spin_lock(&inode->i_lock);
3950 acl = nfsi->nfs4_acl;
3951 if (acl == NULL)
3952 goto out;
3953 if (buf == NULL) /* user is just asking for length */
3954 goto out_len;
3955 if (acl->cached == 0)
3956 goto out;
3957 ret = -ERANGE; /* see getxattr(2) man page */
3958 if (acl->len > buflen)
3959 goto out;
3960 memcpy(buf, acl->data, acl->len);
3961out_len:
3962 ret = acl->len;
3963out:
3964 spin_unlock(&inode->i_lock);
3965 return ret;
3966}
3967
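/*
 * Cache the ACL returned by the server. If the header plus data fit in a
 * single page-sized allocation, the data itself is cached; otherwise only
 * the length is remembered.
 */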
3968static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3969{
3970 struct nfs4_cached_acl *acl;
3971 size_t buflen = sizeof(*acl) + acl_len;
3972
3973 if (buflen <= PAGE_SIZE) {
3974 acl = kmalloc(buflen, GFP_KERNEL);
3975 if (acl == NULL)
3976 goto out;
3977 acl->cached = 1;
3978 _copy_from_pages(acl->data, pages, pgbase, acl_len);
3979 } else {
3980 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3981 if (acl == NULL)
3982 goto out;
3983 acl->cached = 0;
3984 }
3985 acl->len = acl_len;
3986out:
3987 nfs4_set_cached_acl(inode, acl);
3988}
3989
3990/*
3991 * The getxattr API returns the required buffer length when called with a
3992 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3993 * the required buf. On a NULL buf, we send a page of data to the server
3994 * guessing that the ACL request can be serviced by a page. If so, we cache
3995 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3996 * the cache. If not, we throw away the page and cache only the required
3997 * length. The next getxattr call will then produce another round trip to
3998 * the server, this time with the input buf of the required size.
3999 */
4000static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4001{
4002 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4003 struct nfs_getaclargs args = {
4004 .fh = NFS_FH(inode),
4005 .acl_pages = pages,
4006 .acl_len = buflen,
4007 };
4008 struct nfs_getaclres res = {
4009 .acl_len = buflen,
4010 };
4011 struct rpc_message msg = {
4012 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4013 .rpc_argp = &args,
4014 .rpc_resp = &res,
4015 };
4016 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4017 int ret = -ENOMEM, i;
4018
4019 /* As long as we're doing a round trip to the server anyway,
4020 * let's be prepared for a page of acl data. */
4021 if (npages == 0)
4022 npages = 1;
4023 if (npages > ARRAY_SIZE(pages))
4024 return -ERANGE;
4025
4026 for (i = 0; i < npages; i++) {
4027 pages[i] = alloc_page(GFP_KERNEL);
4028 if (!pages[i])
4029 goto out_free;
4030 }
4031
4032 /* for decoding across pages */
4033 res.acl_scratch = alloc_page(GFP_KERNEL);
4034 if (!res.acl_scratch)
4035 goto out_free;
4036
4037 args.acl_len = npages * PAGE_SIZE;
4038 args.acl_pgbase = 0;
4039
4040 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4041 __func__, buf, buflen, npages, args.acl_len);
4042 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4043 &msg, &args.seq_args, &res.seq_res, 0);
4044 if (ret)
4045 goto out_free;
4046
4047 /* Handle the case where the passed-in buffer is too short */
4048 if (res.acl_flags & NFS4_ACL_TRUNC) {
4049 /* Did the user only issue a request for the acl length? */
4050 if (buf == NULL)
4051 goto out_ok;
4052 ret = -ERANGE;
4053 goto out_free;
4054 }
4055 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4056 if (buf)
4057 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4058out_ok:
4059 ret = res.acl_len;
4060out_free:
4061 for (i = 0; i < npages; i++)
4062 if (pages[i])
4063 __free_page(pages[i]);
4064 if (res.acl_scratch)
4065 __free_page(res.acl_scratch);
4066 return ret;
4067}
4068
4069static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4070{
4071 struct nfs4_exception exception = { };
4072 ssize_t ret;
4073 do {
4074 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4075 if (ret >= 0)
4076 break;
4077 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4078 } while (exception.retry);
4079 return ret;
4080}
4081
4082static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4083{
4084 struct nfs_server *server = NFS_SERVER(inode);
4085 int ret;
4086
4087 if (!nfs4_server_supports_acls(server))
4088 return -EOPNOTSUPP;
4089 ret = nfs_revalidate_inode(server, inode);
4090 if (ret < 0)
4091 return ret;
4092 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4093 nfs_zap_acl_cache(inode);
4094 ret = nfs4_read_cached_acl(inode, buf, buflen);
4095 if (ret != -ENOENT)
4096 /* -ENOENT is returned if there is no ACL or if there is an ACL
4097 * but no cached acl data, just the acl length */
4098 return ret;
4099 return nfs4_get_acl_uncached(inode, buf, buflen);
4100}
4101
4102static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4103{
4104 struct nfs_server *server = NFS_SERVER(inode);
4105 struct page *pages[NFS4ACL_MAXPAGES];
4106 struct nfs_setaclargs arg = {
4107 .fh = NFS_FH(inode),
4108 .acl_pages = pages,
4109 .acl_len = buflen,
4110 };
4111 struct nfs_setaclres res;
4112 struct rpc_message msg = {
4113 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4114 .rpc_argp = &arg,
4115 .rpc_resp = &res,
4116 };
4117 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4118 int ret, i;
4119
4120 if (!nfs4_server_supports_acls(server))
4121 return -EOPNOTSUPP;
4122 if (npages > ARRAY_SIZE(pages))
4123 return -ERANGE;
4124 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4125 if (i < 0)
4126 return i;
4127 nfs4_inode_return_delegation(inode);
4128 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4129
4130 /*
4131 * Free each page after tx, so the only ref left is
4132 * held by the network stack
4133 */
4134 for (; i > 0; i--)
4135 put_page(pages[i-1]);
4136
4137 /*
4138 * An ACL update can result in an inode attribute update,
4139 * so mark the attribute cache invalid.
4140 */
4141 spin_lock(&inode->i_lock);
4142 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4143 spin_unlock(&inode->i_lock);
4144 nfs_access_zap_cache(inode);
4145 nfs_zap_acl_cache(inode);
4146 return ret;
4147}
4148
4149static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4150{
4151 struct nfs4_exception exception = { };
4152 int err;
4153 do {
4154 err = nfs4_handle_exception(NFS_SERVER(inode),
4155 __nfs4_proc_set_acl(inode, buf, buflen),
4156 &exception);
4157 } while (exception.retry);
4158 return err;
4159}
4160
4161static int
4162nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
4163{
4164 struct nfs_client *clp = server->nfs_client;
4165
4166 if (task->tk_status >= 0)
4167 return 0;
4168 switch(task->tk_status) {
4169 case -NFS4ERR_DELEG_REVOKED:
4170 case -NFS4ERR_ADMIN_REVOKED:
4171 case -NFS4ERR_BAD_STATEID:
4172 if (state == NULL)
4173 break;
4174 nfs_remove_bad_delegation(state->inode);
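		/* fall through */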
4175 case -NFS4ERR_OPENMODE:
4176 if (state == NULL)
4177 break;
4178 nfs4_schedule_stateid_recovery(server, state);
4179 goto wait_on_recovery;
4180 case -NFS4ERR_EXPIRED:
4181 if (state != NULL)
4182 nfs4_schedule_stateid_recovery(server, state);
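		/* fall through */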
4183 case -NFS4ERR_STALE_STATEID:
4184 case -NFS4ERR_STALE_CLIENTID:
4185 nfs4_schedule_lease_recovery(clp);
4186 goto wait_on_recovery;
4187#if defined(CONFIG_NFS_V4_1)
4188 case -NFS4ERR_BADSESSION:
4189 case -NFS4ERR_BADSLOT:
4190 case -NFS4ERR_BAD_HIGH_SLOT:
4191 case -NFS4ERR_DEADSESSION:
4192 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4193 case -NFS4ERR_SEQ_FALSE_RETRY:
4194 case -NFS4ERR_SEQ_MISORDERED:
4195 dprintk("%s ERROR %d, Reset session\n", __func__,
4196 task->tk_status);
4197 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4198 task->tk_status = 0;
4199 return -EAGAIN;
4200#endif /* CONFIG_NFS_V4_1 */
4201 case -NFS4ERR_DELAY:
4202 nfs_inc_server_stats(server, NFSIOS_DELAY);
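		/* fall through */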
4203 case -NFS4ERR_GRACE:
4204 case -EKEYEXPIRED:
4205 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4206 task->tk_status = 0;
4207 return -EAGAIN;
4208 case -NFS4ERR_RETRY_UNCACHED_REP:
4209 case -NFS4ERR_OLD_STATEID:
4210 task->tk_status = 0;
4211 return -EAGAIN;
4212 }
4213 task->tk_status = nfs4_map_errors(task->tk_status);
4214 return 0;
4215wait_on_recovery:
4216 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4217 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4218 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4219 task->tk_status = 0;
4220 return -EAGAIN;
4221}
4222
4223static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4224 nfs4_verifier *bootverf)
4225{
4226 __be32 verf[2];
4227
4228 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4229 /* An impossible timestamp guarantees this value
4230 * will never match a generated boot time. */
4231 verf[0] = 0;
4232 verf[1] = (__be32)(NSEC_PER_SEC + 1);
4233 } else {
4234 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4235 verf[0] = (__be32)nn->boot_time.tv_sec;
4236 verf[1] = (__be32)nn->boot_time.tv_nsec;
4237 }
4238 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4239}
4240
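/*
 * Construct the non-uniform nfs_client_id4 string for NFSv4.0 from the
 * client's IP address, the server address, and the transport protocol name.
 */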
4241static unsigned int
4242nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
4243 char *buf, size_t len)
4244{
4245 unsigned int result;
4246
4247 rcu_read_lock();
4248 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
4249 clp->cl_ipaddr,
4250 rpc_peeraddr2str(clp->cl_rpcclient,
4251 RPC_DISPLAY_ADDR),
4252 rpc_peeraddr2str(clp->cl_rpcclient,
4253 RPC_DISPLAY_PROTO));
4254 rcu_read_unlock();
4255 return result;
4256}
4257
4258static unsigned int
4259nfs4_init_uniform_client_string(const struct nfs_client *clp,
4260 char *buf, size_t len)
4261{
4262 char *nodename = clp->cl_rpcclient->cl_nodename;
4263
4264 if (nfs4_client_id_uniquifier[0] != '\0')
4265 nodename = nfs4_client_id_uniquifier;
4266 return scnprintf(buf, len, "Linux NFSv%u.%u %s",
4267 clp->rpc_ops->version, clp->cl_minorversion,
4268 nodename);
4269}
4270
4271/**
4272 * nfs4_proc_setclientid - Negotiate client ID
4273 * @clp: state data structure
4274 * @program: RPC program for NFSv4 callback service
4275 * @port: IP port number for NFS4 callback service
4276 * @cred: RPC credential to use for this call
4277 * @res: where to place the result
4278 *
4279 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4280 */
4281int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
4282 unsigned short port, struct rpc_cred *cred,
4283 struct nfs4_setclientid_res *res)
4284{
4285 nfs4_verifier sc_verifier;
4286 struct nfs4_setclientid setclientid = {
4287 .sc_verifier = &sc_verifier,
4288 .sc_prog = program,
4289 .sc_cb_ident = clp->cl_cb_ident,
4290 };
4291 struct rpc_message msg = {
4292 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
4293 .rpc_argp = &setclientid,
4294 .rpc_resp = res,
4295 .rpc_cred = cred,
4296 };
4297 int status;
4298
4299 /* nfs_client_id4 */
4300 nfs4_init_boot_verifier(clp, &sc_verifier);
4301 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
4302 setclientid.sc_name_len =
4303 nfs4_init_uniform_client_string(clp,
4304 setclientid.sc_name,
4305 sizeof(setclientid.sc_name));
4306 else
4307 setclientid.sc_name_len =
4308 nfs4_init_nonuniform_client_string(clp,
4309 setclientid.sc_name,
4310 sizeof(setclientid.sc_name));
4311 /* cb_client4 */
4312 rcu_read_lock();
4313 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
4314 sizeof(setclientid.sc_netid),
4315 rpc_peeraddr2str(clp->cl_rpcclient,
4316 RPC_DISPLAY_NETID));
4317 rcu_read_unlock();
4318 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
4319 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
4320 clp->cl_ipaddr, port >> 8, port & 255);
4321
4322 dprintk("NFS call setclientid auth=%s, '%.*s'\n",
4323 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4324 setclientid.sc_name_len, setclientid.sc_name);
4325 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4326 dprintk("NFS reply setclientid: %d\n", status);
4327 return status;
4328}
4329
4330/**
4331 * nfs4_proc_setclientid_confirm - Confirm client ID
4332 * @clp: state data structure
4333 * @res: result of a previous SETCLIENTID
4334 * @cred: RPC credential to use for this call
4335 *
4336 * Returns zero, a negative errno, or a negative NFS4ERR status code.
4337 */
4338int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4339 struct nfs4_setclientid_res *arg,
4340 struct rpc_cred *cred)
4341{
4342 struct nfs_fsinfo fsinfo;
4343 struct rpc_message msg = {
4344 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4345 .rpc_argp = arg,
4346 .rpc_resp = &fsinfo,
4347 .rpc_cred = cred,
4348 };
4349 unsigned long now;
4350 int status;
4351
4352 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
4353 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4354 clp->cl_clientid);
4355 now = jiffies;
4356 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4357 if (status == 0) {
4358 spin_lock(&clp->cl_lock);
4359 clp->cl_lease_time = fsinfo.lease_time * HZ;
4360 clp->cl_last_renewal = now;
4361 spin_unlock(&clp->cl_lock);
4362 }
4363 dprintk("NFS reply setclientid_confirm: %d\n", status);
4364 return status;
4365}
4366
4367struct nfs4_delegreturndata {
4368 struct nfs4_delegreturnargs args;
4369 struct nfs4_delegreturnres res;
4370 struct nfs_fh fh;
4371 nfs4_stateid stateid;
4372 unsigned long timestamp;
4373 struct nfs_fattr fattr;
4374 int rpc_status;
4375};
4376
4377static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4378{
4379 struct nfs4_delegreturndata *data = calldata;
4380
4381 if (!nfs4_sequence_done(task, &data->res.seq_res))
4382 return;
4383
4384 switch (task->tk_status) {
4385 case -NFS4ERR_STALE_STATEID:
4386 case -NFS4ERR_EXPIRED:
4387 case 0:
4388 renew_lease(data->res.server, data->timestamp);
4389 break;
4390 default:
4391 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4392 -EAGAIN) {
4393 rpc_restart_call_prepare(task);
4394 return;
4395 }
4396 }
4397 data->rpc_status = task->tk_status;
4398}
4399
4400static void nfs4_delegreturn_release(void *calldata)
4401{
4402 kfree(calldata);
4403}
4404
4405#if defined(CONFIG_NFS_V4_1)
4406static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4407{
4408 struct nfs4_delegreturndata *d_data;
4409
4410 d_data = (struct nfs4_delegreturndata *)data;
4411
4412 if (nfs4_setup_sequence(d_data->res.server,
4413 &d_data->args.seq_args,
4414 &d_data->res.seq_res, task))
4415 return;
4416 rpc_call_start(task);
4417}
4418#endif /* CONFIG_NFS_V4_1 */
4419
4420static const struct rpc_call_ops nfs4_delegreturn_ops = {
4421#if defined(CONFIG_NFS_V4_1)
4422 .rpc_call_prepare = nfs4_delegreturn_prepare,
4423#endif /* CONFIG_NFS_V4_1 */
4424 .rpc_call_done = nfs4_delegreturn_done,
4425 .rpc_release = nfs4_delegreturn_release,
4426};
4427
4428static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4429{
4430 struct nfs4_delegreturndata *data;
4431 struct nfs_server *server = NFS_SERVER(inode);
4432 struct rpc_task *task;
4433 struct rpc_message msg = {
4434 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4435 .rpc_cred = cred,
4436 };
4437 struct rpc_task_setup task_setup_data = {
4438 .rpc_client = server->client,
4439 .rpc_message = &msg,
4440 .callback_ops = &nfs4_delegreturn_ops,
4441 .flags = RPC_TASK_ASYNC,
4442 };
4443 int status = 0;
4444
4445 data = kzalloc(sizeof(*data), GFP_NOFS);
4446 if (data == NULL)
4447 return -ENOMEM;
4448 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4449 data->args.fhandle = &data->fh;
4450 data->args.stateid = &data->stateid;
4451 data->args.bitmask = server->cache_consistency_bitmask;
4452 nfs_copy_fh(&data->fh, NFS_FH(inode));
4453 nfs4_stateid_copy(&data->stateid, stateid);
4454 data->res.fattr = &data->fattr;
4455 data->res.server = server;
4456 nfs_fattr_init(data->res.fattr);
4457 data->timestamp = jiffies;
4458 data->rpc_status = 0;
4459
4460 task_setup_data.callback_data = data;
4461 msg.rpc_argp = &data->args;
4462 msg.rpc_resp = &data->res;
4463 task = rpc_run_task(&task_setup_data);
4464 if (IS_ERR(task))
4465 return PTR_ERR(task);
4466 if (!issync)
4467 goto out;
4468 status = nfs4_wait_for_completion_rpc_task(task);
4469 if (status != 0)
4470 goto out;
4471 status = data->rpc_status;
4472 if (status == 0)
4473 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
4474 else
4475 nfs_refresh_inode(inode, &data->fattr);
4476out:
4477 rpc_put_task(task);
4478 return status;
4479}
4480
4481int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4482{
4483 struct nfs_server *server = NFS_SERVER(inode);
4484 struct nfs4_exception exception = { };
4485 int err;
4486 do {
4487 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4488 switch (err) {
4489 case -NFS4ERR_STALE_STATEID:
4490 case -NFS4ERR_EXPIRED:
4491 case 0:
4492 return 0;
4493 }
4494 err = nfs4_handle_exception(server, err, &exception);
4495 } while (exception.retry);
4496 return err;
4497}
4498
4499#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4500#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4501
4502/*
4503 * sleep, with exponential backoff, and retry the LOCK operation.
4504 */
4505static unsigned long
4506nfs4_set_lock_task_retry(unsigned long timeout)
4507{
4508 freezable_schedule_timeout_killable(timeout);
4509 timeout <<= 1;
4510 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4511 return NFS4_LOCK_MAXTIMEOUT;
4512 return timeout;
4513}
4514
4515static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4516{
4517 struct inode *inode = state->inode;
4518 struct nfs_server *server = NFS_SERVER(inode);
4519 struct nfs_client *clp = server->nfs_client;
4520 struct nfs_lockt_args arg = {
4521 .fh = NFS_FH(inode),
4522 .fl = request,
4523 };
4524 struct nfs_lockt_res res = {
4525 .denied = request,
4526 };
4527 struct rpc_message msg = {
4528 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4529 .rpc_argp = &arg,
4530 .rpc_resp = &res,
4531 .rpc_cred = state->owner->so_cred,
4532 };
4533 struct nfs4_lock_state *lsp;
4534 int status;
4535
4536 arg.lock_owner.clientid = clp->cl_clientid;
4537 status = nfs4_set_lock_state(state, request);
4538 if (status != 0)
4539 goto out;
4540 lsp = request->fl_u.nfs4_fl.owner;
4541 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4542 arg.lock_owner.s_dev = server->s_dev;
4543 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4544 switch (status) {
4545 case 0:
4546 request->fl_type = F_UNLCK;
4547 break;
4548 case -NFS4ERR_DENIED:
4549 status = 0;
4550 }
4551 request->fl_ops->fl_release_private(request);
4552out:
4553 return status;
4554}
4555
4556static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4557{
4558 struct nfs4_exception exception = { };
4559 int err;
4560
4561 do {
4562 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4563 _nfs4_proc_getlk(state, cmd, request),
4564 &exception);
4565 } while (exception.retry);
4566 return err;
4567}
4568
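/*
 * Apply the lock request locally through the VFS, dispatching on whether
 * this is a POSIX (fcntl) lock or a BSD flock() style lock.
 */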
4569static int do_vfs_lock(struct file *file, struct file_lock *fl)
4570{
4571 int res = 0;
4572 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4573 case FL_POSIX:
4574 res = posix_lock_file_wait(file, fl);
4575 break;
4576 case FL_FLOCK:
4577 res = flock_lock_file_wait(file, fl);
4578 break;
4579 default:
4580 BUG();
4581 }
4582 return res;
4583}
4584
4585struct nfs4_unlockdata {
4586 struct nfs_locku_args arg;
4587 struct nfs_locku_res res;
4588 struct nfs4_lock_state *lsp;
4589 struct nfs_open_context *ctx;
4590 struct file_lock fl;
4591 const struct nfs_server *server;
4592 unsigned long timestamp;
4593};
4594
4595static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4596 struct nfs_open_context *ctx,
4597 struct nfs4_lock_state *lsp,
4598 struct nfs_seqid *seqid)
4599{
4600 struct nfs4_unlockdata *p;
4601 struct inode *inode = lsp->ls_state->inode;
4602
4603 p = kzalloc(sizeof(*p), GFP_NOFS);
4604 if (p == NULL)
4605 return NULL;
4606 p->arg.fh = NFS_FH(inode);
4607 p->arg.fl = &p->fl;
4608 p->arg.seqid = seqid;
4609 p->res.seqid = seqid;
4610 p->arg.stateid = &lsp->ls_stateid;
4611 p->lsp = lsp;
4612 atomic_inc(&lsp->ls_count);
4613 /* Ensure we don't close file until we're done freeing locks! */
4614 p->ctx = get_nfs_open_context(ctx);
4615 memcpy(&p->fl, fl, sizeof(p->fl));
4616 p->server = NFS_SERVER(inode);
4617 return p;
4618}
4619
4620static void nfs4_locku_release_calldata(void *data)
4621{
4622 struct nfs4_unlockdata *calldata = data;
4623 nfs_free_seqid(calldata->arg.seqid);
4624 nfs4_put_lock_state(calldata->lsp);
4625 put_nfs_open_context(calldata->ctx);
4626 kfree(calldata);
4627}
4628
4629static void nfs4_locku_done(struct rpc_task *task, void *data)
4630{
4631 struct nfs4_unlockdata *calldata = data;
4632
4633 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4634 return;
4635 switch (task->tk_status) {
4636 case 0:
4637 nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4638 &calldata->res.stateid);
4639 renew_lease(calldata->server, calldata->timestamp);
4640 break;
4641 case -NFS4ERR_BAD_STATEID:
4642 case -NFS4ERR_OLD_STATEID:
4643 case -NFS4ERR_STALE_STATEID:
4644 case -NFS4ERR_EXPIRED:
4645 break;
4646 default:
4647 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4648 rpc_restart_call_prepare(task);
4649 }
4650 nfs_release_seqid(calldata->arg.seqid);
4651}
4652
4653static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4654{
4655 struct nfs4_unlockdata *calldata = data;
4656
4657 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4658 return;
4659 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
4660 /* Note: exit _without_ running nfs4_locku_done */
4661 task->tk_action = NULL;
4662 return;
4663 }
4664 calldata->timestamp = jiffies;
4665 if (nfs4_setup_sequence(calldata->server,
4666 &calldata->arg.seq_args,
4667 &calldata->res.seq_res,
4668 task) != 0)
4669 nfs_release_seqid(calldata->arg.seqid);
4670 else
4671 rpc_call_start(task);
4672}
4673
4674static const struct rpc_call_ops nfs4_locku_ops = {
4675 .rpc_call_prepare = nfs4_locku_prepare,
4676 .rpc_call_done = nfs4_locku_done,
4677 .rpc_release = nfs4_locku_release_calldata,
4678};
4679
4680static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4681 struct nfs_open_context *ctx,
4682 struct nfs4_lock_state *lsp,
4683 struct nfs_seqid *seqid)
4684{
4685 struct nfs4_unlockdata *data;
4686 struct rpc_message msg = {
4687 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4688 .rpc_cred = ctx->cred,
4689 };
4690 struct rpc_task_setup task_setup_data = {
4691 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4692 .rpc_message = &msg,
4693 .callback_ops = &nfs4_locku_ops,
4694 .workqueue = nfsiod_workqueue,
4695 .flags = RPC_TASK_ASYNC,
4696 };
4697
4698 /* Ensure this is an unlock - when canceling a lock, the
4699 * canceled lock is passed in, and it won't be an unlock.
4700 */
4701 fl->fl_type = F_UNLCK;
4702
4703 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4704 if (data == NULL) {
4705 nfs_free_seqid(seqid);
4706 return ERR_PTR(-ENOMEM);
4707 }
4708
4709 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4710 msg.rpc_argp = &data->arg;
4711 msg.rpc_resp = &data->res;
4712 task_setup_data.callback_data = data;
4713 return rpc_run_task(&task_setup_data);
4714}
4715
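/*
 * Release a byte-range lock: drop it locally first, and only send LOCKU to
 * the server if the lock actually existed and is not covered by a delegation.
 */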
4716static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4717{
4718 struct nfs_inode *nfsi = NFS_I(state->inode);
4719 struct nfs_seqid *seqid;
4720 struct nfs4_lock_state *lsp;
4721 struct rpc_task *task;
4722 int status = 0;
4723 unsigned char fl_flags = request->fl_flags;
4724
4725 status = nfs4_set_lock_state(state, request);
4726 /* Unlock _before_ we do the RPC call */
4727 request->fl_flags |= FL_EXISTS;
4728 down_read(&nfsi->rwsem);
4729 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4730 up_read(&nfsi->rwsem);
4731 goto out;
4732 }
4733 up_read(&nfsi->rwsem);
4734 if (status != 0)
4735 goto out;
4736 /* Is this a delegated lock? */
4737 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4738 goto out;
4739 lsp = request->fl_u.nfs4_fl.owner;
4740 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4741 status = -ENOMEM;
4742 if (seqid == NULL)
4743 goto out;
4744 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4745 status = PTR_ERR(task);
4746 if (IS_ERR(task))
4747 goto out;
4748 status = nfs4_wait_for_completion_rpc_task(task);
4749 rpc_put_task(task);
4750out:
4751 request->fl_flags = fl_flags;
4752 return status;
4753}
4754
4755struct nfs4_lockdata {
4756 struct nfs_lock_args arg;
4757 struct nfs_lock_res res;
4758 struct nfs4_lock_state *lsp;
4759 struct nfs_open_context *ctx;
4760 struct file_lock fl;
4761 unsigned long timestamp;
4762 int rpc_status;
4763 int cancelled;
4764 struct nfs_server *server;
4765};
4766
4767static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4768 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4769 gfp_t gfp_mask)
4770{
4771 struct nfs4_lockdata *p;
4772 struct inode *inode = lsp->ls_state->inode;
4773 struct nfs_server *server = NFS_SERVER(inode);
4774
4775 p = kzalloc(sizeof(*p), gfp_mask);
4776 if (p == NULL)
4777 return NULL;
4778
4779 p->arg.fh = NFS_FH(inode);
4780 p->arg.fl = &p->fl;
4781 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4782 if (p->arg.open_seqid == NULL)
4783 goto out_free;
4784 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4785 if (p->arg.lock_seqid == NULL)
4786 goto out_free_seqid;
4787 p->arg.lock_stateid = &lsp->ls_stateid;
4788 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4789 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4790 p->arg.lock_owner.s_dev = server->s_dev;
4791 p->res.lock_seqid = p->arg.lock_seqid;
4792 p->lsp = lsp;
4793 p->server = server;
4794 atomic_inc(&lsp->ls_count);
4795 p->ctx = get_nfs_open_context(ctx);
4796 memcpy(&p->fl, fl, sizeof(p->fl));
4797 return p;
4798out_free_seqid:
4799 nfs_free_seqid(p->arg.open_seqid);
4800out_free:
4801 kfree(p);
4802 return NULL;
4803}
4804
4805static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4806{
4807 struct nfs4_lockdata *data = calldata;
4808 struct nfs4_state *state = data->lsp->ls_state;
4809
4810 dprintk("%s: begin!\n", __func__);
4811 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4812 return;
4813 /* Do we need to do an open_to_lock_owner? */
4814 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4815 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4816 goto out_release_lock_seqid;
4817 data->arg.open_stateid = &state->stateid;
4818 data->arg.new_lock_owner = 1;
4819 data->res.open_seqid = data->arg.open_seqid;
4820 } else
4821 data->arg.new_lock_owner = 0;
4822 data->timestamp = jiffies;
4823 if (nfs4_setup_sequence(data->server,
4824 &data->arg.seq_args,
4825 &data->res.seq_res,
4826 task) == 0) {
4827 rpc_call_start(task);
4828 return;
4829 }
4830 nfs_release_seqid(data->arg.open_seqid);
4831out_release_lock_seqid:
4832 nfs_release_seqid(data->arg.lock_seqid);
4833 dprintk("%s: done!, ret = %d\n", __func__, task->tk_status);
4834}
4835
4836static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4837{
4838 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4839 nfs4_lock_prepare(task, calldata);
4840}
4841
4842static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4843{
4844 struct nfs4_lockdata *data = calldata;
4845
4846 dprintk("%s: begin!\n", __func__);
4847
4848 if (!nfs4_sequence_done(task, &data->res.seq_res))
4849 return;
4850
4851 data->rpc_status = task->tk_status;
4852 if (data->arg.new_lock_owner != 0) {
4853 if (data->rpc_status == 0)
4854 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4855 else
4856 goto out;
4857 }
4858 if (data->rpc_status == 0) {
4859 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4860 set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags);
4861 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4862 }
4863out:
4864 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4865}
4866
4867static void nfs4_lock_release(void *calldata)
4868{
4869 struct nfs4_lockdata *data = calldata;
4870
4871 dprintk("%s: begin!\n", __func__);
4872 nfs_free_seqid(data->arg.open_seqid);
4873 if (data->cancelled != 0) {
4874 struct rpc_task *task;
4875 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4876 data->arg.lock_seqid);
4877 if (!IS_ERR(task))
4878 rpc_put_task_async(task);
4879 dprintk("%s: cancelling lock!\n", __func__);
4880 } else
4881 nfs_free_seqid(data->arg.lock_seqid);
4882 nfs4_put_lock_state(data->lsp);
4883 put_nfs_open_context(data->ctx);
4884 kfree(data);
4885 dprintk("%s: done!\n", __func__);
4886}
4887
4888static const struct rpc_call_ops nfs4_lock_ops = {
4889 .rpc_call_prepare = nfs4_lock_prepare,
4890 .rpc_call_done = nfs4_lock_done,
4891 .rpc_release = nfs4_lock_release,
4892};
4893
4894static const struct rpc_call_ops nfs4_recover_lock_ops = {
4895 .rpc_call_prepare = nfs4_recover_lock_prepare,
4896 .rpc_call_done = nfs4_lock_done,
4897 .rpc_release = nfs4_lock_release,
4898};
4899
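/*
 * Kick off the appropriate recovery after a failed LOCK: stateid recovery
 * for revoked or bad stateids, lease recovery for stale or expired state.
 */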
4900static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4901{
4902 switch (error) {
4903 case -NFS4ERR_ADMIN_REVOKED:
4904 case -NFS4ERR_BAD_STATEID:
4905 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4906 if (new_lock_owner != 0 ||
4907 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
4908 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4909 break;
4910 case -NFS4ERR_STALE_STATEID:
4911 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
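		/* fall through */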
4912 case -NFS4ERR_EXPIRED:
4913 nfs4_schedule_lease_recovery(server->nfs_client);
4914 }
4915}
4916
4917static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4918{
4919 struct nfs4_lockdata *data;
4920 struct rpc_task *task;
4921 struct rpc_message msg = {
4922 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4923 .rpc_cred = state->owner->so_cred,
4924 };
4925 struct rpc_task_setup task_setup_data = {
4926 .rpc_client = NFS_CLIENT(state->inode),
4927 .rpc_message = &msg,
4928 .callback_ops = &nfs4_lock_ops,
4929 .workqueue = nfsiod_workqueue,
4930 .flags = RPC_TASK_ASYNC,
4931 };
4932 int ret;
4933
4934 dprintk("%s: begin!\n", __func__);
4935 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4936 fl->fl_u.nfs4_fl.owner,
4937 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4938 if (data == NULL)
4939 return -ENOMEM;
4940 if (IS_SETLKW(cmd))
4941 data->arg.block = 1;
4942 if (recovery_type > NFS_LOCK_NEW) {
4943 if (recovery_type == NFS_LOCK_RECLAIM)
4944 data->arg.reclaim = NFS_LOCK_RECLAIM;
4945 task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4946 }
4947 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4948 msg.rpc_argp = &data->arg;
4949 msg.rpc_resp = &data->res;
4950 task_setup_data.callback_data = data;
4951 task = rpc_run_task(&task_setup_data);
4952 if (IS_ERR(task))
4953 return PTR_ERR(task);
4954 ret = nfs4_wait_for_completion_rpc_task(task);
4955 if (ret == 0) {
4956 ret = data->rpc_status;
4957 if (ret)
4958 nfs4_handle_setlk_error(data->server, data->lsp,
4959 data->arg.new_lock_owner, ret);
4960 } else
4961 data->cancelled = 1;
4962 rpc_put_task(task);
4963 dprintk("%s: done, ret = %d!\n", __func__, ret);
4964 return ret;
4965}
4966
4967static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4968{
4969 struct nfs_server *server = NFS_SERVER(state->inode);
4970 struct nfs4_exception exception = {
4971 .inode = state->inode,
4972 };
4973 int err;
4974
4975 do {
4976 /* Cache the lock if possible... */
4977 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4978 return 0;
4979 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4980 if (err != -NFS4ERR_DELAY)
4981 break;
4982 nfs4_handle_exception(server, err, &exception);
4983 } while (exception.retry);
4984 return err;
4985}
4986
4987static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4988{
4989 struct nfs_server *server = NFS_SERVER(state->inode);
4990 struct nfs4_exception exception = {
4991 .inode = state->inode,
4992 };
4993 int err;
4994
4995 err = nfs4_set_lock_state(state, request);
4996 if (err != 0)
4997 return err;
4998 do {
4999 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5000 return 0;
5001 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5002 switch (err) {
5003 default:
5004 goto out;
5005 case -NFS4ERR_GRACE:
5006 case -NFS4ERR_DELAY:
5007 nfs4_handle_exception(server, err, &exception);
5008 err = 0;
5009 }
5010 } while (exception.retry);
5011out:
5012 return err;
5013}
5014
5015#if defined(CONFIG_NFS_V4_1)
5016/**
5017 * nfs41_check_expired_locks - possibly free a lock stateid
5018 *
5019 * @state: NFSv4 state for an inode
5020 *
5021 * Returns NFS_OK if recovery for this stateid is now finished.
5022 * Otherwise a negative NFS4ERR value is returned.
5023 */
5024static int nfs41_check_expired_locks(struct nfs4_state *state)
5025{
5026 int status, ret = -NFS4ERR_BAD_STATEID;
5027 struct nfs4_lock_state *lsp;
5028 struct nfs_server *server = NFS_SERVER(state->inode);
5029
5030 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
5031 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
5032 status = nfs41_test_stateid(server, &lsp->ls_stateid);
5033 if (status != NFS_OK) {
5034 /* Free the stateid unless the server
5035 * informs us the stateid is unrecognized. */
5036 if (status != -NFS4ERR_BAD_STATEID)
5037 nfs41_free_stateid(server,
5038 &lsp->ls_stateid);
5039 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5040 ret = status;
5041 }
5042 }
5043 }
5044
5045 return ret;
5046}
5047
5048static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
5049{
5050 int status = NFS_OK;
5051
5052 if (test_bit(LK_STATE_IN_USE, &state->flags))
5053 status = nfs41_check_expired_locks(state);
5054 if (status != NFS_OK)
5055 status = nfs4_lock_expired(state, request);
5056 return status;
5057}
5058#endif
5059
5060static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5061{
5062 struct nfs_inode *nfsi = NFS_I(state->inode);
5063 unsigned char fl_flags = request->fl_flags;
5064 int status = -ENOLCK;
5065
5066 if ((fl_flags & FL_POSIX) &&
5067 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
5068 goto out;
5069 /* Is this a delegated open? */
5070 status = nfs4_set_lock_state(state, request);
5071 if (status != 0)
5072 goto out;
5073 request->fl_flags |= FL_ACCESS;
5074 status = do_vfs_lock(request->fl_file, request);
5075 if (status < 0)
5076 goto out;
5077 down_read(&nfsi->rwsem);
5078 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
5079 /* Yes: cache locks! */
5080 /* ...but avoid races with delegation recall... */
5081 request->fl_flags = fl_flags & ~FL_SLEEP;
5082 status = do_vfs_lock(request->fl_file, request);
5083 goto out_unlock;
5084 }
5085 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
5086 if (status != 0)
5087 goto out_unlock;
5088 /* Note: we always want to sleep here! */
5089 request->fl_flags = fl_flags | FL_SLEEP;
5090 if (do_vfs_lock(request->fl_file, request) < 0)
5091 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
5092 "manager!\n", __func__);
5093out_unlock:
5094 up_read(&nfsi->rwsem);
5095out:
5096 request->fl_flags = fl_flags;
5097 return status;
5098}
5099
5100static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5101{
5102 struct nfs4_exception exception = {
5103 .state = state,
5104 .inode = state->inode,
5105 };
5106 int err;
5107
5108 do {
5109 err = _nfs4_proc_setlk(state, cmd, request);
5110 if (err == -NFS4ERR_DENIED)
5111 err = -EAGAIN;
5112 err = nfs4_handle_exception(NFS_SERVER(state->inode),
5113 err, &exception);
5114 } while (exception.retry);
5115 return err;
5116}
5117
5118static int
5119nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
5120{
5121 struct nfs_open_context *ctx;
5122 struct nfs4_state *state;
5123 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
5124 int status;
5125
5126 /* verify open state */
5127 ctx = nfs_file_open_context(filp);
5128 state = ctx->state;
5129
5130 if (request->fl_start < 0 || request->fl_end < 0)
5131 return -EINVAL;
5132
5133 if (IS_GETLK(cmd)) {
5134 if (state != NULL)
5135 return nfs4_proc_getlk(state, F_GETLK, request);
5136 return 0;
5137 }
5138
5139 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
5140 return -EINVAL;
5141
5142 if (request->fl_type == F_UNLCK) {
5143 if (state != NULL)
5144 return nfs4_proc_unlck(state, cmd, request);
5145 return 0;
5146 }
5147
5148 if (state == NULL)
5149 return -ENOLCK;
5150 /*
5151 * Don't rely on the VFS having checked the file open mode,
5152 * since it won't do this for flock() locks.
5153 */
5154 switch (request->fl_type) {
5155 case F_RDLCK:
5156 if (!(filp->f_mode & FMODE_READ))
5157 return -EBADF;
5158 break;
5159 case F_WRLCK:
5160 if (!(filp->f_mode & FMODE_WRITE))
5161 return -EBADF;
5162 }
5163
5164 do {
5165 status = nfs4_proc_setlk(state, cmd, request);
5166 if ((status != -EAGAIN) || IS_SETLK(cmd))
5167 break;
5168 timeout = nfs4_set_lock_task_retry(timeout);
5169 status = -ERESTARTSYS;
5170 if (signalled())
5171 break;
5172 } while(status < 0);
5173 return status;
5174}
5175
5176int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
5177{
5178 struct nfs_server *server = NFS_SERVER(state->inode);
5179 struct nfs4_exception exception = { };
5180 int err;
5181
5182 err = nfs4_set_lock_state(state, fl);
5183 if (err != 0)
5184 goto out;
5185 do {
5186 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
5187 switch (err) {
5188 default:
5189 printk(KERN_ERR "NFS: %s: unhandled error "
5190 "%d.\n", __func__, err);
5191 case 0:
5192 case -ESTALE:
5193 goto out;
5194 case -NFS4ERR_EXPIRED:
5195 nfs4_schedule_stateid_recovery(server, state);
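		/* fall through */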
5196 case -NFS4ERR_STALE_CLIENTID:
5197 case -NFS4ERR_STALE_STATEID:
5198 nfs4_schedule_lease_recovery(server->nfs_client);
5199 goto out;
5200 case -NFS4ERR_BADSESSION:
5201 case -NFS4ERR_BADSLOT:
5202 case -NFS4ERR_BAD_HIGH_SLOT:
5203 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
5204 case -NFS4ERR_DEADSESSION:
5205 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
5206 goto out;
5207 case -ERESTARTSYS:
5208 /*
5209 * The show must go on: exit, but mark the
5210 * stateid as needing recovery.
5211 */
5212 case -NFS4ERR_DELEG_REVOKED:
5213 case -NFS4ERR_ADMIN_REVOKED:
5214 case -NFS4ERR_BAD_STATEID:
5215 case -NFS4ERR_OPENMODE:
5216 nfs4_schedule_stateid_recovery(server, state);
5217 err = 0;
5218 goto out;
5219 case -EKEYEXPIRED:
5220 /*
5221 * User RPCSEC_GSS context has expired.
5222 * We cannot recover this stateid now, so
5223 * skip it and allow recovery thread to
5224 * proceed.
5225 */
5226 err = 0;
5227 goto out;
5228 case -ENOMEM:
5229 case -NFS4ERR_DENIED:
5230 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
5231 err = 0;
5232 goto out;
5233 case -NFS4ERR_DELAY:
5234 break;
5235 }
5236 err = nfs4_handle_exception(server, err, &exception);
5237 } while (exception.retry);
5238out:
5239 return err;
5240}
5241
5242struct nfs_release_lockowner_data {
5243 struct nfs4_lock_state *lsp;
5244 struct nfs_server *server;
5245 struct nfs_release_lockowner_args args;
5246};
5247
5248static void nfs4_release_lockowner_release(void *calldata)
5249{
5250 struct nfs_release_lockowner_data *data = calldata;
5251 nfs4_free_lock_state(data->server, data->lsp);
5252 kfree(calldata);
5253}
5254
5255static const struct rpc_call_ops nfs4_release_lockowner_ops = {
5256 .rpc_release = nfs4_release_lockowner_release,
5257};
5258
5259int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
5260{
5261 struct nfs_server *server = lsp->ls_state->owner->so_server;
5262 struct nfs_release_lockowner_data *data;
5263 struct rpc_message msg = {
5264 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
5265 };
5266
5267 if (server->nfs_client->cl_mvops->minor_version != 0)
5268 return -EINVAL;
5269 data = kmalloc(sizeof(*data), GFP_NOFS);
5270 if (!data)
5271 return -ENOMEM;
5272 data->lsp = lsp;
5273 data->server = server;
5274 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
5275 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
5276 data->args.lock_owner.s_dev = server->s_dev;
5277 msg.rpc_argp = &data->args;
5278 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
5279 return 0;
5280}
5281
5282#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
5283
5284static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
5285 const void *buf, size_t buflen,
5286 int flags, int type)
5287{
5288 if (strcmp(key, "") != 0)
5289 return -EINVAL;
5290
5291 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
5292}
5293
5294static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
5295 void *buf, size_t buflen, int type)
5296{
5297 if (strcmp(key, "") != 0)
5298 return -EINVAL;
5299
5300 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
5301}
5302
5303static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
5304 size_t list_len, const char *name,
5305 size_t name_len, int type)
5306{
5307 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
5308
5309 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
5310 return 0;
5311
5312 if (list && len <= list_len)
5313 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
5314 return len;
5315}
5316
5317/*
5318 * nfs_fhget will use either the mounted_on_fileid or the fileid
5319 */
5320static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
5321{
5322 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
5323 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
5324 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
5325 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
5326 return;
5327
5328 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
5329 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
5330 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
5331 fattr->nlink = 2;
5332}
5333
5334static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5335 const struct qstr *name,
5336 struct nfs4_fs_locations *fs_locations,
5337 struct page *page)
5338{
5339 struct nfs_server *server = NFS_SERVER(dir);
5340 u32 bitmask[2] = {
5341 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
5342 };
5343 struct nfs4_fs_locations_arg args = {
5344 .dir_fh = NFS_FH(dir),
5345 .name = name,
5346 .page = page,
5347 .bitmask = bitmask,
5348 };
5349 struct nfs4_fs_locations_res res = {
5350 .fs_locations = fs_locations,
5351 };
5352 struct rpc_message msg = {
5353 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
5354 .rpc_argp = &args,
5355 .rpc_resp = &res,
5356 };
5357 int status;
5358
5359 dprintk("%s: start\n", __func__);
5360
5361 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5362 * is not supported */
5363 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5364 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5365 else
5366 bitmask[0] |= FATTR4_WORD0_FILEID;
5367
5368 nfs_fattr_init(&fs_locations->fattr);
5369 fs_locations->server = server;
5370 fs_locations->nlocations = 0;
5371 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5372 dprintk("%s: returned status = %d\n", __func__, status);
5373 return status;
5374}
5375
5376int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5377 const struct qstr *name,
5378 struct nfs4_fs_locations *fs_locations,
5379 struct page *page)
5380{
5381 struct nfs4_exception exception = { };
5382 int err;
5383 do {
5384 err = nfs4_handle_exception(NFS_SERVER(dir),
5385 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5386 &exception);
5387 } while (exception.retry);
5388 return err;
5389}
5390
5391static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5392{
5393 int status;
5394 struct nfs4_secinfo_arg args = {
5395 .dir_fh = NFS_FH(dir),
5396 .name = name,
5397 };
5398 struct nfs4_secinfo_res res = {
5399 .flavors = flavors,
5400 };
5401 struct rpc_message msg = {
5402 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5403 .rpc_argp = &args,
5404 .rpc_resp = &res,
5405 };
5406
5407 dprintk("NFS call secinfo %s\n", name->name);
5408 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5409 dprintk("NFS reply secinfo: %d\n", status);
5410 return status;
5411}
5412
5413int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5414 struct nfs4_secinfo_flavors *flavors)
5415{
5416 struct nfs4_exception exception = { };
5417 int err;
5418 do {
5419 err = nfs4_handle_exception(NFS_SERVER(dir),
5420 _nfs4_proc_secinfo(dir, name, flavors),
5421 &exception);
5422 } while (exception.retry);
5423 return err;
5424}
5425
5426#ifdef CONFIG_NFS_V4_1
5427/*
5428 * Check the exchange flags returned by the server and reject invalid
5429 * combinations: flags outside EXCHGID4_FLAG_MASK_R, both the PNFS and
5430 * NON_PNFS flags set, or none of the NON_PNFS, PNFS and DS flags set.
5431 */
5432static int nfs4_check_cl_exchange_flags(u32 flags)
5433{
5434 if (flags & ~EXCHGID4_FLAG_MASK_R)
5435 goto out_inval;
5436 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5437 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5438 goto out_inval;
5439 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5440 goto out_inval;
5441 return NFS_OK;
5442out_inval:
5443 return -NFS4ERR_INVAL;
5444}
5445
5446static bool
5447nfs41_same_server_scope(struct nfs41_server_scope *a,
5448 struct nfs41_server_scope *b)
5449{
5450 if (a->server_scope_sz == b->server_scope_sz &&
5451 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5452 return true;
5453
5454 return false;
5455}
5456
5457/*
5458 * nfs4_proc_bind_conn_to_session()
5459 *
5460 * The 4.1 client currently uses the same TCP connection for the
5461 * fore and backchannel.
5462 */
5463int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
5464{
5465 int status;
5466 struct nfs41_bind_conn_to_session_res res;
5467 struct rpc_message msg = {
5468 .rpc_proc =
5469 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
5470 .rpc_argp = clp,
5471 .rpc_resp = &res,
5472 .rpc_cred = cred,
5473 };
5474
5475 dprintk("--> %s\n", __func__);
5476
5477 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5478 if (unlikely(res.session == NULL)) {
5479 status = -ENOMEM;
5480 goto out;
5481 }
5482
5483 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5484 if (status == 0) {
5485 if (memcmp(res.session->sess_id.data,
5486 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
5487 dprintk("NFS: %s: Session ID mismatch\n", __func__);
5488 status = -EIO;
5489 goto out_session;
5490 }
5491 if (res.dir != NFS4_CDFS4_BOTH) {
5492 dprintk("NFS: %s: Unexpected direction from server\n",
5493 __func__);
5494 status = -EIO;
5495 goto out_session;
5496 }
5497 if (res.use_conn_in_rdma_mode) {
5498 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5499 __func__);
5500 status = -EIO;
5501 goto out_session;
5502 }
5503 }
5504out_session:
5505 kfree(res.session);
5506out:
5507 dprintk("<-- %s status= %d\n", __func__, status);
5508 return status;
5509}
5510
5511/*
5512 * nfs4_proc_exchange_id()
5513 *
5514 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5515 *
5516 * Since the clientid has expired, all compounds using sessions
5517 * associated with the stale clientid will be returning
5518 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5519 * be in some phase of session reset.
5520 */
5521int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5522{
5523 nfs4_verifier verifier;
5524 struct nfs41_exchange_id_args args = {
5525 .verifier = &verifier,
5526 .client = clp,
5527 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5528 };
5529 struct nfs41_exchange_id_res res = {
5530 0
5531 };
5532 int status;
5533 struct rpc_message msg = {
5534 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5535 .rpc_argp = &args,
5536 .rpc_resp = &res,
5537 .rpc_cred = cred,
5538 };
5539
5540 nfs4_init_boot_verifier(clp, &verifier);
5541 args.id_len = nfs4_init_uniform_client_string(clp, args.id,
5542 sizeof(args.id));
5543 dprintk("NFS call exchange_id auth=%s, '%.*s'\n",
5544 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5545 args.id_len, args.id);
5546
5547 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
5548 GFP_NOFS);
5549 if (unlikely(res.server_owner == NULL)) {
5550 status = -ENOMEM;
5551 goto out;
5552 }
5553
5554 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
5555 GFP_NOFS);
5556 if (unlikely(res.server_scope == NULL)) {
5557 status = -ENOMEM;
5558 goto out_server_owner;
5559 }
5560
5561 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
5562 if (unlikely(res.impl_id == NULL)) {
5563 status = -ENOMEM;
5564 goto out_server_scope;
5565 }
5566
5567 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5568 if (status == 0)
5569 status = nfs4_check_cl_exchange_flags(res.flags);
5570
5571 if (status == 0) {
5572 clp->cl_clientid = res.clientid;
5573 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
5574 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
5575 clp->cl_seqid = res.seqid;
5576
5577 kfree(clp->cl_serverowner);
5578 clp->cl_serverowner = res.server_owner;
5579 res.server_owner = NULL;
5580
5581 /* use the most recent implementation id */
5582 kfree(clp->cl_implid);
5583 clp->cl_implid = res.impl_id;
5584
5585 if (clp->cl_serverscope != NULL &&
5586 !nfs41_same_server_scope(clp->cl_serverscope,
5587 res.server_scope)) {
5588 dprintk("%s: server_scope mismatch detected\n",
5589 __func__);
5590 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5591 kfree(clp->cl_serverscope);
5592 clp->cl_serverscope = NULL;
5593 }
5594
5595 if (clp->cl_serverscope == NULL) {
5596 clp->cl_serverscope = res.server_scope;
5597 goto out;
5598 }
5599 } else
5600 kfree(res.impl_id);
5601
5602out_server_owner:
5603 kfree(res.server_owner);
5604out_server_scope:
5605 kfree(res.server_scope);
5606out:
5607 if (clp->cl_implid != NULL)
5608 dprintk("NFS reply exchange_id: Server Implementation ID: "
5609 "domain: %s, name: %s, date: %llu,%u\n",
5610 clp->cl_implid->domain, clp->cl_implid->name,
5611 clp->cl_implid->date.seconds,
5612 clp->cl_implid->date.nseconds);
5613 dprintk("NFS reply exchange_id: %d\n", status);
5614 return status;
5615}
5616
5617static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5618 struct rpc_cred *cred)
5619{
5620 struct rpc_message msg = {
5621 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
5622 .rpc_argp = clp,
5623 .rpc_cred = cred,
5624 };
5625 int status;
5626
5627 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5628 if (status)
5629		dprintk("NFS: Got error %d from the server %s on "
5630			"DESTROY_CLIENTID.\n", status, clp->cl_hostname);
5631 return status;
5632}
5633
5634static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
5635 struct rpc_cred *cred)
5636{
5637 unsigned int loop;
5638 int ret;
5639
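	/*
	 * Retry while the server asks us to back off: NFS4ERR_DELAY and
	 * NFS4ERR_CLIENTID_BUSY get a one second sleep and another attempt,
	 * up to NFS4_MAX_LOOP_ON_RECOVER times; any other result (including
	 * success) is returned immediately.  If the retries are exhausted we
	 * fall out of the loop and report success so shutdown can proceed.
	 */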
5640 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
5641 ret = _nfs4_proc_destroy_clientid(clp, cred);
5642 switch (ret) {
5643 case -NFS4ERR_DELAY:
5644 case -NFS4ERR_CLIENTID_BUSY:
5645 ssleep(1);
5646 break;
5647 default:
5648 return ret;
5649 }
5650 }
5651 return 0;
5652}
5653
5654int nfs4_destroy_clientid(struct nfs_client *clp)
5655{
5656 struct rpc_cred *cred;
5657 int ret = 0;
5658
5659 if (clp->cl_mvops->minor_version < 1)
5660 goto out;
5661 if (clp->cl_exchange_flags == 0)
5662 goto out;
5663 if (clp->cl_preserve_clid)
5664 goto out;
5665 cred = nfs4_get_exchange_id_cred(clp);
5666 ret = nfs4_proc_destroy_clientid(clp, cred);
5667 if (cred)
5668 put_rpccred(cred);
5669 switch (ret) {
5670 case 0:
5671 case -NFS4ERR_STALE_CLIENTID:
5672 clp->cl_exchange_flags = 0;
5673 }
5674out:
5675 return ret;
5676}
5677
5678struct nfs4_get_lease_time_data {
5679 struct nfs4_get_lease_time_args *args;
5680 struct nfs4_get_lease_time_res *res;
5681 struct nfs_client *clp;
5682};
5683
5684static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5685 void *calldata)
5686{
5687 int ret;
5688 struct nfs4_get_lease_time_data *data =
5689 (struct nfs4_get_lease_time_data *)calldata;
5690
5691 dprintk("--> %s\n", __func__);
5692 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5693	/* Just set up the sequence; do not trigger session recovery,
5694	   since we're invoked from within one. */
5695 ret = nfs41_setup_sequence(data->clp->cl_session,
5696 &data->args->la_seq_args,
5697 &data->res->lr_seq_res, task);
5698
5699 if (ret != -EAGAIN)
5700 rpc_call_start(task);
5701 dprintk("<-- %s\n", __func__);
5702}
5703
5704/*
5705 * Called from nfs4_state_manager thread for session setup, so don't recover
5706 * from sequence operation or clientid errors.
5707 */
5708static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5709{
5710 struct nfs4_get_lease_time_data *data =
5711 (struct nfs4_get_lease_time_data *)calldata;
5712
5713 dprintk("--> %s\n", __func__);
5714 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5715 return;
5716 switch (task->tk_status) {
5717 case -NFS4ERR_DELAY:
5718 case -NFS4ERR_GRACE:
5719 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5720 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5721 task->tk_status = 0;
5722 /* fall through */
5723 case -NFS4ERR_RETRY_UNCACHED_REP:
5724 rpc_restart_call_prepare(task);
5725 return;
5726 }
5727 dprintk("<-- %s\n", __func__);
5728}
5729
5730static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5731 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5732 .rpc_call_done = nfs4_get_lease_time_done,
5733};
5734
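/*
 * Fetch the server's lease time with GET_LEASE_TIME.  The task is marked
 * privileged in ->rpc_call_prepare because it is issued by the state
 * manager during session setup; since RPC_TASK_ASYNC is not set,
 * rpc_run_task() runs the call synchronously, so its result is available
 * in task->tk_status as soon as it returns.
 */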
5735int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5736{
5737 struct rpc_task *task;
5738 struct nfs4_get_lease_time_args args;
5739 struct nfs4_get_lease_time_res res = {
5740 .lr_fsinfo = fsinfo,
5741 };
5742 struct nfs4_get_lease_time_data data = {
5743 .args = &args,
5744 .res = &res,
5745 .clp = clp,
5746 };
5747 struct rpc_message msg = {
5748 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5749 .rpc_argp = &args,
5750 .rpc_resp = &res,
5751 };
5752 struct rpc_task_setup task_setup = {
5753 .rpc_client = clp->cl_rpcclient,
5754 .rpc_message = &msg,
5755 .callback_ops = &nfs4_get_lease_time_ops,
5756 .callback_data = &data,
5757 .flags = RPC_TASK_TIMEOUT,
5758 };
5759 int status;
5760
5761 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5762 dprintk("--> %s\n", __func__);
5763 task = rpc_run_task(&task_setup);
5764
5765 if (IS_ERR(task))
5766 status = PTR_ERR(task);
5767 else {
5768 status = task->tk_status;
5769 rpc_put_task(task);
5770 }
5771 dprintk("<-- %s return %d\n", __func__, status);
5772
5773 return status;
5774}
5775
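/*
 * Grow the slot table to hold max_reqs slots.  A minimal reading: if the
 * table already has at least that many slots this is a no-op; otherwise
 * nfs4_find_or_create_slot() is asked for the highest new slot index
 * (max_reqs - 1), which is expected to allocate any missing slots up to
 * that point, and -ENOMEM is returned if that allocation fails.
 */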
5776static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl,
5777 u32 max_reqs, u32 ivalue)
5778{
5779 if (max_reqs <= tbl->max_slots)
5780 return 0;
5781 if (nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS))
5782 return 0;
5783 return -ENOMEM;
5784}
5785
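/*
 * Reset an existing slot table to the size advertised by the server.
 * Slots above server_highest_slotid are freed, every remaining slot has
 * its sequence number reset to ivalue, and the target/server/max slotid
 * bookkeeping is pinned to the server's value.  For example, with
 * server_highest_slotid == 9 and ivalue == 1, any slots above index 9 are
 * freed, each remaining slot restarts at seq_nr 1, and max_slotid
 * becomes 9.
 */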
5786static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
5787 u32 server_highest_slotid,
5788 u32 ivalue)
5789{
5790 struct nfs4_slot **p;
5791
5792 nfs4_shrink_slot_table(tbl, server_highest_slotid + 1);
5793 p = &tbl->slots;
5794 while (*p) {
5795 (*p)->seq_nr = ivalue;
5796 p = &(*p)->next;
5797 }
5798 tbl->highest_used_slotid = NFS4_NO_SLOT;
5799 tbl->target_highest_slotid = server_highest_slotid;
5800 tbl->server_highest_slotid = server_highest_slotid;
5801 tbl->max_slotid = server_highest_slotid;
5802}
5803
5804/*
5805 * (re)Initialise a slot table
5806 */
5807static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl,
5808 u32 max_reqs, u32 ivalue)
5809{
5810 int ret;
5811
5812 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5813 max_reqs, tbl->max_slots);
5814
5815 if (max_reqs > NFS4_MAX_SLOT_TABLE)
5816 max_reqs = NFS4_MAX_SLOT_TABLE;
5817
5818 ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue);
5819 if (ret)
5820 goto out;
5821
5822 spin_lock(&tbl->slot_tbl_lock);
5823 nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue);
5824 spin_unlock(&tbl->slot_tbl_lock);
5825
5826 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5827 tbl, tbl->slots, tbl->max_slots);
5828out:
5829 dprintk("<-- %s: return %d\n", __func__, ret);
5830 return ret;
5831}
5832
5833/* Destroy the slot table */
5834static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5835{
5836 nfs4_shrink_slot_table(&session->fc_slot_table, 0);
5837 nfs4_shrink_slot_table(&session->bc_slot_table, 0);
5838}
5839
5840/*
5841 * Initialize or reset the forechannel and backchannel tables
5842 */
5843static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
5844{
5845 struct nfs4_slot_table *tbl;
5846 int status;
5847
5848 dprintk("--> %s\n", __func__);
5849 /* Fore channel */
5850 tbl = &ses->fc_slot_table;
5851 tbl->session = ses;
5852 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
5853 if (status) /* -ENOMEM */
5854 return status;
5855 /* Back channel */
5856 tbl = &ses->bc_slot_table;
5857 tbl->session = ses;
5858 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
5859 if (status && tbl->slots == NULL)
5860		/* Fore and back channel share a connection, so get
5861		 * both slot tables or neither. */
5862 nfs4_destroy_slot_tables(ses);
5863 return status;
5864}
5865
5866struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5867{
5868 struct nfs4_session *session;
5869 struct nfs4_slot_table *tbl;
5870
5871 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5872 if (!session)
5873 return NULL;
5874
5875 tbl = &session->fc_slot_table;
5876 tbl->highest_used_slotid = NFS4_NO_SLOT;
5877 spin_lock_init(&tbl->slot_tbl_lock);
5878 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5879 init_completion(&tbl->complete);
5880
5881 tbl = &session->bc_slot_table;
5882 tbl->highest_used_slotid = NFS4_NO_SLOT;
5883 spin_lock_init(&tbl->slot_tbl_lock);
5884 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5885 init_completion(&tbl->complete);
5886
5887 session->session_state = 1<<NFS4_SESSION_INITING;
5888
5889 session->clp = clp;
5890 return session;
5891}
5892
5893void nfs4_destroy_session(struct nfs4_session *session)
5894{
5895 struct rpc_xprt *xprt;
5896 struct rpc_cred *cred;
5897
5898 cred = nfs4_get_exchange_id_cred(session->clp);
5899 nfs4_proc_destroy_session(session, cred);
5900 if (cred)
5901 put_rpccred(cred);
5902
5903 rcu_read_lock();
5904 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
5905 rcu_read_unlock();
5906 dprintk("%s Destroy backchannel for xprt %p\n",
5907 __func__, xprt);
5908 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
5909 nfs4_destroy_slot_tables(session);
5910 kfree(session);
5911}
5912
5913/*
5914 * Initialize the values to be used by the client in CREATE_SESSION.
5915 * If nfs4_init_session has set the fore channel request and response sizes,
5916 * use them.
5917 *
5918 * Set the back channel max_resp_sz_cached to zero to force the client to
5919 * always set csa_cachethis to FALSE because the current implementation
5920 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5921 */
5922static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5923{
5924 struct nfs4_session *session = args->client->cl_session;
5925 unsigned int mxrqst_sz = session->fc_target_max_rqst_sz,
5926 mxresp_sz = session->fc_target_max_resp_sz;
5927
5928 if (mxrqst_sz == 0)
5929 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5930 if (mxresp_sz == 0)
5931 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5932 /* Fore channel attributes */
5933 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5934 args->fc_attrs.max_resp_sz = mxresp_sz;
5935 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5936 args->fc_attrs.max_reqs = max_session_slots;
5937
5938 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5939 "max_ops=%u max_reqs=%u\n",
5940 __func__,
5941 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5942 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5943
5944 /* Back channel attributes */
5945 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5946 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5947 args->bc_attrs.max_resp_sz_cached = 0;
5948 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5949 args->bc_attrs.max_reqs = 1;
5950
5951 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5952 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5953 __func__,
5954 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5955 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5956 args->bc_attrs.max_reqs);
5957}
5958
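/*
 * Sanity-check the fore channel attributes the server actually granted
 * against what we asked for in CREATE_SESSION: the server may trim our
 * request, but it must not hand back a response size larger than we can
 * buffer, fewer operations per compound than we require, or a zero slot
 * count.  Anything above NFS4_MAX_SLOT_TABLE slots is simply clamped.
 */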
5959static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5960{
5961 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5962 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5963
5964 if (rcvd->max_resp_sz > sent->max_resp_sz)
5965 return -EINVAL;
5966 /*
5967 * Our requested max_ops is the minimum we need; we're not
5968 * prepared to break up compounds into smaller pieces than that.
5969 * So, no point even trying to continue if the server won't
5970 * cooperate:
5971 */
5972 if (rcvd->max_ops < sent->max_ops)
5973 return -EINVAL;
5974 if (rcvd->max_reqs == 0)
5975 return -EINVAL;
5976 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5977 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5978 return 0;
5979}
5980
5981static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5982{
5983 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5984 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5985
5986 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5987 return -EINVAL;
5988 if (rcvd->max_resp_sz < sent->max_resp_sz)
5989 return -EINVAL;
5990 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5991 return -EINVAL;
5992 /* These would render the backchannel useless: */
5993 if (rcvd->max_ops != sent->max_ops)
5994 return -EINVAL;
5995 if (rcvd->max_reqs != sent->max_reqs)
5996 return -EINVAL;
5997 return 0;
5998}
5999
6000static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
6001 struct nfs4_session *session)
6002{
6003 int ret;
6004
6005 ret = nfs4_verify_fore_channel_attrs(args, session);
6006 if (ret)
6007 return ret;
6008 return nfs4_verify_back_channel_attrs(args, session);
6009}
6010
6011static int _nfs4_proc_create_session(struct nfs_client *clp,
6012 struct rpc_cred *cred)
6013{
6014 struct nfs4_session *session = clp->cl_session;
6015 struct nfs41_create_session_args args = {
6016 .client = clp,
6017 .cb_program = NFS4_CALLBACK,
6018 };
6019 struct nfs41_create_session_res res = {
6020 .client = clp,
6021 };
6022 struct rpc_message msg = {
6023 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
6024 .rpc_argp = &args,
6025 .rpc_resp = &res,
6026 .rpc_cred = cred,
6027 };
6028 int status;
6029
6030 nfs4_init_channel_attrs(&args);
6031 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
6032
6033 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6034
6035 if (!status) {
6036 /* Verify the session's negotiated channel_attrs values */
6037 status = nfs4_verify_channel_attrs(&args, session);
6038 /* Increment the clientid slot sequence id */
6039 clp->cl_seqid++;
6040 }
6041
6042 return status;
6043}
6044
6045/*
6046 * Issues a CREATE_SESSION operation to the server.
6047 * It is the responsibility of the caller to verify the session is
6048 * expired before calling this routine.
6049 */
6050int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
6051{
6052 int status;
6053 unsigned *ptr;
6054 struct nfs4_session *session = clp->cl_session;
6055
6056 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
6057
6058 status = _nfs4_proc_create_session(clp, cred);
6059 if (status)
6060 goto out;
6061
6062 /* Init or reset the session slot tables */
6063 status = nfs4_setup_session_slot_tables(session);
6064 dprintk("slot table setup returned %d\n", status);
6065 if (status)
6066 goto out;
6067
6068 ptr = (unsigned *)&session->sess_id.data[0];
6069	dprintk("%s client->seqid %d sessionid %u:%u:%u:%u\n", __func__,
6070 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
6071out:
6072 dprintk("<-- %s\n", __func__);
6073 return status;
6074}
6075
6076/*
6077 * Issue the over-the-wire RPC DESTROY_SESSION.
6078 * The caller must serialize access to this routine.
6079 */
6080int nfs4_proc_destroy_session(struct nfs4_session *session,
6081 struct rpc_cred *cred)
6082{
6083 struct rpc_message msg = {
6084 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
6085 .rpc_argp = session,
6086 .rpc_cred = cred,
6087 };
6088 int status = 0;
6089
6090 dprintk("--> nfs4_proc_destroy_session\n");
6091
6092	/* session is still being set up */
6093 if (session->clp->cl_cons_state != NFS_CS_READY)
6094 return status;
6095
6096 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6097
6098 if (status)
6099 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
6100 "Session has been destroyed regardless...\n", status);
6101
6102 dprintk("<-- nfs4_proc_destroy_session\n");
6103 return status;
6104}
6105
6106/*
6107 * With sessions, the client is not marked ready until after a
6108 * successful EXCHANGE_ID and CREATE_SESSION.
6109 *
6110 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
6111 * other versions of NFS can be tried.
6112 */
6113static int nfs41_check_session_ready(struct nfs_client *clp)
6114{
6115 int ret;
6116
6117 if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
6118 ret = nfs4_client_recover_expired_lease(clp);
6119 if (ret)
6120 return ret;
6121 }
6122 if (clp->cl_cons_state < NFS_CS_READY)
6123 return -EPROTONOSUPPORT;
6124 smp_rmb();
6125 return 0;
6126}
6127
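/*
 * Derive the fore channel target request/response sizes from the mount's
 * wsize/rsize (plus per-op overhead) and store them in the session.  The
 * first caller to get here initialises both the targets and the channel
 * attributes; later callers only ever raise the targets, and when they do
 * the NFS4CLNT_SESSION_RESET flag is set so the session gets renegotiated.
 */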
6128int nfs4_init_session(struct nfs_server *server)
6129{
6130 struct nfs_client *clp = server->nfs_client;
6131 struct nfs4_session *session;
6132 unsigned int target_max_rqst_sz = NFS_MAX_FILE_IO_SIZE;
6133 unsigned int target_max_resp_sz = NFS_MAX_FILE_IO_SIZE;
6134
6135 if (!nfs4_has_session(clp))
6136 return 0;
6137
6138 if (server->rsize != 0)
6139 target_max_resp_sz = server->rsize;
6140 target_max_resp_sz += nfs41_maxread_overhead;
6141
6142 if (server->wsize != 0)
6143 target_max_rqst_sz = server->wsize;
6144 target_max_rqst_sz += nfs41_maxwrite_overhead;
6145
6146 session = clp->cl_session;
6147 spin_lock(&clp->cl_lock);
6148 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
6149 /* Initialise targets and channel attributes */
6150 session->fc_target_max_rqst_sz = target_max_rqst_sz;
6151 session->fc_attrs.max_rqst_sz = target_max_rqst_sz;
6152 session->fc_target_max_resp_sz = target_max_resp_sz;
6153 session->fc_attrs.max_resp_sz = target_max_resp_sz;
6154 } else {
6155 /* Just adjust the targets */
6156 if (target_max_rqst_sz > session->fc_target_max_rqst_sz) {
6157 session->fc_target_max_rqst_sz = target_max_rqst_sz;
6158 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
6159 }
6160 if (target_max_resp_sz > session->fc_target_max_resp_sz) {
6161 session->fc_target_max_resp_sz = target_max_resp_sz;
6162 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
6163 }
6164 }
6165 spin_unlock(&clp->cl_lock);
6166
6167 if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
6168 nfs4_schedule_lease_recovery(clp);
6169
6170 return nfs41_check_session_ready(clp);
6171}
6172
6173int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
6174{
6175 struct nfs4_session *session = clp->cl_session;
6176 int ret;
6177
6178 spin_lock(&clp->cl_lock);
6179 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
6180 /*
6181		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
6182		 * DS lease equal to the MDS lease.
6183 */
6184 clp->cl_lease_time = lease_time;
6185 clp->cl_last_renewal = jiffies;
6186 }
6187 spin_unlock(&clp->cl_lock);
6188
6189 ret = nfs41_check_session_ready(clp);
6190 if (ret)
6191 return ret;
6192 /* Test for the DS role */
6193 if (!is_ds_client(clp))
6194 return -ENODEV;
6195 return 0;
6196}
6197EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
6198
6199
6200/*
6201 * Renew the cl_session lease.
6202 */
6203struct nfs4_sequence_data {
6204 struct nfs_client *clp;
6205 struct nfs4_sequence_args args;
6206 struct nfs4_sequence_res res;
6207};
6208
6209static void nfs41_sequence_release(void *data)
6210{
6211 struct nfs4_sequence_data *calldata = data;
6212 struct nfs_client *clp = calldata->clp;
6213
6214 if (atomic_read(&clp->cl_count) > 1)
6215 nfs4_schedule_state_renewal(clp);
6216 nfs_put_client(clp);
6217 kfree(calldata);
6218}
6219
6220static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
6221{
6222 switch(task->tk_status) {
6223 case -NFS4ERR_DELAY:
6224 rpc_delay(task, NFS4_POLL_RETRY_MAX);
6225 return -EAGAIN;
6226 default:
6227 nfs4_schedule_lease_recovery(clp);
6228 }
6229 return 0;
6230}
6231
6232static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
6233{
6234 struct nfs4_sequence_data *calldata = data;
6235 struct nfs_client *clp = calldata->clp;
6236
6237 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
6238 return;
6239
6240 if (task->tk_status < 0) {
6241 dprintk("%s ERROR %d\n", __func__, task->tk_status);
6242 if (atomic_read(&clp->cl_count) == 1)
6243 goto out;
6244
6245 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
6246 rpc_restart_call_prepare(task);
6247 return;
6248 }
6249 }
6250 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
6251out:
6252 dprintk("<-- %s\n", __func__);
6253}
6254
6255static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
6256{
6257 struct nfs4_sequence_data *calldata = data;
6258 struct nfs_client *clp = calldata->clp;
6259 struct nfs4_sequence_args *args;
6260 struct nfs4_sequence_res *res;
6261
6262 args = task->tk_msg.rpc_argp;
6263 res = task->tk_msg.rpc_resp;
6264
6265 if (nfs41_setup_sequence(clp->cl_session, args, res, task))
6266 return;
6267 rpc_call_start(task);
6268}
6269
6270static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data)
6271{
6272 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
6273 nfs41_sequence_prepare(task, data);
6274}
6275
6276static const struct rpc_call_ops nfs41_sequence_ops = {
6277 .rpc_call_done = nfs41_sequence_call_done,
6278 .rpc_call_prepare = nfs41_sequence_prepare,
6279 .rpc_release = nfs41_sequence_release,
6280};
6281
6282static const struct rpc_call_ops nfs41_sequence_privileged_ops = {
6283 .rpc_call_done = nfs41_sequence_call_done,
6284 .rpc_call_prepare = nfs41_sequence_prepare_privileged,
6285 .rpc_release = nfs41_sequence_release,
6286};
6287
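/*
 * Build and launch the SEQUENCE call used for lease renewal.  A reference
 * to the nfs_client is taken with atomic_inc_not_zero() so the client
 * cannot disappear while the renewal is in flight; the task itself runs
 * asynchronously with RPC_TASK_SOFT, and the caller chooses either the
 * normal or the privileged call ops (nfs4_proc_sequence below uses the
 * privileged variant).
 */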
6288static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred,
6289 const struct rpc_call_ops *seq_ops)
6290{
6291 struct nfs4_sequence_data *calldata;
6292 struct rpc_message msg = {
6293 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
6294 .rpc_cred = cred,
6295 };
6296 struct rpc_task_setup task_setup_data = {
6297 .rpc_client = clp->cl_rpcclient,
6298 .rpc_message = &msg,
6299 .callback_ops = seq_ops,
6300 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
6301 };
6302
6303 if (!atomic_inc_not_zero(&clp->cl_count))
6304 return ERR_PTR(-EIO);
6305 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6306 if (calldata == NULL) {
6307 nfs_put_client(clp);
6308 return ERR_PTR(-ENOMEM);
6309 }
6310 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
6311 msg.rpc_argp = &calldata->args;
6312 msg.rpc_resp = &calldata->res;
6313 calldata->clp = clp;
6314 task_setup_data.callback_data = calldata;
6315
6316 return rpc_run_task(&task_setup_data);
6317}
6318
6319static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
6320{
6321 struct rpc_task *task;
6322 int ret = 0;
6323
6324 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
6325 return 0;
6326 task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops);
6327 if (IS_ERR(task))
6328 ret = PTR_ERR(task);
6329 else
6330 rpc_put_task_async(task);
6331 dprintk("<-- %s status=%d\n", __func__, ret);
6332 return ret;
6333}
6334
6335static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
6336{
6337 struct rpc_task *task;
6338 int ret;
6339
6340 task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops);
6341 if (IS_ERR(task)) {
6342 ret = PTR_ERR(task);
6343 goto out;
6344 }
6345 ret = rpc_wait_for_completion_task(task);
6346 if (!ret) {
6347 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
6348
6349 if (task->tk_status == 0)
6350 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
6351 ret = task->tk_status;
6352 }
6353 rpc_put_task(task);
6354out:
6355 dprintk("<-- %s status=%d\n", __func__, ret);
6356 return ret;
6357}
6358
6359struct nfs4_reclaim_complete_data {
6360 struct nfs_client *clp;
6361 struct nfs41_reclaim_complete_args arg;
6362 struct nfs41_reclaim_complete_res res;
6363};
6364
6365static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
6366{
6367 struct nfs4_reclaim_complete_data *calldata = data;
6368
6369 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
6370 if (nfs41_setup_sequence(calldata->clp->cl_session,
6371 &calldata->arg.seq_args,
6372 &calldata->res.seq_res, task))
6373 return;
6374
6375 rpc_call_start(task);
6376}
6377
6378static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
6379{
6380 switch(task->tk_status) {
6381 case 0:
6382 case -NFS4ERR_COMPLETE_ALREADY:
6383 case -NFS4ERR_WRONG_CRED: /* What to do here? */
6384 break;
6385 case -NFS4ERR_DELAY:
6386 rpc_delay(task, NFS4_POLL_RETRY_MAX);
6387 /* fall through */
6388 case -NFS4ERR_RETRY_UNCACHED_REP:
6389 return -EAGAIN;
6390 default:
6391 nfs4_schedule_lease_recovery(clp);
6392 }
6393 return 0;
6394}
6395
6396static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
6397{
6398 struct nfs4_reclaim_complete_data *calldata = data;
6399 struct nfs_client *clp = calldata->clp;
6400 struct nfs4_sequence_res *res = &calldata->res.seq_res;
6401
6402 dprintk("--> %s\n", __func__);
6403 if (!nfs41_sequence_done(task, res))
6404 return;
6405
6406 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
6407 rpc_restart_call_prepare(task);
6408 return;
6409 }
6410 dprintk("<-- %s\n", __func__);
6411}
6412
6413static void nfs4_free_reclaim_complete_data(void *data)
6414{
6415 struct nfs4_reclaim_complete_data *calldata = data;
6416
6417 kfree(calldata);
6418}
6419
6420static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
6421 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
6422 .rpc_call_done = nfs4_reclaim_complete_done,
6423 .rpc_release = nfs4_free_reclaim_complete_data,
6424};
6425
6426/*
6427 * Issue a global reclaim complete.
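 * The one_fs argument is left clear below (calldata->arg.one_fs = 0), so
 * the RECLAIM_COMPLETE covers the whole clientid rather than a single
 * filesystem.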
6428 */
6429static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
6430{
6431 struct nfs4_reclaim_complete_data *calldata;
6432 struct rpc_task *task;
6433 struct rpc_message msg = {
6434 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
6435 };
6436 struct rpc_task_setup task_setup_data = {
6437 .rpc_client = clp->cl_rpcclient,
6438 .rpc_message = &msg,
6439 .callback_ops = &nfs4_reclaim_complete_call_ops,
6440 .flags = RPC_TASK_ASYNC,
6441 };
6442 int status = -ENOMEM;
6443
6444 dprintk("--> %s\n", __func__);
6445 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6446 if (calldata == NULL)
6447 goto out;
6448 calldata->clp = clp;
6449 calldata->arg.one_fs = 0;
6450
6451 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
6452 msg.rpc_argp = &calldata->arg;
6453 msg.rpc_resp = &calldata->res;
6454 task_setup_data.callback_data = calldata;
6455 task = rpc_run_task(&task_setup_data);
6456 if (IS_ERR(task)) {
6457 status = PTR_ERR(task);
6458 goto out;
6459 }
6460 status = nfs4_wait_for_completion_rpc_task(task);
6461 if (status == 0)
6462 status = task->tk_status;
6463 rpc_put_task(task);
6464 return 0;
6465out:
6466 dprintk("<-- %s status=%d\n", __func__, status);
6467 return status;
6468}
6469
6470static void
6471nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
6472{
6473 struct nfs4_layoutget *lgp = calldata;
6474 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6475
6476 dprintk("--> %s\n", __func__);
6477	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
6478 * right now covering the LAYOUTGET we are about to send.
6479 * However, that is not so catastrophic, and there seems
6480 * to be no way to prevent it completely.
6481 */
6482 if (nfs4_setup_sequence(server, &lgp->args.seq_args,
6483 &lgp->res.seq_res, task))
6484 return;
6485 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
6486 NFS_I(lgp->args.inode)->layout,
6487 lgp->args.ctx->state)) {
6488 rpc_exit(task, NFS4_OK);
6489 return;
6490 }
6491 rpc_call_start(task);
6492}
6493
6494static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6495{
6496 struct nfs4_layoutget *lgp = calldata;
6497 struct inode *inode = lgp->args.inode;
6498 struct nfs_server *server = NFS_SERVER(inode);
6499 struct pnfs_layout_hdr *lo;
6500 struct nfs4_state *state = NULL;
6501
6502 dprintk("--> %s\n", __func__);
6503
6504 if (!nfs4_sequence_done(task, &lgp->res.seq_res))
6505 goto out;
6506
6507 switch (task->tk_status) {
6508 case 0:
6509 goto out;
6510 case -NFS4ERR_LAYOUTTRYLATER:
6511 case -NFS4ERR_RECALLCONFLICT:
6512 task->tk_status = -NFS4ERR_DELAY;
6513 break;
6514 case -NFS4ERR_EXPIRED:
6515 case -NFS4ERR_BAD_STATEID:
6516 spin_lock(&inode->i_lock);
6517 lo = NFS_I(inode)->layout;
6518 if (!lo || list_empty(&lo->plh_segs)) {
6519 spin_unlock(&inode->i_lock);
6520 /* If the open stateid was bad, then recover it. */
6521 state = lgp->args.ctx->state;
6522 } else {
6523 LIST_HEAD(head);
6524
6525 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
6526 spin_unlock(&inode->i_lock);
6527 /* Mark the bad layout state as invalid, then
6528 * retry using the open stateid. */
6529 pnfs_free_lseg_list(&head);
6530 }
6531 }
6532 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
6533 rpc_restart_call_prepare(task);
6534out:
6535 dprintk("<-- %s\n", __func__);
6536}
6537
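/*
 * The LAYOUTGET reply is decoded into a caller-supplied page array.  We
 * size that array to the session's negotiated maximum response size, so
 * whatever layout the server returns is guaranteed to fit.
 */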
6538static size_t max_response_pages(struct nfs_server *server)
6539{
6540 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
6541 return nfs_page_array_len(0, max_resp_sz);
6542}
6543
6544static void nfs4_free_pages(struct page **pages, size_t size)
6545{
6546 int i;
6547
6548 if (!pages)
6549 return;
6550
6551 for (i = 0; i < size; i++) {
6552 if (!pages[i])
6553 break;
6554 __free_page(pages[i]);
6555 }
6556 kfree(pages);
6557}
6558
6559static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
6560{
6561 struct page **pages;
6562 int i;
6563
6564 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
6565 if (!pages) {
6566 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
6567 return NULL;
6568 }
6569
6570 for (i = 0; i < size; i++) {
6571 pages[i] = alloc_page(gfp_flags);
6572 if (!pages[i]) {
6573 dprintk("%s: failed to allocate page\n", __func__);
6574 nfs4_free_pages(pages, size);
6575 return NULL;
6576 }
6577 }
6578
6579 return pages;
6580}
6581
6582static void nfs4_layoutget_release(void *calldata)
6583{
6584 struct nfs4_layoutget *lgp = calldata;
6585 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6586 size_t max_pages = max_response_pages(server);
6587
6588 dprintk("--> %s\n", __func__);
6589 nfs4_free_pages(lgp->args.layout.pages, max_pages);
6590 put_nfs_open_context(lgp->args.ctx);
6591 kfree(calldata);
6592 dprintk("<-- %s\n", __func__);
6593}
6594
6595static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6596 .rpc_call_prepare = nfs4_layoutget_prepare,
6597 .rpc_call_done = nfs4_layoutget_done,
6598 .rpc_release = nfs4_layoutget_release,
6599};
6600
6601struct pnfs_layout_segment *
6602nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
6603{
6604 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6605 size_t max_pages = max_response_pages(server);
6606 struct rpc_task *task;
6607 struct rpc_message msg = {
6608 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
6609 .rpc_argp = &lgp->args,
6610 .rpc_resp = &lgp->res,
6611 };
6612 struct rpc_task_setup task_setup_data = {
6613 .rpc_client = server->client,
6614 .rpc_message = &msg,
6615 .callback_ops = &nfs4_layoutget_call_ops,
6616 .callback_data = lgp,
6617 .flags = RPC_TASK_ASYNC,
6618 };
6619 struct pnfs_layout_segment *lseg = NULL;
6620 int status = 0;
6621
6622 dprintk("--> %s\n", __func__);
6623
6624 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
6625 if (!lgp->args.layout.pages) {
6626 nfs4_layoutget_release(lgp);
6627 return ERR_PTR(-ENOMEM);
6628 }
6629 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
6630
6631 lgp->res.layoutp = &lgp->args.layout;
6632 lgp->res.seq_res.sr_slot = NULL;
6633 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6634 task = rpc_run_task(&task_setup_data);
6635 if (IS_ERR(task))
6636 return ERR_CAST(task);
6637 status = nfs4_wait_for_completion_rpc_task(task);
6638 if (status == 0)
6639 status = task->tk_status;
6640 if (status == 0)
6641 lseg = pnfs_layout_process(lgp);
6642 rpc_put_task(task);
6643 dprintk("<-- %s status=%d\n", __func__, status);
6644 if (status)
6645 return ERR_PTR(status);
6646 return lseg;
6647}
6648
6649static void
6650nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6651{
6652 struct nfs4_layoutreturn *lrp = calldata;
6653
6654 dprintk("--> %s\n", __func__);
6655 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
6656 &lrp->res.seq_res, task))
6657 return;
6658 rpc_call_start(task);
6659}
6660
6661static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6662{
6663 struct nfs4_layoutreturn *lrp = calldata;
6664 struct nfs_server *server;
6665
6666 dprintk("--> %s\n", __func__);
6667
6668 if (!nfs4_sequence_done(task, &lrp->res.seq_res))
6669 return;
6670
6671 server = NFS_SERVER(lrp->args.inode);
6672 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6673 rpc_restart_call_prepare(task);
6674 return;
6675 }
6676 dprintk("<-- %s\n", __func__);
6677}
6678
6679static void nfs4_layoutreturn_release(void *calldata)
6680{
6681 struct nfs4_layoutreturn *lrp = calldata;
6682 struct pnfs_layout_hdr *lo = lrp->args.layout;
6683
6684 dprintk("--> %s\n", __func__);
6685 spin_lock(&lo->plh_inode->i_lock);
6686 if (lrp->res.lrs_present)
6687 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6688 lo->plh_block_lgets--;
6689 spin_unlock(&lo->plh_inode->i_lock);
6690 pnfs_put_layout_hdr(lrp->args.layout);
6691 kfree(calldata);
6692 dprintk("<-- %s\n", __func__);
6693}
6694
6695static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6696 .rpc_call_prepare = nfs4_layoutreturn_prepare,
6697 .rpc_call_done = nfs4_layoutreturn_done,
6698 .rpc_release = nfs4_layoutreturn_release,
6699};
6700
6701int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6702{
6703 struct rpc_task *task;
6704 struct rpc_message msg = {
6705 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6706 .rpc_argp = &lrp->args,
6707 .rpc_resp = &lrp->res,
6708 };
6709 struct rpc_task_setup task_setup_data = {
6710 .rpc_client = lrp->clp->cl_rpcclient,
6711 .rpc_message = &msg,
6712 .callback_ops = &nfs4_layoutreturn_call_ops,
6713 .callback_data = lrp,
6714 };
6715 int status;
6716
6717 dprintk("--> %s\n", __func__);
6718 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6719 task = rpc_run_task(&task_setup_data);
6720 if (IS_ERR(task))
6721 return PTR_ERR(task);
6722 status = task->tk_status;
6723 dprintk("<-- %s status=%d\n", __func__, status);
6724 rpc_put_task(task);
6725 return status;
6726}
6727
6728/*
6729 * Retrieve the list of Data Server devices from the MDS.
6730 */
6731static int _nfs4_getdevicelist(struct nfs_server *server,
6732 const struct nfs_fh *fh,
6733 struct pnfs_devicelist *devlist)
6734{
6735 struct nfs4_getdevicelist_args args = {
6736 .fh = fh,
6737 .layoutclass = server->pnfs_curr_ld->id,
6738 };
6739 struct nfs4_getdevicelist_res res = {
6740 .devlist = devlist,
6741 };
6742 struct rpc_message msg = {
6743 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6744 .rpc_argp = &args,
6745 .rpc_resp = &res,
6746 };
6747 int status;
6748
6749 dprintk("--> %s\n", __func__);
6750 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6751 &res.seq_res, 0);
6752 dprintk("<-- %s status=%d\n", __func__, status);
6753 return status;
6754}
6755
6756int nfs4_proc_getdevicelist(struct nfs_server *server,
6757 const struct nfs_fh *fh,
6758 struct pnfs_devicelist *devlist)
6759{
6760 struct nfs4_exception exception = { };
6761 int err;
6762
6763 do {
6764 err = nfs4_handle_exception(server,
6765 _nfs4_getdevicelist(server, fh, devlist),
6766 &exception);
6767 } while (exception.retry);
6768
6769 dprintk("%s: err=%d, num_devs=%u\n", __func__,
6770 err, devlist->num_devs);
6771
6772 return err;
6773}
6774EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
6775
6776static int
6777_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6778{
6779 struct nfs4_getdeviceinfo_args args = {
6780 .pdev = pdev,
6781 };
6782 struct nfs4_getdeviceinfo_res res = {
6783 .pdev = pdev,
6784 };
6785 struct rpc_message msg = {
6786 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6787 .rpc_argp = &args,
6788 .rpc_resp = &res,
6789 };
6790 int status;
6791
6792 dprintk("--> %s\n", __func__);
6793 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6794 dprintk("<-- %s status=%d\n", __func__, status);
6795
6796 return status;
6797}
6798
6799int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6800{
6801 struct nfs4_exception exception = { };
6802 int err;
6803
6804 do {
6805 err = nfs4_handle_exception(server,
6806 _nfs4_proc_getdeviceinfo(server, pdev),
6807 &exception);
6808 } while (exception.retry);
6809 return err;
6810}
6811EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6812
6813static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6814{
6815 struct nfs4_layoutcommit_data *data = calldata;
6816 struct nfs_server *server = NFS_SERVER(data->args.inode);
6817
6818 if (nfs4_setup_sequence(server, &data->args.seq_args,
6819 &data->res.seq_res, task))
6820 return;
6821 rpc_call_start(task);
6822}
6823
6824static void
6825nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6826{
6827 struct nfs4_layoutcommit_data *data = calldata;
6828 struct nfs_server *server = NFS_SERVER(data->args.inode);
6829
6830 if (!nfs4_sequence_done(task, &data->res.seq_res))
6831 return;
6832
6833 switch (task->tk_status) { /* Just ignore these failures */
6834 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6835 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
6836 case -NFS4ERR_BADLAYOUT: /* no layout */
6837	case -NFS4ERR_GRACE:		/* loca_reclaim always false */
6838 task->tk_status = 0;
6839 break;
6840 case 0:
6841 nfs_post_op_update_inode_force_wcc(data->args.inode,
6842 data->res.fattr);
6843 break;
6844 default:
6845 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6846 rpc_restart_call_prepare(task);
6847 return;
6848 }
6849 }
6850}
6851
6852static void nfs4_layoutcommit_release(void *calldata)
6853{
6854 struct nfs4_layoutcommit_data *data = calldata;
6855 struct pnfs_layout_segment *lseg, *tmp;
6856 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6857
6858 pnfs_cleanup_layoutcommit(data);
6859 /* Matched by references in pnfs_set_layoutcommit */
6860 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6861 list_del_init(&lseg->pls_lc_list);
6862 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6863 &lseg->pls_flags))
6864 pnfs_put_lseg(lseg);
6865 }
6866
6867 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6868 smp_mb__after_clear_bit();
6869 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6870
6871 put_rpccred(data->cred);
6872 kfree(data);
6873}
6874
6875static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6876 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6877 .rpc_call_done = nfs4_layoutcommit_done,
6878 .rpc_release = nfs4_layoutcommit_release,
6879};
6880
6881int
6882nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6883{
6884 struct rpc_message msg = {
6885 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6886 .rpc_argp = &data->args,
6887 .rpc_resp = &data->res,
6888 .rpc_cred = data->cred,
6889 };
6890 struct rpc_task_setup task_setup_data = {
6891 .task = &data->task,
6892 .rpc_client = NFS_CLIENT(data->args.inode),
6893 .rpc_message = &msg,
6894 .callback_ops = &nfs4_layoutcommit_ops,
6895 .callback_data = data,
6896 .flags = RPC_TASK_ASYNC,
6897 };
6898 struct rpc_task *task;
6899 int status = 0;
6900
6901 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6902 "lbw: %llu inode %lu\n",
6903 data->task.tk_pid, sync,
6904 data->args.lastbytewritten,
6905 data->args.inode->i_ino);
6906
6907 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6908 task = rpc_run_task(&task_setup_data);
6909 if (IS_ERR(task))
6910 return PTR_ERR(task);
6911 if (sync == false)
6912 goto out;
6913 status = nfs4_wait_for_completion_rpc_task(task);
6914 if (status != 0)
6915 goto out;
6916 status = task->tk_status;
6917out:
6918 dprintk("%s: status %d\n", __func__, status);
6919 rpc_put_task(task);
6920 return status;
6921}
6922
6923static int
6924_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6925 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6926{
6927 struct nfs41_secinfo_no_name_args args = {
6928 .style = SECINFO_STYLE_CURRENT_FH,
6929 };
6930 struct nfs4_secinfo_res res = {
6931 .flavors = flavors,
6932 };
6933 struct rpc_message msg = {
6934 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6935 .rpc_argp = &args,
6936 .rpc_resp = &res,
6937 };
6938 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6939}
6940
6941static int
6942nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6943 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6944{
6945 struct nfs4_exception exception = { };
6946 int err;
6947 do {
6948 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6949 switch (err) {
6950 case 0:
6951 case -NFS4ERR_WRONGSEC:
6952 case -NFS4ERR_NOTSUPP:
6953 goto out;
6954 default:
6955 err = nfs4_handle_exception(server, err, &exception);
6956 }
6957 } while (exception.retry);
6958out:
6959 return err;
6960}
6961
6962static int
6963nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6964 struct nfs_fsinfo *info)
6965{
6966 int err;
6967 struct page *page;
6968 rpc_authflavor_t flavor;
6969 struct nfs4_secinfo_flavors *flavors;
6970
6971 page = alloc_page(GFP_KERNEL);
6972 if (!page) {
6973 err = -ENOMEM;
6974 goto out;
6975 }
6976
6977 flavors = page_address(page);
6978 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6979
6980 /*
6981 * Fall back on "guess and check" method if
6982 * the server doesn't support SECINFO_NO_NAME
6983 */
6984 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6985 err = nfs4_find_root_sec(server, fhandle, info);
6986 goto out_freepage;
6987 }
6988 if (err)
6989 goto out_freepage;
6990
6991 flavor = nfs_find_best_sec(flavors);
6992 if (err == 0)
6993 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6994
6995out_freepage:
6996 put_page(page);
6997 if (err == -EACCES)
6998 return -EPERM;
6999out:
7000 return err;
7001}
7002
7003static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
7004{
7005 int status;
7006 struct nfs41_test_stateid_args args = {
7007 .stateid = stateid,
7008 };
7009 struct nfs41_test_stateid_res res;
7010 struct rpc_message msg = {
7011 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
7012 .rpc_argp = &args,
7013 .rpc_resp = &res,
7014 };
7015
7016 dprintk("NFS call test_stateid %p\n", stateid);
7017 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
7018 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
7019 if (status != NFS_OK) {
7020 dprintk("NFS reply test_stateid: failed, %d\n", status);
7021 return status;
7022 }
7023 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
7024 return -res.status;
7025}
7026
7027/**
7028 * nfs41_test_stateid - perform a TEST_STATEID operation
7029 *
7030 * @server: server / transport on which to perform the operation
7031 * @stateid: state ID to test
7032 *
7033 * Returns NFS_OK if the server recognizes that "stateid" is valid.
7034 * Otherwise a negative NFS4ERR value is returned if the operation
7035 * failed or the state ID is not currently valid.
7036 */
7037static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
7038{
7039 struct nfs4_exception exception = { };
7040 int err;
7041 do {
7042 err = _nfs41_test_stateid(server, stateid);
7043 if (err != -NFS4ERR_DELAY)
7044 break;
7045 nfs4_handle_exception(server, err, &exception);
7046 } while (exception.retry);
7047 return err;
7048}
7049
7050static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
7051{
7052 struct nfs41_free_stateid_args args = {
7053 .stateid = stateid,
7054 };
7055 struct nfs41_free_stateid_res res;
7056 struct rpc_message msg = {
7057 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
7058 .rpc_argp = &args,
7059 .rpc_resp = &res,
7060 };
7061 int status;
7062
7063 dprintk("NFS call free_stateid %p\n", stateid);
7064 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
7065 status = nfs4_call_sync_sequence(server->client, server, &msg,
7066 &args.seq_args, &res.seq_res, 1);
7067 dprintk("NFS reply free_stateid: %d\n", status);
7068 return status;
7069}
7070
7071/**
7072 * nfs41_free_stateid - perform a FREE_STATEID operation
7073 *
7074 * @server: server / transport on which to perform the operation
7075 * @stateid: state ID to release
7076 *
7077 * Returns NFS_OK if the server freed "stateid". Otherwise a
7078 * negative NFS4ERR value is returned.
7079 */
7080static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
7081{
7082 struct nfs4_exception exception = { };
7083 int err;
7084 do {
7085 err = _nfs4_free_stateid(server, stateid);
7086 if (err != -NFS4ERR_DELAY)
7087 break;
7088 nfs4_handle_exception(server, err, &exception);
7089 } while (exception.retry);
7090 return err;
7091}
7092
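/*
 * Two NFSv4.1 stateids match when their "other" fields are identical and
 * their seqids either agree or one of them is zero; a zero seqid acts as
 * a wildcard meaning "whatever the current seqid is".  So, for example, a
 * stateid held with seqid 0 still matches one the server has since bumped
 * to seqid 3, as long as the "other" bytes are the same.
 */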
7093static bool nfs41_match_stateid(const nfs4_stateid *s1,
7094 const nfs4_stateid *s2)
7095{
7096 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
7097 return false;
7098
7099 if (s1->seqid == s2->seqid)
7100 return true;
7101 if (s1->seqid == 0 || s2->seqid == 0)
7102 return true;
7103
7104 return false;
7105}
7106
7107#endif /* CONFIG_NFS_V4_1 */
7108
7109static bool nfs4_match_stateid(const nfs4_stateid *s1,
7110 const nfs4_stateid *s2)
7111{
7112 return nfs4_stateid_match(s1, s2);
7113}
7114
7115
7116static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
7117 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
7118 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
7119 .recover_open = nfs4_open_reclaim,
7120 .recover_lock = nfs4_lock_reclaim,
7121 .establish_clid = nfs4_init_clientid,
7122 .get_clid_cred = nfs4_get_setclientid_cred,
7123 .detect_trunking = nfs40_discover_server_trunking,
7124};
7125
7126#if defined(CONFIG_NFS_V4_1)
7127static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
7128 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
7129 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
7130 .recover_open = nfs4_open_reclaim,
7131 .recover_lock = nfs4_lock_reclaim,
7132 .establish_clid = nfs41_init_clientid,
7133 .get_clid_cred = nfs4_get_exchange_id_cred,
7134 .reclaim_complete = nfs41_proc_reclaim_complete,
7135 .detect_trunking = nfs41_discover_server_trunking,
7136};
7137#endif /* CONFIG_NFS_V4_1 */
7138
7139static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
7140 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
7141 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
7142 .recover_open = nfs4_open_expired,
7143 .recover_lock = nfs4_lock_expired,
7144 .establish_clid = nfs4_init_clientid,
7145 .get_clid_cred = nfs4_get_setclientid_cred,
7146};
7147
7148#if defined(CONFIG_NFS_V4_1)
7149static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
7150 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
7151 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
7152 .recover_open = nfs41_open_expired,
7153 .recover_lock = nfs41_lock_expired,
7154 .establish_clid = nfs41_init_clientid,
7155 .get_clid_cred = nfs4_get_exchange_id_cred,
7156};
7157#endif /* CONFIG_NFS_V4_1 */
7158
7159static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
7160 .sched_state_renewal = nfs4_proc_async_renew,
7161 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
7162 .renew_lease = nfs4_proc_renew,
7163};
7164
7165#if defined(CONFIG_NFS_V4_1)
7166static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
7167 .sched_state_renewal = nfs41_proc_async_sequence,
7168 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
7169 .renew_lease = nfs4_proc_sequence,
7170};
7171#endif
7172
7173static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
7174 .minor_version = 0,
7175 .call_sync = _nfs4_call_sync,
7176 .match_stateid = nfs4_match_stateid,
7177 .find_root_sec = nfs4_find_root_sec,
7178 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
7179 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
7180 .state_renewal_ops = &nfs40_state_renewal_ops,
7181};
7182
7183#if defined(CONFIG_NFS_V4_1)
7184static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
7185 .minor_version = 1,
7186 .call_sync = _nfs4_call_sync_session,
7187 .match_stateid = nfs41_match_stateid,
7188 .find_root_sec = nfs41_find_root_sec,
7189 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
7190 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
7191 .state_renewal_ops = &nfs41_state_renewal_ops,
7192};
7193#endif
7194
7195const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
7196 [0] = &nfs_v4_0_minor_ops,
7197#if defined(CONFIG_NFS_V4_1)
7198 [1] = &nfs_v4_1_minor_ops,
7199#endif
7200};
7201
7202const struct inode_operations nfs4_dir_inode_operations = {
7203 .create = nfs_create,
7204 .lookup = nfs_lookup,
7205 .atomic_open = nfs_atomic_open,
7206 .link = nfs_link,
7207 .unlink = nfs_unlink,
7208 .symlink = nfs_symlink,
7209 .mkdir = nfs_mkdir,
7210 .rmdir = nfs_rmdir,
7211 .mknod = nfs_mknod,
7212 .rename = nfs_rename,
7213 .permission = nfs_permission,
7214 .getattr = nfs_getattr,
7215 .setattr = nfs_setattr,
7216 .getxattr = generic_getxattr,
7217 .setxattr = generic_setxattr,
7218 .listxattr = generic_listxattr,
7219 .removexattr = generic_removexattr,
7220};
7221
7222static const struct inode_operations nfs4_file_inode_operations = {
7223 .permission = nfs_permission,
7224 .getattr = nfs_getattr,
7225 .setattr = nfs_setattr,
7226 .getxattr = generic_getxattr,
7227 .setxattr = generic_setxattr,
7228 .listxattr = generic_listxattr,
7229 .removexattr = generic_removexattr,
7230};
7231
7232const struct nfs_rpc_ops nfs_v4_clientops = {
7233 .version = 4, /* protocol version */
7234 .dentry_ops = &nfs4_dentry_operations,
7235 .dir_inode_ops = &nfs4_dir_inode_operations,
7236 .file_inode_ops = &nfs4_file_inode_operations,
7237 .file_ops = &nfs4_file_operations,
7238 .getroot = nfs4_proc_get_root,
7239 .submount = nfs4_submount,
7240 .try_mount = nfs4_try_mount,
7241 .getattr = nfs4_proc_getattr,
7242 .setattr = nfs4_proc_setattr,
7243 .lookup = nfs4_proc_lookup,
7244 .access = nfs4_proc_access,
7245 .readlink = nfs4_proc_readlink,
7246 .create = nfs4_proc_create,
7247 .remove = nfs4_proc_remove,
7248 .unlink_setup = nfs4_proc_unlink_setup,
7249 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
7250 .unlink_done = nfs4_proc_unlink_done,
7251 .rename = nfs4_proc_rename,
7252 .rename_setup = nfs4_proc_rename_setup,
7253 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
7254 .rename_done = nfs4_proc_rename_done,
7255 .link = nfs4_proc_link,
7256 .symlink = nfs4_proc_symlink,
7257 .mkdir = nfs4_proc_mkdir,
7258 .rmdir = nfs4_proc_remove,
7259 .readdir = nfs4_proc_readdir,
7260 .mknod = nfs4_proc_mknod,
7261 .statfs = nfs4_proc_statfs,
7262 .fsinfo = nfs4_proc_fsinfo,
7263 .pathconf = nfs4_proc_pathconf,
7264 .set_capabilities = nfs4_server_capabilities,
7265 .decode_dirent = nfs4_decode_dirent,
7266 .read_setup = nfs4_proc_read_setup,
7267 .read_pageio_init = pnfs_pageio_init_read,
7268 .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
7269 .read_done = nfs4_read_done,
7270 .write_setup = nfs4_proc_write_setup,
7271 .write_pageio_init = pnfs_pageio_init_write,
7272 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
7273 .write_done = nfs4_write_done,
7274 .commit_setup = nfs4_proc_commit_setup,
7275 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
7276 .commit_done = nfs4_commit_done,
7277 .lock = nfs4_proc_lock,
7278 .clear_acl_cache = nfs4_zap_acl_attr,
7279 .close_context = nfs4_close_context,
7280 .open_context = nfs4_atomic_open,
7281 .have_delegation = nfs4_have_delegation,
7282 .return_delegation = nfs4_inode_return_delegation,
7283 .alloc_client = nfs4_alloc_client,
7284 .init_client = nfs4_init_client,
7285 .free_client = nfs4_free_client,
7286 .create_server = nfs4_create_server,
7287 .clone_server = nfs_clone_server,
7288};
7289
7290static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
7291 .prefix = XATTR_NAME_NFSV4_ACL,
7292 .list = nfs4_xattr_list_nfs4_acl,
7293 .get = nfs4_xattr_get_nfs4_acl,
7294 .set = nfs4_xattr_set_nfs4_acl,
7295};
7296
7297const struct xattr_handler *nfs4_xattr_handlers[] = {
7298 &nfs4_xattr_nfs4_acl_handler,
7299 NULL
7300};
7301
7302/*
7303 * Local variables:
7304 * c-basic-offset: 8
7305 * End:
7306 */