NFSv4: Recovery of recalled read delegations is broken
[linux-2.6-block.git] / fs / nfs / nfs4proc.c
1/*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/mm.h>
39#include <linux/delay.h>
40#include <linux/errno.h>
41#include <linux/file.h>
42#include <linux/string.h>
43#include <linux/ratelimit.h>
44#include <linux/printk.h>
45#include <linux/slab.h>
46#include <linux/sunrpc/clnt.h>
47#include <linux/nfs.h>
48#include <linux/nfs4.h>
49#include <linux/nfs_fs.h>
50#include <linux/nfs_page.h>
51#include <linux/nfs_mount.h>
52#include <linux/namei.h>
53#include <linux/mount.h>
54#include <linux/module.h>
55#include <linux/xattr.h>
56#include <linux/utsname.h>
57#include <linux/freezer.h>
58
59#include "nfs4_fs.h"
60#include "delegation.h"
61#include "internal.h"
62#include "iostat.h"
63#include "callback.h"
64#include "pnfs.h"
65#include "netns.h"
66#include "nfs4idmap.h"
67#include "nfs4session.h"
68#include "fscache.h"
69
70#include "nfs4trace.h"
71
72#define NFSDBG_FACILITY NFSDBG_PROC
73
74#define NFS4_POLL_RETRY_MIN (HZ/10)
75#define NFS4_POLL_RETRY_MAX (15*HZ)
76
77struct nfs4_opendata;
78static int _nfs4_proc_open(struct nfs4_opendata *data);
79static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *);
82static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
83static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
84static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
85static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
86 struct nfs_fattr *fattr, struct iattr *sattr,
87 struct nfs4_state *state, struct nfs4_label *ilabel,
88 struct nfs4_label *olabel);
89#ifdef CONFIG_NFS_V4_1
90static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
91 struct rpc_cred *);
92static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
93 struct rpc_cred *);
94#endif
95
96#ifdef CONFIG_NFS_V4_SECURITY_LABEL
97static inline struct nfs4_label *
98nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
99 struct iattr *sattr, struct nfs4_label *label)
100{
101 int err;
102
103 if (label == NULL)
104 return NULL;
105
106 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
107 return NULL;
108
109 err = security_dentry_init_security(dentry, sattr->ia_mode,
110 &dentry->d_name, (void **)&label->label, &label->len);
111 if (err == 0)
112 return label;
113
114 return NULL;
115}
116static inline void
117nfs4_label_release_security(struct nfs4_label *label)
118{
119 if (label)
120 security_release_secctx(label->label, label->len);
121}
122static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
123{
124 if (label)
125 return server->attr_bitmask;
126
127 return server->attr_bitmask_nl;
128}
129#else
130static inline struct nfs4_label *
131nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
132 struct iattr *sattr, struct nfs4_label *l)
133{ return NULL; }
134static inline void
135nfs4_label_release_security(struct nfs4_label *label)
136{ return; }
137static inline u32 *
138nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
139{ return server->attr_bitmask; }
140#endif
141
142/* Prevent leaks of NFSv4 errors into userland */
143static int nfs4_map_errors(int err)
144{
145 if (err >= -1000)
146 return err;
147 switch (err) {
148 case -NFS4ERR_RESOURCE:
149 case -NFS4ERR_LAYOUTTRYLATER:
150 case -NFS4ERR_RECALLCONFLICT:
151 return -EREMOTEIO;
152 case -NFS4ERR_WRONGSEC:
153 case -NFS4ERR_WRONG_CRED:
154 return -EPERM;
155 case -NFS4ERR_BADOWNER:
156 case -NFS4ERR_BADNAME:
157 return -EINVAL;
158 case -NFS4ERR_SHARE_DENIED:
159 return -EACCES;
160 case -NFS4ERR_MINOR_VERS_MISMATCH:
161 return -EPROTONOSUPPORT;
162 case -NFS4ERR_FILE_OPEN:
163 return -EBUSY;
164 default:
165 dprintk("%s could not handle NFSv4 error %d\n",
166 __func__, -err);
167 break;
168 }
169 return -EIO;
170}
171
172/*
173 * This is our standard bitmap for GETATTR requests.
174 */
175const u32 nfs4_fattr_bitmap[3] = {
176 FATTR4_WORD0_TYPE
177 | FATTR4_WORD0_CHANGE
178 | FATTR4_WORD0_SIZE
179 | FATTR4_WORD0_FSID
180 | FATTR4_WORD0_FILEID,
181 FATTR4_WORD1_MODE
182 | FATTR4_WORD1_NUMLINKS
183 | FATTR4_WORD1_OWNER
184 | FATTR4_WORD1_OWNER_GROUP
185 | FATTR4_WORD1_RAWDEV
186 | FATTR4_WORD1_SPACE_USED
187 | FATTR4_WORD1_TIME_ACCESS
188 | FATTR4_WORD1_TIME_METADATA
189 | FATTR4_WORD1_TIME_MODIFY
190 | FATTR4_WORD1_MOUNTED_ON_FILEID,
191#ifdef CONFIG_NFS_V4_SECURITY_LABEL
192 FATTR4_WORD2_SECURITY_LABEL
193#endif
194};
195
196static const u32 nfs4_pnfs_open_bitmap[3] = {
197 FATTR4_WORD0_TYPE
198 | FATTR4_WORD0_CHANGE
199 | FATTR4_WORD0_SIZE
200 | FATTR4_WORD0_FSID
201 | FATTR4_WORD0_FILEID,
202 FATTR4_WORD1_MODE
203 | FATTR4_WORD1_NUMLINKS
204 | FATTR4_WORD1_OWNER
205 | FATTR4_WORD1_OWNER_GROUP
206 | FATTR4_WORD1_RAWDEV
207 | FATTR4_WORD1_SPACE_USED
208 | FATTR4_WORD1_TIME_ACCESS
209 | FATTR4_WORD1_TIME_METADATA
210 | FATTR4_WORD1_TIME_MODIFY,
211 FATTR4_WORD2_MDSTHRESHOLD
212};
213
214static const u32 nfs4_open_noattr_bitmap[3] = {
215 FATTR4_WORD0_TYPE
216 | FATTR4_WORD0_CHANGE
217 | FATTR4_WORD0_FILEID,
218};
219
220const u32 nfs4_statfs_bitmap[3] = {
221 FATTR4_WORD0_FILES_AVAIL
222 | FATTR4_WORD0_FILES_FREE
223 | FATTR4_WORD0_FILES_TOTAL,
224 FATTR4_WORD1_SPACE_AVAIL
225 | FATTR4_WORD1_SPACE_FREE
226 | FATTR4_WORD1_SPACE_TOTAL
227};
228
229const u32 nfs4_pathconf_bitmap[3] = {
230 FATTR4_WORD0_MAXLINK
231 | FATTR4_WORD0_MAXNAME,
232 0
233};
234
235const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
236 | FATTR4_WORD0_MAXREAD
237 | FATTR4_WORD0_MAXWRITE
238 | FATTR4_WORD0_LEASE_TIME,
239 FATTR4_WORD1_TIME_DELTA
240 | FATTR4_WORD1_FS_LAYOUT_TYPES,
241 FATTR4_WORD2_LAYOUT_BLKSIZE
242};
243
244const u32 nfs4_fs_locations_bitmap[3] = {
245 FATTR4_WORD0_TYPE
246 | FATTR4_WORD0_CHANGE
247 | FATTR4_WORD0_SIZE
248 | FATTR4_WORD0_FSID
249 | FATTR4_WORD0_FILEID
250 | FATTR4_WORD0_FS_LOCATIONS,
251 FATTR4_WORD1_MODE
252 | FATTR4_WORD1_NUMLINKS
253 | FATTR4_WORD1_OWNER
254 | FATTR4_WORD1_OWNER_GROUP
255 | FATTR4_WORD1_RAWDEV
256 | FATTR4_WORD1_SPACE_USED
257 | FATTR4_WORD1_TIME_ACCESS
258 | FATTR4_WORD1_TIME_METADATA
259 | FATTR4_WORD1_TIME_MODIFY
260 | FATTR4_WORD1_MOUNTED_ON_FILEID,
261};
262
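/*
 * Prime the READDIR arguments. Cookies 0-2 restart the listing from the
 * beginning (server cookie 0), and the '.' and '..' entries for those
 * cookies are synthesized locally below, since NFSv4 servers do not return
 * them. Larger cookies, and their verifier, are passed through unchanged.
 */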
263static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
264 struct nfs4_readdir_arg *readdir)
265{
266 __be32 *start, *p;
267
268 if (cookie > 2) {
269 readdir->cookie = cookie;
270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
271 return;
272 }
273
274 readdir->cookie = 0;
275 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
276 if (cookie == 2)
277 return;
278
279 /*
 280 * NFSv4 servers do not return entries for '.' and '..'.
281 * Therefore, we fake these entries here. We let '.'
282 * have cookie 0 and '..' have cookie 1. Note that
283 * when talking to the server, we always send cookie 0
284 * instead of 1 or 2.
285 */
286 start = p = kmap_atomic(*readdir->pages);
287
288 if (cookie == 0) {
289 *p++ = xdr_one; /* next */
290 *p++ = xdr_zero; /* cookie, first word */
291 *p++ = xdr_one; /* cookie, second word */
292 *p++ = xdr_one; /* entry len */
293 memcpy(p, ".\0\0\0", 4); /* entry */
294 p++;
295 *p++ = xdr_one; /* bitmap length */
296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
297 *p++ = htonl(8); /* attribute buffer length */
298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
299 }
300
301 *p++ = xdr_one; /* next */
302 *p++ = xdr_zero; /* cookie, first word */
303 *p++ = xdr_two; /* cookie, second word */
304 *p++ = xdr_two; /* entry len */
305 memcpy(p, "..\0\0", 4); /* entry */
306 p++;
307 *p++ = xdr_one; /* bitmap length */
308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
309 *p++ = htonl(8); /* attribute buffer length */
310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
311
312 readdir->pgbase = (char *)p - (char *)start;
313 readdir->count -= readdir->pgbase;
314 kunmap_atomic(start);
315}
316
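/*
 * Exponential backoff helper for NFS4ERR_DELAY-style retries: returns the
 * current delay, clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX],
 * and doubles the stored timeout for the next attempt.
 */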
317static long nfs4_update_delay(long *timeout)
318{
319 long ret;
320 if (!timeout)
321 return NFS4_POLL_RETRY_MAX;
322 if (*timeout <= 0)
323 *timeout = NFS4_POLL_RETRY_MIN;
324 if (*timeout > NFS4_POLL_RETRY_MAX)
325 *timeout = NFS4_POLL_RETRY_MAX;
326 ret = *timeout;
327 *timeout <<= 1;
328 return ret;
329}
330
331static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
332{
333 int res = 0;
334
335 might_sleep();
336
337 freezable_schedule_timeout_killable_unsafe(
338 nfs4_update_delay(timeout));
339 if (fatal_signal_pending(current))
340 res = -ERESTARTSYS;
341 return res;
342}
343
344/* This is the error handling routine for processes that are allowed
345 * to sleep.
346 */
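/*
 * Typical caller pattern, shown here as a sketch only (the operation name
 * is a placeholder; see for instance nfs4_do_open_reclaim below):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = _nfs4_do_something(server, ...);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 */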
347int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
348{
349 struct nfs_client *clp = server->nfs_client;
350 struct nfs4_state *state = exception->state;
351 struct inode *inode = exception->inode;
352 int ret = errorcode;
353
354 exception->retry = 0;
355 switch(errorcode) {
356 case 0:
357 return 0;
358 case -NFS4ERR_OPENMODE:
359 case -NFS4ERR_DELEG_REVOKED:
360 case -NFS4ERR_ADMIN_REVOKED:
361 case -NFS4ERR_BAD_STATEID:
362 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
363 nfs4_inode_return_delegation(inode);
364 exception->retry = 1;
365 return 0;
366 }
367 if (state == NULL)
368 break;
369 ret = nfs4_schedule_stateid_recovery(server, state);
370 if (ret < 0)
371 break;
372 goto wait_on_recovery;
373 case -NFS4ERR_EXPIRED:
374 if (state != NULL) {
375 ret = nfs4_schedule_stateid_recovery(server, state);
376 if (ret < 0)
377 break;
378 }
379 case -NFS4ERR_STALE_STATEID:
380 case -NFS4ERR_STALE_CLIENTID:
381 nfs4_schedule_lease_recovery(clp);
382 goto wait_on_recovery;
383 case -NFS4ERR_MOVED:
384 ret = nfs4_schedule_migration_recovery(server);
385 if (ret < 0)
386 break;
387 goto wait_on_recovery;
388 case -NFS4ERR_LEASE_MOVED:
389 nfs4_schedule_lease_moved_recovery(clp);
390 goto wait_on_recovery;
391#if defined(CONFIG_NFS_V4_1)
392 case -NFS4ERR_BADSESSION:
393 case -NFS4ERR_BADSLOT:
394 case -NFS4ERR_BAD_HIGH_SLOT:
395 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
396 case -NFS4ERR_DEADSESSION:
397 case -NFS4ERR_SEQ_FALSE_RETRY:
398 case -NFS4ERR_SEQ_MISORDERED:
399 dprintk("%s ERROR: %d Reset session\n", __func__,
400 errorcode);
401 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
402 goto wait_on_recovery;
403#endif /* defined(CONFIG_NFS_V4_1) */
404 case -NFS4ERR_FILE_OPEN:
405 if (exception->timeout > HZ) {
406 /* We have retried a decent amount, time to
407 * fail
408 */
409 ret = -EBUSY;
410 break;
411 }
412 case -NFS4ERR_GRACE:
413 case -NFS4ERR_DELAY:
414 ret = nfs4_delay(server->client, &exception->timeout);
415 if (ret != 0)
416 break;
417 case -NFS4ERR_RETRY_UNCACHED_REP:
418 case -NFS4ERR_OLD_STATEID:
419 exception->retry = 1;
420 break;
421 case -NFS4ERR_BADOWNER:
422 /* The following works around a Linux server bug! */
423 case -NFS4ERR_BADNAME:
424 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
425 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
426 exception->retry = 1;
427 printk(KERN_WARNING "NFS: v4 server %s "
428 "does not accept raw "
429 "uid/gids. "
430 "Reenabling the idmapper.\n",
431 server->nfs_client->cl_hostname);
432 }
433 }
434 /* We failed to handle the error */
435 return nfs4_map_errors(ret);
436wait_on_recovery:
437 ret = nfs4_wait_clnt_recover(clp);
438 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
439 return -EIO;
440 if (ret == 0)
441 exception->retry = 1;
442 return ret;
443}
444
445/*
446 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
447 * or 'false' otherwise.
448 */
449static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
450{
451 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
452
453 if (flavor == RPC_AUTH_GSS_KRB5I ||
454 flavor == RPC_AUTH_GSS_KRB5P)
455 return true;
456
457 return false;
458}
459
460static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
461{
462 spin_lock(&clp->cl_lock);
463 if (time_before(clp->cl_last_renewal,timestamp))
464 clp->cl_last_renewal = timestamp;
465 spin_unlock(&clp->cl_lock);
466}
467
468static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469{
470 struct nfs_client *clp = server->nfs_client;
471
472 if (!nfs4_has_session(clp))
473 do_renew_lease(clp, timestamp);
474}
475
476struct nfs4_call_sync_data {
477 const struct nfs_server *seq_server;
478 struct nfs4_sequence_args *seq_args;
479 struct nfs4_sequence_res *seq_res;
480};
481
482void nfs4_init_sequence(struct nfs4_sequence_args *args,
483 struct nfs4_sequence_res *res, int cache_reply)
484{
485 args->sa_slot = NULL;
486 args->sa_cache_this = cache_reply;
487 args->sa_privileged = 0;
488
489 res->sr_slot = NULL;
490}
491
492static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
493{
494 args->sa_privileged = 1;
495}
496
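/*
 * Reserve a slot from 'tbl' before starting the RPC. If the table is
 * draining for state recovery and the request is not privileged, the task
 * is put to sleep on the slot wait queue until a slot can be assigned.
 */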
497int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
498 struct nfs4_sequence_args *args,
499 struct nfs4_sequence_res *res,
500 struct rpc_task *task)
501{
502 struct nfs4_slot *slot;
503
504 /* slot already allocated? */
505 if (res->sr_slot != NULL)
506 goto out_start;
507
508 spin_lock(&tbl->slot_tbl_lock);
509 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
510 goto out_sleep;
511
512 slot = nfs4_alloc_slot(tbl);
513 if (IS_ERR(slot)) {
514 if (slot == ERR_PTR(-ENOMEM))
515 task->tk_timeout = HZ >> 2;
516 goto out_sleep;
517 }
518 spin_unlock(&tbl->slot_tbl_lock);
519
520 args->sa_slot = slot;
521 res->sr_slot = slot;
522
523out_start:
524 rpc_call_start(task);
525 return 0;
526
527out_sleep:
528 if (args->sa_privileged)
529 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
530 NULL, RPC_PRIORITY_PRIVILEGED);
531 else
532 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
533 spin_unlock(&tbl->slot_tbl_lock);
534 return -EAGAIN;
535}
536EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
537
538static int nfs40_sequence_done(struct rpc_task *task,
539 struct nfs4_sequence_res *res)
540{
541 struct nfs4_slot *slot = res->sr_slot;
542 struct nfs4_slot_table *tbl;
543
544 if (slot == NULL)
545 goto out;
546
547 tbl = slot->table;
548 spin_lock(&tbl->slot_tbl_lock);
549 if (!nfs41_wake_and_assign_slot(tbl, slot))
550 nfs4_free_slot(tbl, slot);
551 spin_unlock(&tbl->slot_tbl_lock);
552
553 res->sr_slot = NULL;
554out:
555 return 1;
556}
557
558#if defined(CONFIG_NFS_V4_1)
559
560static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
561{
562 struct nfs4_session *session;
563 struct nfs4_slot_table *tbl;
564 struct nfs4_slot *slot = res->sr_slot;
565 bool send_new_highest_used_slotid = false;
566
567 tbl = slot->table;
568 session = tbl->session;
569
570 spin_lock(&tbl->slot_tbl_lock);
571 /* Be nice to the server: try to ensure that the last transmitted
 572 * value for highest_used_slotid <= target_highest_slotid
573 */
574 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
575 send_new_highest_used_slotid = true;
576
577 if (nfs41_wake_and_assign_slot(tbl, slot)) {
578 send_new_highest_used_slotid = false;
579 goto out_unlock;
580 }
581 nfs4_free_slot(tbl, slot);
582
583 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
584 send_new_highest_used_slotid = false;
585out_unlock:
586 spin_unlock(&tbl->slot_tbl_lock);
587 res->sr_slot = NULL;
588 if (send_new_highest_used_slotid)
589 nfs41_notify_server(session->clp);
590}
591
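/*
 * Process the result of the SEQUENCE operation: on success, bump the slot's
 * sequence number and renew the clientid lease; if the reply was never
 * decoded (sr_status stays 1), mark the slot as interrupted. NFS4ERR_DELAY
 * causes a delayed resend, while NFS4ERR_BADSLOT and the NFS4ERR_SEQ_*
 * cases restart the call immediately on a fresh or adjusted slot.
 */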
592int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
593{
594 struct nfs4_session *session;
595 struct nfs4_slot *slot = res->sr_slot;
596 struct nfs_client *clp;
597 bool interrupted = false;
598 int ret = 1;
599
600 if (slot == NULL)
601 goto out_noaction;
602 /* don't increment the sequence number if the task wasn't sent */
603 if (!RPC_WAS_SENT(task))
604 goto out;
605
606 session = slot->table->session;
607
608 if (slot->interrupted) {
609 slot->interrupted = 0;
610 interrupted = true;
611 }
612
613 trace_nfs4_sequence_done(session, res);
614 /* Check the SEQUENCE operation status */
615 switch (res->sr_status) {
616 case 0:
617 /* Update the slot's sequence and clientid lease timer */
618 ++slot->seq_nr;
619 clp = session->clp;
620 do_renew_lease(clp, res->sr_timestamp);
621 /* Check sequence flags */
622 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
623 nfs41_update_target_slotid(slot->table, slot, res);
624 break;
625 case 1:
626 /*
627 * sr_status remains 1 if an RPC level error occurred.
628 * The server may or may not have processed the sequence
 629 * operation.
630 * Mark the slot as having hosted an interrupted RPC call.
631 */
632 slot->interrupted = 1;
633 goto out;
634 case -NFS4ERR_DELAY:
635 /* The server detected a resend of the RPC call and
636 * returned NFS4ERR_DELAY as per Section 2.10.6.2
637 * of RFC5661.
638 */
639 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
640 __func__,
641 slot->slot_nr,
642 slot->seq_nr);
643 goto out_retry;
644 case -NFS4ERR_BADSLOT:
645 /*
646 * The slot id we used was probably retired. Try again
647 * using a different slot id.
648 */
649 goto retry_nowait;
650 case -NFS4ERR_SEQ_MISORDERED:
651 /*
652 * Was the last operation on this sequence interrupted?
653 * If so, retry after bumping the sequence number.
654 */
655 if (interrupted) {
656 ++slot->seq_nr;
657 goto retry_nowait;
658 }
659 /*
660 * Could this slot have been previously retired?
661 * If so, then the server may be expecting seq_nr = 1!
662 */
663 if (slot->seq_nr != 1) {
664 slot->seq_nr = 1;
665 goto retry_nowait;
666 }
667 break;
668 case -NFS4ERR_SEQ_FALSE_RETRY:
669 ++slot->seq_nr;
670 goto retry_nowait;
671 default:
672 /* Just update the slot sequence no. */
673 ++slot->seq_nr;
674 }
675out:
676 /* The session may be reset by one of the error handlers. */
677 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
678 nfs41_sequence_free_slot(res);
679out_noaction:
680 return ret;
681retry_nowait:
682 if (rpc_restart_call_prepare(task)) {
683 task->tk_status = 0;
684 ret = 0;
685 }
686 goto out;
687out_retry:
688 if (!rpc_restart_call(task))
689 goto out;
690 rpc_delay(task, NFS4_POLL_RETRY_MAX);
691 return 0;
692}
693EXPORT_SYMBOL_GPL(nfs41_sequence_done);
694
695int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
696{
697 if (res->sr_slot == NULL)
698 return 1;
699 if (!res->sr_slot->table->session)
700 return nfs40_sequence_done(task, res);
701 return nfs41_sequence_done(task, res);
702}
703EXPORT_SYMBOL_GPL(nfs4_sequence_done);
704
705int nfs41_setup_sequence(struct nfs4_session *session,
706 struct nfs4_sequence_args *args,
707 struct nfs4_sequence_res *res,
708 struct rpc_task *task)
709{
710 struct nfs4_slot *slot;
711 struct nfs4_slot_table *tbl;
712
713 dprintk("--> %s\n", __func__);
714 /* slot already allocated? */
715 if (res->sr_slot != NULL)
716 goto out_success;
717
718 tbl = &session->fc_slot_table;
719
720 task->tk_timeout = 0;
721
722 spin_lock(&tbl->slot_tbl_lock);
723 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
724 !args->sa_privileged) {
725 /* The state manager will wait until the slot table is empty */
726 dprintk("%s session is draining\n", __func__);
727 goto out_sleep;
728 }
729
730 slot = nfs4_alloc_slot(tbl);
731 if (IS_ERR(slot)) {
732 /* If out of memory, try again in 1/4 second */
733 if (slot == ERR_PTR(-ENOMEM))
734 task->tk_timeout = HZ >> 2;
735 dprintk("<-- %s: no free slots\n", __func__);
736 goto out_sleep;
737 }
738 spin_unlock(&tbl->slot_tbl_lock);
739
740 args->sa_slot = slot;
741
742 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
743 slot->slot_nr, slot->seq_nr);
744
745 res->sr_slot = slot;
746 res->sr_timestamp = jiffies;
747 res->sr_status_flags = 0;
748 /*
749 * sr_status is only set in decode_sequence, and so will remain
750 * set to 1 if an rpc level failure occurs.
751 */
752 res->sr_status = 1;
753 trace_nfs4_setup_sequence(session, args);
754out_success:
755 rpc_call_start(task);
756 return 0;
757out_sleep:
758 /* Privileged tasks are queued with top priority */
759 if (args->sa_privileged)
760 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
761 NULL, RPC_PRIORITY_PRIVILEGED);
762 else
763 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
764 spin_unlock(&tbl->slot_tbl_lock);
765 return -EAGAIN;
766}
767EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
768
769static int nfs4_setup_sequence(const struct nfs_server *server,
770 struct nfs4_sequence_args *args,
771 struct nfs4_sequence_res *res,
772 struct rpc_task *task)
773{
774 struct nfs4_session *session = nfs4_get_session(server);
775 int ret = 0;
776
777 if (!session)
778 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
779 args, res, task);
780
781 dprintk("--> %s clp %p session %p sr_slot %u\n",
782 __func__, session->clp, session, res->sr_slot ?
783 res->sr_slot->slot_nr : NFS4_NO_SLOT);
784
785 ret = nfs41_setup_sequence(session, args, res, task);
786
787 dprintk("<-- %s status=%d\n", __func__, ret);
788 return ret;
789}
790
791static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
792{
793 struct nfs4_call_sync_data *data = calldata;
794 struct nfs4_session *session = nfs4_get_session(data->seq_server);
795
796 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
797
798 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
799}
800
801static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
802{
803 struct nfs4_call_sync_data *data = calldata;
804
805 nfs41_sequence_done(task, data->seq_res);
806}
807
808static const struct rpc_call_ops nfs41_call_sync_ops = {
809 .rpc_call_prepare = nfs41_call_sync_prepare,
810 .rpc_call_done = nfs41_call_sync_done,
811};
812
813#else /* !CONFIG_NFS_V4_1 */
814
815static int nfs4_setup_sequence(const struct nfs_server *server,
816 struct nfs4_sequence_args *args,
817 struct nfs4_sequence_res *res,
818 struct rpc_task *task)
819{
820 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
821 args, res, task);
822}
823
824int nfs4_sequence_done(struct rpc_task *task,
825 struct nfs4_sequence_res *res)
826{
827 return nfs40_sequence_done(task, res);
828}
829EXPORT_SYMBOL_GPL(nfs4_sequence_done);
830
831#endif /* !CONFIG_NFS_V4_1 */
832
833static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
834{
835 struct nfs4_call_sync_data *data = calldata;
836 nfs4_setup_sequence(data->seq_server,
837 data->seq_args, data->seq_res, task);
838}
839
840static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
841{
842 struct nfs4_call_sync_data *data = calldata;
843 nfs4_sequence_done(task, data->seq_res);
844}
845
846static const struct rpc_call_ops nfs40_call_sync_ops = {
847 .rpc_call_prepare = nfs40_call_sync_prepare,
848 .rpc_call_done = nfs40_call_sync_done,
849};
850
851static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
852 struct nfs_server *server,
853 struct rpc_message *msg,
854 struct nfs4_sequence_args *args,
855 struct nfs4_sequence_res *res)
856{
857 int ret;
858 struct rpc_task *task;
859 struct nfs_client *clp = server->nfs_client;
860 struct nfs4_call_sync_data data = {
861 .seq_server = server,
862 .seq_args = args,
863 .seq_res = res,
864 };
865 struct rpc_task_setup task_setup = {
866 .rpc_client = clnt,
867 .rpc_message = msg,
868 .callback_ops = clp->cl_mvops->call_sync_ops,
869 .callback_data = &data
870 };
871
872 task = rpc_run_task(&task_setup);
873 if (IS_ERR(task))
874 ret = PTR_ERR(task);
875 else {
876 ret = task->tk_status;
877 rpc_put_task(task);
878 }
879 return ret;
880}
881
882int nfs4_call_sync(struct rpc_clnt *clnt,
883 struct nfs_server *server,
884 struct rpc_message *msg,
885 struct nfs4_sequence_args *args,
886 struct nfs4_sequence_res *res,
887 int cache_reply)
888{
889 nfs4_init_sequence(args, res, cache_reply);
890 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
891}
892
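/*
 * Update the directory's cached change attribute after a namespace-changing
 * operation. If the change was not applied atomically, or the pre-operation
 * value no longer matches the cached i_version, force a lookup revalidation
 * of the directory's dentries.
 */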
893static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
894{
895 struct nfs_inode *nfsi = NFS_I(dir);
896
897 spin_lock(&dir->i_lock);
898 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
899 if (!cinfo->atomic || cinfo->before != dir->i_version)
900 nfs_force_lookup_revalidate(dir);
901 dir->i_version = cinfo->after;
902 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
903 nfs_fscache_invalidate(dir);
904 spin_unlock(&dir->i_lock);
905}
906
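/*
 * Bundle of arguments, results and referenced objects for a single OPEN
 * (and, when needed, OPEN_CONFIRM) compound. It is kref-counted because the
 * asynchronous RPC can outlive the caller that issued it; see the
 * 'cancelled' handling in the rpc_release callbacks.
 */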
907struct nfs4_opendata {
908 struct kref kref;
909 struct nfs_openargs o_arg;
910 struct nfs_openres o_res;
911 struct nfs_open_confirmargs c_arg;
912 struct nfs_open_confirmres c_res;
913 struct nfs4_string owner_name;
914 struct nfs4_string group_name;
915 struct nfs4_label *a_label;
916 struct nfs_fattr f_attr;
917 struct nfs4_label *f_label;
918 struct dentry *dir;
919 struct dentry *dentry;
920 struct nfs4_state_owner *owner;
921 struct nfs4_state *state;
922 struct iattr attrs;
923 unsigned long timestamp;
924 unsigned int rpc_done : 1;
925 unsigned int file_created : 1;
926 unsigned int is_recover : 1;
927 int rpc_status;
928 int cancelled;
929};
930
931static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
932 int err, struct nfs4_exception *exception)
933{
934 if (err != -EINVAL)
935 return false;
936 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
937 return false;
938 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
939 exception->retry = 1;
940 return true;
941}
942
943static u32
944nfs4_map_atomic_open_share(struct nfs_server *server,
945 fmode_t fmode, int openflags)
946{
947 u32 res = 0;
948
949 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
950 case FMODE_READ:
951 res = NFS4_SHARE_ACCESS_READ;
952 break;
953 case FMODE_WRITE:
954 res = NFS4_SHARE_ACCESS_WRITE;
955 break;
956 case FMODE_READ|FMODE_WRITE:
957 res = NFS4_SHARE_ACCESS_BOTH;
958 }
959 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
960 goto out;
961 /* Want no delegation if we're using O_DIRECT */
962 if (openflags & O_DIRECT)
963 res |= NFS4_SHARE_WANT_NO_DELEG;
964out:
965 return res;
966}
967
968static enum open_claim_type4
969nfs4_map_atomic_open_claim(struct nfs_server *server,
970 enum open_claim_type4 claim)
971{
972 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
973 return claim;
974 switch (claim) {
975 default:
976 return claim;
977 case NFS4_OPEN_CLAIM_FH:
978 return NFS4_OPEN_CLAIM_NULL;
979 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
980 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
981 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
982 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
983 }
984}
985
986static void nfs4_init_opendata_res(struct nfs4_opendata *p)
987{
988 p->o_res.f_attr = &p->f_attr;
989 p->o_res.f_label = p->f_label;
990 p->o_res.seqid = p->o_arg.seqid;
991 p->c_res.seqid = p->c_arg.seqid;
992 p->o_res.server = p->o_arg.server;
993 p->o_res.access_request = p->o_arg.access;
994 nfs_fattr_init(&p->f_attr);
995 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
996}
997
998static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
999 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1000 const struct iattr *attrs,
1001 struct nfs4_label *label,
1002 enum open_claim_type4 claim,
1003 gfp_t gfp_mask)
1004{
1005 struct dentry *parent = dget_parent(dentry);
1006 struct inode *dir = d_inode(parent);
1007 struct nfs_server *server = NFS_SERVER(dir);
1008 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1009 struct nfs4_opendata *p;
1010
1011 p = kzalloc(sizeof(*p), gfp_mask);
1012 if (p == NULL)
1013 goto err;
1014
1015 p->f_label = nfs4_label_alloc(server, gfp_mask);
1016 if (IS_ERR(p->f_label))
1017 goto err_free_p;
1018
1019 p->a_label = nfs4_label_alloc(server, gfp_mask);
1020 if (IS_ERR(p->a_label))
1021 goto err_free_f;
1022
1023 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1024 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1025 if (IS_ERR(p->o_arg.seqid))
1026 goto err_free_label;
1027 nfs_sb_active(dentry->d_sb);
1028 p->dentry = dget(dentry);
1029 p->dir = parent;
1030 p->owner = sp;
1031 atomic_inc(&sp->so_count);
1032 p->o_arg.open_flags = flags;
1033 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1034 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1035 fmode, flags);
1036 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1037 * will return permission denied for all bits until close */
1038 if (!(flags & O_EXCL)) {
1039 /* ask server to check for all possible rights as results
1040 * are cached */
1041 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1042 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1043 }
1044 p->o_arg.clientid = server->nfs_client->cl_clientid;
1045 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1046 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1047 p->o_arg.name = &dentry->d_name;
1048 p->o_arg.server = server;
1049 p->o_arg.bitmask = nfs4_bitmask(server, label);
1050 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1051 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1052 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1053 switch (p->o_arg.claim) {
1054 case NFS4_OPEN_CLAIM_NULL:
1055 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1056 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1057 p->o_arg.fh = NFS_FH(dir);
1058 break;
1059 case NFS4_OPEN_CLAIM_PREVIOUS:
1060 case NFS4_OPEN_CLAIM_FH:
1061 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1062 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1063 p->o_arg.fh = NFS_FH(d_inode(dentry));
1064 }
1065 if (attrs != NULL && attrs->ia_valid != 0) {
1066 __u32 verf[2];
1067
1068 p->o_arg.u.attrs = &p->attrs;
1069 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1070
1071 verf[0] = jiffies;
1072 verf[1] = current->pid;
1073 memcpy(p->o_arg.u.verifier.data, verf,
1074 sizeof(p->o_arg.u.verifier.data));
1075 }
1076 p->c_arg.fh = &p->o_res.fh;
1077 p->c_arg.stateid = &p->o_res.stateid;
1078 p->c_arg.seqid = p->o_arg.seqid;
1079 nfs4_init_opendata_res(p);
1080 kref_init(&p->kref);
1081 return p;
1082
1083err_free_label:
1084 nfs4_label_free(p->a_label);
1085err_free_f:
1086 nfs4_label_free(p->f_label);
1087err_free_p:
1088 kfree(p);
1089err:
1090 dput(parent);
1091 return NULL;
1092}
1093
1094static void nfs4_opendata_free(struct kref *kref)
1095{
1096 struct nfs4_opendata *p = container_of(kref,
1097 struct nfs4_opendata, kref);
1098 struct super_block *sb = p->dentry->d_sb;
1099
1100 nfs_free_seqid(p->o_arg.seqid);
1101 if (p->state != NULL)
1102 nfs4_put_open_state(p->state);
1103 nfs4_put_state_owner(p->owner);
1104
1105 nfs4_label_free(p->a_label);
1106 nfs4_label_free(p->f_label);
1107
1108 dput(p->dir);
1109 dput(p->dentry);
1110 nfs_sb_deactive(sb);
1111 nfs_fattr_free_names(&p->f_attr);
1112 kfree(p->f_attr.mdsthreshold);
1113 kfree(p);
1114}
1115
1116static void nfs4_opendata_put(struct nfs4_opendata *p)
1117{
1118 if (p != NULL)
1119 kref_put(&p->kref, nfs4_opendata_free);
1120}
1121
1122static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1123{
1124 int ret;
1125
1126 ret = rpc_wait_for_completion_task(task);
1127 return ret;
1128}
1129
1130static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1131 fmode_t fmode)
1132{
1133 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1134 case FMODE_READ|FMODE_WRITE:
1135 return state->n_rdwr != 0;
1136 case FMODE_WRITE:
1137 return state->n_wronly != 0;
1138 case FMODE_READ:
1139 return state->n_rdonly != 0;
1140 }
1141 WARN_ON_ONCE(1);
1142 return false;
1143}
1144
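/*
 * Return non-zero if an open stateid we already hold covers the requested
 * open mode, in which case no new OPEN needs to go on the wire.
 * O_EXCL and O_TRUNC always force an OPEN call to the server.
 */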
1145static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1146{
1147 int ret = 0;
1148
1149 if (open_mode & (O_EXCL|O_TRUNC))
1150 goto out;
1151 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1152 case FMODE_READ:
1153 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1154 && state->n_rdonly != 0;
1155 break;
1156 case FMODE_WRITE:
1157 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1158 && state->n_wronly != 0;
1159 break;
1160 case FMODE_READ|FMODE_WRITE:
1161 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1162 && state->n_rdwr != 0;
1163 }
1164out:
1165 return ret;
1166}
1167
1168static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1169 enum open_claim_type4 claim)
1170{
1171 if (delegation == NULL)
1172 return 0;
1173 if ((delegation->type & fmode) != fmode)
1174 return 0;
1175 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1176 return 0;
1177 switch (claim) {
1178 case NFS4_OPEN_CLAIM_NULL:
1179 case NFS4_OPEN_CLAIM_FH:
1180 break;
1181 case NFS4_OPEN_CLAIM_PREVIOUS:
1182 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1183 break;
1184 default:
1185 return 0;
1186 }
1187 nfs_mark_delegation_referenced(delegation);
1188 return 1;
1189}
1190
1191static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1192{
1193 switch (fmode) {
1194 case FMODE_WRITE:
1195 state->n_wronly++;
1196 break;
1197 case FMODE_READ:
1198 state->n_rdonly++;
1199 break;
1200 case FMODE_READ|FMODE_WRITE:
1201 state->n_rdwr++;
1202 }
1203 nfs4_state_set_mode_locked(state, state->state | fmode);
1204}
1205
1206static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1207{
1208 struct nfs_client *clp = state->owner->so_server->nfs_client;
1209 bool need_recover = false;
1210
1211 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1212 need_recover = true;
1213 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1214 need_recover = true;
1215 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1216 need_recover = true;
1217 if (need_recover)
1218 nfs4_state_mark_reclaim_nograce(clp, state);
1219}
1220
1221static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1222 nfs4_stateid *stateid)
1223{
1224 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1225 return true;
1226 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1227 nfs_test_and_clear_all_open_stateid(state);
1228 return true;
1229 }
1230 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1231 return true;
1232 return false;
1233}
1234
1235static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1236{
1237 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1238 return;
1239 if (state->n_wronly)
1240 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1241 if (state->n_rdonly)
1242 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1243 if (state->n_rdwr)
1244 set_bit(NFS_O_RDWR_STATE, &state->flags);
1245 set_bit(NFS_OPEN_STATE, &state->flags);
1246}
1247
1248static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1249 nfs4_stateid *arg_stateid,
1250 nfs4_stateid *stateid, fmode_t fmode)
1251{
1252 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1253 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1254 case FMODE_WRITE:
1255 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1256 break;
1257 case FMODE_READ:
1258 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1259 break;
1260 case 0:
1261 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1262 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1263 clear_bit(NFS_OPEN_STATE, &state->flags);
1264 }
1265 if (stateid == NULL)
1266 return;
1267 /* Handle races with OPEN */
1268 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
1269 (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1270 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1271 nfs_resync_open_stateid_locked(state);
1272 return;
1273 }
1274 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1275 nfs4_stateid_copy(&state->stateid, stateid);
1276 nfs4_stateid_copy(&state->open_stateid, stateid);
1277}
1278
1279static void nfs_clear_open_stateid(struct nfs4_state *state,
1280 nfs4_stateid *arg_stateid,
1281 nfs4_stateid *stateid, fmode_t fmode)
1282{
1283 write_seqlock(&state->seqlock);
1284 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
1285 write_sequnlock(&state->seqlock);
1286 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1287 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1288}
1289
1290static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1291{
1292 switch (fmode) {
1293 case FMODE_READ:
1294 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1295 break;
1296 case FMODE_WRITE:
1297 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1298 break;
1299 case FMODE_READ|FMODE_WRITE:
1300 set_bit(NFS_O_RDWR_STATE, &state->flags);
1301 }
1302 if (!nfs_need_update_open_stateid(state, stateid))
1303 return;
1304 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1305 nfs4_stateid_copy(&state->stateid, stateid);
1306 nfs4_stateid_copy(&state->open_stateid, stateid);
1307}
1308
1309static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1310{
1311 /*
1312 * Protect the call to nfs4_state_set_mode_locked and
1313 * serialise the stateid update
1314 */
1315 write_seqlock(&state->seqlock);
1316 if (deleg_stateid != NULL) {
1317 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1318 set_bit(NFS_DELEGATED_STATE, &state->flags);
1319 }
1320 if (open_stateid != NULL)
1321 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1322 write_sequnlock(&state->seqlock);
1323 spin_lock(&state->owner->so_lock);
1324 update_open_stateflags(state, fmode);
1325 spin_unlock(&state->owner->so_lock);
1326}
1327
1328static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1329{
1330 struct nfs_inode *nfsi = NFS_I(state->inode);
1331 struct nfs_delegation *deleg_cur;
1332 int ret = 0;
1333
1334 fmode &= (FMODE_READ|FMODE_WRITE);
1335
1336 rcu_read_lock();
1337 deleg_cur = rcu_dereference(nfsi->delegation);
1338 if (deleg_cur == NULL)
1339 goto no_delegation;
1340
1341 spin_lock(&deleg_cur->lock);
1342 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1343 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1344 (deleg_cur->type & fmode) != fmode)
1345 goto no_delegation_unlock;
1346
1347 if (delegation == NULL)
1348 delegation = &deleg_cur->stateid;
1349 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1350 goto no_delegation_unlock;
1351
1352 nfs_mark_delegation_referenced(deleg_cur);
1353 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1354 ret = 1;
1355no_delegation_unlock:
1356 spin_unlock(&deleg_cur->lock);
1357no_delegation:
1358 rcu_read_unlock();
1359
1360 if (!ret && open_stateid != NULL) {
1361 __update_open_stateid(state, open_stateid, NULL, fmode);
1362 ret = 1;
1363 }
1364 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1365 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1366
1367 return ret;
1368}
1369
1370static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1371 const nfs4_stateid *stateid)
1372{
1373 struct nfs4_state *state = lsp->ls_state;
1374 bool ret = false;
1375
1376 spin_lock(&state->state_lock);
1377 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1378 goto out_noupdate;
1379 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1380 goto out_noupdate;
1381 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1382 ret = true;
1383out_noupdate:
1384 spin_unlock(&state->state_lock);
1385 return ret;
1386}
1387
1388static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1389{
1390 struct nfs_delegation *delegation;
1391
1392 rcu_read_lock();
1393 delegation = rcu_dereference(NFS_I(inode)->delegation);
1394 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1395 rcu_read_unlock();
1396 return;
1397 }
1398 rcu_read_unlock();
1399 nfs4_inode_return_delegation(inode);
1400}
1401
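/*
 * Try to satisfy an OPEN from state the client already holds: either an
 * existing open stateid of a compatible mode, or a delegation whose stateid
 * can be recorded in the nfs4_state. Returns a referenced state on success,
 * or an ERR_PTR (typically -EAGAIN) if an OPEN must be sent after all.
 */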
1402static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1403{
1404 struct nfs4_state *state = opendata->state;
1405 struct nfs_inode *nfsi = NFS_I(state->inode);
1406 struct nfs_delegation *delegation;
1407 int open_mode = opendata->o_arg.open_flags;
1408 fmode_t fmode = opendata->o_arg.fmode;
1409 enum open_claim_type4 claim = opendata->o_arg.claim;
1410 nfs4_stateid stateid;
1411 int ret = -EAGAIN;
1412
1413 for (;;) {
1414 spin_lock(&state->owner->so_lock);
1415 if (can_open_cached(state, fmode, open_mode)) {
1416 update_open_stateflags(state, fmode);
1417 spin_unlock(&state->owner->so_lock);
1418 goto out_return_state;
1419 }
1420 spin_unlock(&state->owner->so_lock);
1421 rcu_read_lock();
1422 delegation = rcu_dereference(nfsi->delegation);
1423 if (!can_open_delegated(delegation, fmode, claim)) {
1424 rcu_read_unlock();
1425 break;
1426 }
1427 /* Save the delegation */
1428 nfs4_stateid_copy(&stateid, &delegation->stateid);
1429 rcu_read_unlock();
1430 nfs_release_seqid(opendata->o_arg.seqid);
1431 if (!opendata->is_recover) {
1432 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1433 if (ret != 0)
1434 goto out;
1435 }
1436 ret = -EAGAIN;
1437
1438 /* Try to update the stateid using the delegation */
1439 if (update_open_stateid(state, NULL, &stateid, fmode))
1440 goto out_return_state;
1441 }
1442out:
1443 return ERR_PTR(ret);
1444out_return_state:
1445 atomic_inc(&state->count);
1446 return state;
1447}
1448
1449static void
1450nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1451{
1452 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1453 struct nfs_delegation *delegation;
1454 int delegation_flags = 0;
1455
1456 rcu_read_lock();
1457 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1458 if (delegation)
1459 delegation_flags = delegation->flags;
1460 rcu_read_unlock();
1461 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1462 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1463 "returning a delegation for "
1464 "OPEN(CLAIM_DELEGATE_CUR)\n",
1465 clp->cl_hostname);
1466 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1467 nfs_inode_set_delegation(state->inode,
1468 data->owner->so_cred,
1469 &data->o_res);
1470 else
1471 nfs_inode_reclaim_delegation(state->inode,
1472 data->owner->so_cred,
1473 &data->o_res);
1474}
1475
1476/*
1477 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1478 * and update the nfs4_state.
1479 */
1480static struct nfs4_state *
1481_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1482{
1483 struct inode *inode = data->state->inode;
1484 struct nfs4_state *state = data->state;
1485 int ret;
1486
1487 if (!data->rpc_done) {
1488 if (data->rpc_status) {
1489 ret = data->rpc_status;
1490 goto err;
1491 }
1492 /* cached opens have already been processed */
1493 goto update;
1494 }
1495
1496 ret = nfs_refresh_inode(inode, &data->f_attr);
1497 if (ret)
1498 goto err;
1499
1500 if (data->o_res.delegation_type != 0)
1501 nfs4_opendata_check_deleg(data, state);
1502update:
1503 update_open_stateid(state, &data->o_res.stateid, NULL,
1504 data->o_arg.fmode);
1505 atomic_inc(&state->count);
1506
1507 return state;
1508err:
1509 return ERR_PTR(ret);
1510
1511}
1512
1513static struct nfs4_state *
1514_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1515{
1516 struct inode *inode;
1517 struct nfs4_state *state = NULL;
1518 int ret;
1519
1520 if (!data->rpc_done) {
1521 state = nfs4_try_open_cached(data);
1522 goto out;
1523 }
1524
1525 ret = -EAGAIN;
1526 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1527 goto err;
1528 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1529 ret = PTR_ERR(inode);
1530 if (IS_ERR(inode))
1531 goto err;
1532 ret = -ENOMEM;
1533 state = nfs4_get_open_state(inode, data->owner);
1534 if (state == NULL)
1535 goto err_put_inode;
1536 if (data->o_res.delegation_type != 0)
1537 nfs4_opendata_check_deleg(data, state);
1538 update_open_stateid(state, &data->o_res.stateid, NULL,
1539 data->o_arg.fmode);
1540 iput(inode);
1541out:
1542 nfs_release_seqid(data->o_arg.seqid);
1543 return state;
1544err_put_inode:
1545 iput(inode);
1546err:
1547 return ERR_PTR(ret);
1548}
1549
1550static struct nfs4_state *
1551nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1552{
1553 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1554 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1555 return _nfs4_opendata_to_nfs4_state(data);
1556}
1557
1558static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1559{
1560 struct nfs_inode *nfsi = NFS_I(state->inode);
1561 struct nfs_open_context *ctx;
1562
1563 spin_lock(&state->inode->i_lock);
1564 list_for_each_entry(ctx, &nfsi->open_files, list) {
1565 if (ctx->state != state)
1566 continue;
1567 get_nfs_open_context(ctx);
1568 spin_unlock(&state->inode->i_lock);
1569 return ctx;
1570 }
1571 spin_unlock(&state->inode->i_lock);
1572 return ERR_PTR(-ENOENT);
1573}
1574
1575static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1576 struct nfs4_state *state, enum open_claim_type4 claim)
1577{
1578 struct nfs4_opendata *opendata;
1579
1580 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1581 NULL, NULL, claim, GFP_NOFS);
1582 if (opendata == NULL)
1583 return ERR_PTR(-ENOMEM);
1584 opendata->state = state;
1585 atomic_inc(&state->count);
1586 return opendata;
1587}
1588
1589static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1590 fmode_t fmode)
1591{
1592 struct nfs4_state *newstate;
1593 int ret;
1594
1595 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1596 return 0;
1597 opendata->o_arg.open_flags = 0;
1598 opendata->o_arg.fmode = fmode;
1599 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1600 NFS_SB(opendata->dentry->d_sb),
1601 fmode, 0);
1602 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1603 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1604 nfs4_init_opendata_res(opendata);
1605 ret = _nfs4_recover_proc_open(opendata);
1606 if (ret != 0)
1607 return ret;
1608 newstate = nfs4_opendata_to_nfs4_state(opendata);
1609 if (IS_ERR(newstate))
1610 return PTR_ERR(newstate);
1611 if (newstate != opendata->state)
1612 ret = -ESTALE;
1613 nfs4_close_state(newstate, fmode);
1614 return ret;
1615}
1616
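/*
 * Recover the open state by replaying an OPEN for each share mode the state
 * currently holds (read/write, write-only, read-only). The mode bits are
 * cleared first so that the replies rebuild them from scratch; afterwards
 * state->stateid is resynchronized with the open stateid unless a delegation
 * is still held.
 */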
1617static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1618{
1619 int ret;
1620
1621 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1622 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1623 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1624 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1625 /* memory barrier prior to reading state->n_* */
1626 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1627 clear_bit(NFS_OPEN_STATE, &state->flags);
1628 smp_rmb();
1629 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1630 if (ret != 0)
1631 return ret;
1632 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1633 if (ret != 0)
1634 return ret;
1635 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1636 if (ret != 0)
1637 return ret;
1638 /*
1639 * We may have performed cached opens for all three recoveries.
1640 * Check if we need to update the current stateid.
1641 */
1642 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1643 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1644 write_seqlock(&state->seqlock);
1645 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1646 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1647 write_sequnlock(&state->seqlock);
1648 }
1649 return 0;
1650}
1651
1652/*
1653 * OPEN_RECLAIM:
1654 * reclaim state on the server after a reboot.
1655 */
1656static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1657{
1658 struct nfs_delegation *delegation;
1659 struct nfs4_opendata *opendata;
1660 fmode_t delegation_type = 0;
1661 int status;
1662
1663 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1664 NFS4_OPEN_CLAIM_PREVIOUS);
1665 if (IS_ERR(opendata))
1666 return PTR_ERR(opendata);
1667 rcu_read_lock();
1668 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1669 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1670 delegation_type = delegation->type;
1671 rcu_read_unlock();
1672 opendata->o_arg.u.delegation_type = delegation_type;
1673 status = nfs4_open_recover(opendata, state);
1674 nfs4_opendata_put(opendata);
1675 return status;
1676}
1677
1678static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1679{
1680 struct nfs_server *server = NFS_SERVER(state->inode);
1681 struct nfs4_exception exception = { };
1682 int err;
1683 do {
1684 err = _nfs4_do_open_reclaim(ctx, state);
1685 trace_nfs4_open_reclaim(ctx, 0, err);
1686 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1687 continue;
1688 if (err != -NFS4ERR_DELAY)
1689 break;
1690 nfs4_handle_exception(server, err, &exception);
1691 } while (exception.retry);
1692 return err;
1693}
1694
1695static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1696{
1697 struct nfs_open_context *ctx;
1698 int ret;
1699
1700 ctx = nfs4_state_find_open_context(state);
1701 if (IS_ERR(ctx))
1702 return -EAGAIN;
1703 ret = nfs4_do_open_reclaim(ctx, state);
1704 put_nfs_open_context(ctx);
1705 return ret;
1706}
1707
1708static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1709{
1710 switch (err) {
1711 default:
1712 printk(KERN_ERR "NFS: %s: unhandled error "
1713 "%d.\n", __func__, err);
1714 case 0:
1715 case -ENOENT:
1716 case -EAGAIN:
1717 case -ESTALE:
1718 break;
1719 case -NFS4ERR_BADSESSION:
1720 case -NFS4ERR_BADSLOT:
1721 case -NFS4ERR_BAD_HIGH_SLOT:
1722 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1723 case -NFS4ERR_DEADSESSION:
1724 set_bit(NFS_DELEGATED_STATE, &state->flags);
1725 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1726 return -EAGAIN;
1727 case -NFS4ERR_STALE_CLIENTID:
1728 case -NFS4ERR_STALE_STATEID:
1729 set_bit(NFS_DELEGATED_STATE, &state->flags);
1730 case -NFS4ERR_EXPIRED:
1731 /* Don't recall a delegation if it was lost */
1732 nfs4_schedule_lease_recovery(server->nfs_client);
1733 return -EAGAIN;
1734 case -NFS4ERR_MOVED:
1735 nfs4_schedule_migration_recovery(server);
1736 return -EAGAIN;
1737 case -NFS4ERR_LEASE_MOVED:
1738 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1739 return -EAGAIN;
1740 case -NFS4ERR_DELEG_REVOKED:
1741 case -NFS4ERR_ADMIN_REVOKED:
1742 case -NFS4ERR_BAD_STATEID:
1743 case -NFS4ERR_OPENMODE:
1744 nfs_inode_find_state_and_recover(state->inode,
1745 stateid);
1746 nfs4_schedule_stateid_recovery(server, state);
1747 return -EAGAIN;
1748 case -NFS4ERR_DELAY:
1749 case -NFS4ERR_GRACE:
1750 set_bit(NFS_DELEGATED_STATE, &state->flags);
1751 ssleep(1);
1752 return -EAGAIN;
1753 case -ENOMEM:
1754 case -NFS4ERR_DENIED:
1755 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1756 return 0;
1757 }
1758 return err;
1759}
1760
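/*
 * Called when the server recalls a delegation: re-establish real open
 * stateids via OPEN(CLAIM_DELEGATE_CUR_FH) for every open mode that the
 * recalled delegation was covering, so that the delegation can then be
 * returned safely. Note the deliberate fall-through from the write cases
 * to FMODE_READ in the switch below.
 */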
1761int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1762 struct nfs4_state *state, const nfs4_stateid *stateid,
1763 fmode_t type)
1764{
1765 struct nfs_server *server = NFS_SERVER(state->inode);
1766 struct nfs4_opendata *opendata;
1767 int err = 0;
1768
1769 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1770 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1771 if (IS_ERR(opendata))
1772 return PTR_ERR(opendata);
1773 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1774 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1775 switch (type & (FMODE_READ|FMODE_WRITE)) {
1776 case FMODE_READ|FMODE_WRITE:
1777 case FMODE_WRITE:
1778 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1779 if (err)
1780 break;
1781 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1782 if (err)
1783 break;
1784 case FMODE_READ:
1785 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1786 }
1787 nfs4_opendata_put(opendata);
1788 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1789}
1790
1791static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1792{
1793 struct nfs4_opendata *data = calldata;
1794
1795 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1796 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1797}
1798
1799static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1800{
1801 struct nfs4_opendata *data = calldata;
1802
1803 nfs40_sequence_done(task, &data->c_res.seq_res);
1804
1805 data->rpc_status = task->tk_status;
1806 if (data->rpc_status == 0) {
1807 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1808 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1809 renew_lease(data->o_res.server, data->timestamp);
1810 data->rpc_done = 1;
1811 }
1812}
1813
1814static void nfs4_open_confirm_release(void *calldata)
1815{
1816 struct nfs4_opendata *data = calldata;
1817 struct nfs4_state *state = NULL;
1818
1819 /* If this request hasn't been cancelled, do nothing */
1820 if (data->cancelled == 0)
1821 goto out_free;
1822 /* In case of error, no cleanup! */
1823 if (!data->rpc_done)
1824 goto out_free;
1825 state = nfs4_opendata_to_nfs4_state(data);
1826 if (!IS_ERR(state))
1827 nfs4_close_state(state, data->o_arg.fmode);
1828out_free:
1829 nfs4_opendata_put(data);
1830}
1831
1832static const struct rpc_call_ops nfs4_open_confirm_ops = {
1833 .rpc_call_prepare = nfs4_open_confirm_prepare,
1834 .rpc_call_done = nfs4_open_confirm_done,
1835 .rpc_release = nfs4_open_confirm_release,
1836};
1837
1838/*
1839 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1840 */
1841static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1842{
1843 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1844 struct rpc_task *task;
1845 struct rpc_message msg = {
1846 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1847 .rpc_argp = &data->c_arg,
1848 .rpc_resp = &data->c_res,
1849 .rpc_cred = data->owner->so_cred,
1850 };
1851 struct rpc_task_setup task_setup_data = {
1852 .rpc_client = server->client,
1853 .rpc_message = &msg,
1854 .callback_ops = &nfs4_open_confirm_ops,
1855 .callback_data = data,
1856 .workqueue = nfsiod_workqueue,
1857 .flags = RPC_TASK_ASYNC,
1858 };
1859 int status;
1860
1861 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1862 kref_get(&data->kref);
1863 data->rpc_done = 0;
1864 data->rpc_status = 0;
1865 data->timestamp = jiffies;
1866 task = rpc_run_task(&task_setup_data);
1867 if (IS_ERR(task))
1868 return PTR_ERR(task);
1869 status = nfs4_wait_for_completion_rpc_task(task);
1870 if (status != 0) {
1871 data->cancelled = 1;
1872 smp_wmb();
1873 } else
1874 status = data->rpc_status;
1875 rpc_put_task(task);
1876 return status;
1877}
1878
1879static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1880{
1881 struct nfs4_opendata *data = calldata;
1882 struct nfs4_state_owner *sp = data->owner;
1883 struct nfs_client *clp = sp->so_server->nfs_client;
1884 enum open_claim_type4 claim = data->o_arg.claim;
1885
1886 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1887 goto out_wait;
1888 /*
1889 * Check if we still need to send an OPEN call, or if we can use
1890 * a delegation instead.
1891 */
1892 if (data->state != NULL) {
1893 struct nfs_delegation *delegation;
1894
1895 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1896 goto out_no_action;
1897 rcu_read_lock();
1898 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1899 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
1900 goto unlock_no_action;
1901 rcu_read_unlock();
1902 }
1903 /* Update client id. */
1904 data->o_arg.clientid = clp->cl_clientid;
1905 switch (claim) {
1906 default:
1907 break;
1908 case NFS4_OPEN_CLAIM_PREVIOUS:
1909 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1910 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1911 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
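		/* Fall through */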
1912 case NFS4_OPEN_CLAIM_FH:
1913 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1914 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1915 }
1916 data->timestamp = jiffies;
1917 if (nfs4_setup_sequence(data->o_arg.server,
1918 &data->o_arg.seq_args,
1919 &data->o_res.seq_res,
1920 task) != 0)
1921 nfs_release_seqid(data->o_arg.seqid);
1922
1923 /* Set the create mode (note dependency on the session type) */
1924 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
1925 if (data->o_arg.open_flags & O_EXCL) {
1926 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
1927 if (nfs4_has_persistent_session(clp))
1928 data->o_arg.createmode = NFS4_CREATE_GUARDED;
1929 else if (clp->cl_mvops->minor_version > 0)
1930 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
1931 }
1932 return;
1933unlock_no_action:
1934 rcu_read_unlock();
1935out_no_action:
1936 task->tk_action = NULL;
1937out_wait:
1938 nfs4_sequence_done(task, &data->o_res.seq_res);
1939}
1940
1941static void nfs4_open_done(struct rpc_task *task, void *calldata)
1942{
1943 struct nfs4_opendata *data = calldata;
1944
1945 data->rpc_status = task->tk_status;
1946
1947 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1948 return;
1949
1950 if (task->tk_status == 0) {
1951 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1952 switch (data->o_res.f_attr->mode & S_IFMT) {
1953 case S_IFREG:
1954 break;
1955 case S_IFLNK:
1956 data->rpc_status = -ELOOP;
1957 break;
1958 case S_IFDIR:
1959 data->rpc_status = -EISDIR;
1960 break;
1961 default:
1962 data->rpc_status = -ENOTDIR;
1963 }
1964 }
1965 renew_lease(data->o_res.server, data->timestamp);
1966 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1967 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1968 }
1969 data->rpc_done = 1;
1970}
1971
1972static void nfs4_open_release(void *calldata)
1973{
1974 struct nfs4_opendata *data = calldata;
1975 struct nfs4_state *state = NULL;
1976
1977 /* If this request hasn't been cancelled, do nothing */
1978 if (data->cancelled == 0)
1979 goto out_free;
1980 /* In case of error, no cleanup! */
1981 if (data->rpc_status != 0 || !data->rpc_done)
1982 goto out_free;
1983 /* In case we need an open_confirm, no cleanup! */
1984 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1985 goto out_free;
1986 state = nfs4_opendata_to_nfs4_state(data);
1987 if (!IS_ERR(state))
1988 nfs4_close_state(state, data->o_arg.fmode);
1989out_free:
1990 nfs4_opendata_put(data);
1991}
1992
1993static const struct rpc_call_ops nfs4_open_ops = {
1994 .rpc_call_prepare = nfs4_open_prepare,
1995 .rpc_call_done = nfs4_open_done,
1996 .rpc_release = nfs4_open_release,
1997};
1998
1999static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2000{
2001 struct inode *dir = d_inode(data->dir);
2002 struct nfs_server *server = NFS_SERVER(dir);
2003 struct nfs_openargs *o_arg = &data->o_arg;
2004 struct nfs_openres *o_res = &data->o_res;
2005 struct rpc_task *task;
2006 struct rpc_message msg = {
2007 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2008 .rpc_argp = o_arg,
2009 .rpc_resp = o_res,
2010 .rpc_cred = data->owner->so_cred,
2011 };
2012 struct rpc_task_setup task_setup_data = {
2013 .rpc_client = server->client,
2014 .rpc_message = &msg,
2015 .callback_ops = &nfs4_open_ops,
2016 .callback_data = data,
2017 .workqueue = nfsiod_workqueue,
2018 .flags = RPC_TASK_ASYNC,
2019 };
2020 int status;
2021
2022 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2023 kref_get(&data->kref);
2024 data->rpc_done = 0;
2025 data->rpc_status = 0;
2026 data->cancelled = 0;
2027 data->is_recover = 0;
2028 if (isrecover) {
2029 nfs4_set_sequence_privileged(&o_arg->seq_args);
2030 data->is_recover = 1;
2031 }
2032 task = rpc_run_task(&task_setup_data);
2033 if (IS_ERR(task))
2034 return PTR_ERR(task);
2035 status = nfs4_wait_for_completion_rpc_task(task);
2036 if (status != 0) {
2037 data->cancelled = 1;
2038 smp_wmb();
2039 } else
2040 status = data->rpc_status;
2041 rpc_put_task(task);
2042
2043 return status;
2044}
2045
2046static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2047{
2048 struct inode *dir = d_inode(data->dir);
2049 struct nfs_openres *o_res = &data->o_res;
2050 int status;
2051
2052 status = nfs4_run_open_task(data, 1);
2053 if (status != 0 || !data->rpc_done)
2054 return status;
2055
2056 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2057
2058 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2059 status = _nfs4_proc_open_confirm(data);
2060 if (status != 0)
2061 return status;
2062 }
2063
2064 return status;
2065}
2066
2067/*
2068 * Additional permission checks in order to distinguish between an
2069 * open for read and an open for execute. This works around the
2070 * fact that NFSv4 OPEN treats read and execute permissions as being
2071 * the same.
2072 * Note that in the non-execute case, we want to turn off permission
2073 * checking if we just created a new file (POSIX open() semantics).
2074 */
2075static int nfs4_opendata_access(struct rpc_cred *cred,
2076 struct nfs4_opendata *opendata,
2077 struct nfs4_state *state, fmode_t fmode,
2078 int openflags)
2079{
2080 struct nfs_access_entry cache;
2081 u32 mask;
2082
2083 /* access call failed or for some reason the server doesn't
2084 * support any access modes -- defer access call until later */
2085 if (opendata->o_res.access_supported == 0)
2086 return 0;
2087
2088 mask = 0;
2089 /*
2090 * Use openflags to check for exec, because fmode won't
2091	 * always have FMODE_EXEC set when the file is opened for exec.
2092 */
2093 if (openflags & __FMODE_EXEC) {
2094 /* ONLY check for exec rights */
2095 mask = MAY_EXEC;
2096 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2097 mask = MAY_READ;
2098
2099 cache.cred = cred;
2100 cache.jiffies = jiffies;
2101 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2102 nfs_access_add_cache(state->inode, &cache);
2103
2104 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2105 return 0;
2106
2107 /* even though OPEN succeeded, access is denied. Close the file */
2108 nfs4_close_state(state, fmode);
2109 return -EACCES;
2110}
2111
2112/*
2113 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2114 */
2115static int _nfs4_proc_open(struct nfs4_opendata *data)
2116{
2117 struct inode *dir = d_inode(data->dir);
2118 struct nfs_server *server = NFS_SERVER(dir);
2119 struct nfs_openargs *o_arg = &data->o_arg;
2120 struct nfs_openres *o_res = &data->o_res;
2121 int status;
2122
2123 status = nfs4_run_open_task(data, 0);
2124 if (!data->rpc_done)
2125 return status;
2126 if (status != 0) {
2127 if (status == -NFS4ERR_BADNAME &&
2128 !(o_arg->open_flags & O_CREAT))
2129 return -ENOENT;
2130 return status;
2131 }
2132
2133 nfs_fattr_map_and_free_names(server, &data->f_attr);
2134
2135 if (o_arg->open_flags & O_CREAT) {
2136 update_changeattr(dir, &o_res->cinfo);
2137 if (o_arg->open_flags & O_EXCL)
2138 data->file_created = 1;
2139 else if (o_res->cinfo.before != o_res->cinfo.after)
2140 data->file_created = 1;
2141 }
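	/*
	 * Drop the POSIX-lock capability unless the server promises POSIX
	 * byte-range lock semantics for this open.
	 */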
2142 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2143 server->caps &= ~NFS_CAP_POSIX_LOCK;
2144	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2145 status = _nfs4_proc_open_confirm(data);
2146 if (status != 0)
2147 return status;
2148 }
2149 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2150 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2151 return 0;
2152}
2153
2154static int nfs4_recover_expired_lease(struct nfs_server *server)
2155{
2156 return nfs4_client_recover_expired_lease(server->nfs_client);
2157}
2158
2159/*
2160 * OPEN_EXPIRED:
2161 * reclaim state on the server after a network partition.
2162 * Assumes caller holds the appropriate lock
2163 */
2164static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2165{
2166 struct nfs4_opendata *opendata;
2167 int ret;
2168
2169 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2170 NFS4_OPEN_CLAIM_FH);
2171 if (IS_ERR(opendata))
2172 return PTR_ERR(opendata);
2173 ret = nfs4_open_recover(opendata, state);
2174 if (ret == -ESTALE)
2175 d_drop(ctx->dentry);
2176 nfs4_opendata_put(opendata);
2177 return ret;
2178}
2179
2180static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2181{
2182 struct nfs_server *server = NFS_SERVER(state->inode);
2183 struct nfs4_exception exception = { };
2184 int err;
2185
2186 do {
2187 err = _nfs4_open_expired(ctx, state);
2188 trace_nfs4_open_expired(ctx, 0, err);
2189 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2190 continue;
2191 switch (err) {
2192 default:
2193 goto out;
2194 case -NFS4ERR_GRACE:
2195 case -NFS4ERR_DELAY:
2196 nfs4_handle_exception(server, err, &exception);
2197 err = 0;
2198 }
2199 } while (exception.retry);
2200out:
2201 return err;
2202}
2203
2204static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2205{
2206 struct nfs_open_context *ctx;
2207 int ret;
2208
2209 ctx = nfs4_state_find_open_context(state);
2210 if (IS_ERR(ctx))
2211 return -EAGAIN;
2212 ret = nfs4_do_open_expired(ctx, state);
2213 put_nfs_open_context(ctx);
2214 return ret;
2215}
2216
2217static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2218{
2219 nfs_remove_bad_delegation(state->inode);
2220 write_seqlock(&state->seqlock);
2221 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2222 write_sequnlock(&state->seqlock);
2223 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2224}
2225
2226static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2227{
2228 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2229 nfs_finish_clear_delegation_stateid(state);
2230}
2231
2232static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2233{
2234 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2235 nfs40_clear_delegation_stateid(state);
2236 return nfs4_open_expired(sp, state);
2237}
2238
2239#if defined(CONFIG_NFS_V4_1)
2240static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2241{
2242 struct nfs_server *server = NFS_SERVER(state->inode);
2243 nfs4_stateid stateid;
2244 struct nfs_delegation *delegation;
2245 struct rpc_cred *cred;
2246 int status;
2247
2248 /* Get the delegation credential for use by test/free_stateid */
2249 rcu_read_lock();
2250 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2251 if (delegation == NULL) {
2252 rcu_read_unlock();
2253 return;
2254 }
2255
2256 nfs4_stateid_copy(&stateid, &delegation->stateid);
2257 cred = get_rpccred(delegation->cred);
2258 rcu_read_unlock();
2259 status = nfs41_test_stateid(server, &stateid, cred);
2260 trace_nfs4_test_delegation_stateid(state, NULL, status);
2261
2262 if (status != NFS_OK) {
2263 /* Free the stateid unless the server explicitly
2264 * informs us the stateid is unrecognized. */
2265 if (status != -NFS4ERR_BAD_STATEID)
2266 nfs41_free_stateid(server, &stateid, cred);
2267 nfs_finish_clear_delegation_stateid(state);
2268 }
2269
2270 put_rpccred(cred);
2271}
2272
2273/**
2274 * nfs41_check_open_stateid - possibly free an open stateid
2275 *
2276 * @state: NFSv4 state for an inode
2277 *
2278 * Returns NFS_OK if recovery for this stateid is now finished.
2279 * Otherwise a negative NFS4ERR value is returned.
2280 */
2281static int nfs41_check_open_stateid(struct nfs4_state *state)
2282{
2283 struct nfs_server *server = NFS_SERVER(state->inode);
2284 nfs4_stateid *stateid = &state->open_stateid;
2285 struct rpc_cred *cred = state->owner->so_cred;
2286 int status;
2287
2288 /* If a state reset has been done, test_stateid is unneeded */
2289 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2290 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2291 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2292 return -NFS4ERR_BAD_STATEID;
2293
2294 status = nfs41_test_stateid(server, stateid, cred);
2295 trace_nfs4_test_open_stateid(state, NULL, status);
2296 if (status != NFS_OK) {
2297 /* Free the stateid unless the server explicitly
2298 * informs us the stateid is unrecognized. */
2299 if (status != -NFS4ERR_BAD_STATEID)
2300 nfs41_free_stateid(server, stateid, cred);
2301
2302 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2303 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2304 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2305 clear_bit(NFS_OPEN_STATE, &state->flags);
2306 }
2307 return status;
2308}
2309
2310static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2311{
2312 int status;
2313
2314 nfs41_check_delegation_stateid(state);
2315 status = nfs41_check_open_stateid(state);
2316 if (status != NFS_OK)
2317 status = nfs4_open_expired(sp, state);
2318 return status;
2319}
2320#endif
2321
2322/*
2323 * On an EXCLUSIVE create, the server should send back a bitmask with the
2324 * FATTR4_* fields corresponding to the attributes that were used to store
2325 * the verifier. Make sure we clobber those fields in the later SETATTR call.
2326 */
2327static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2328 struct iattr *sattr, struct nfs4_label **label)
2329{
2330 const u32 *attrset = opendata->o_res.attrset;
2331
2332 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2333 !(sattr->ia_valid & ATTR_ATIME_SET))
2334 sattr->ia_valid |= ATTR_ATIME;
2335
2336 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2337 !(sattr->ia_valid & ATTR_MTIME_SET))
2338 sattr->ia_valid |= ATTR_MTIME;
2339
2340	/* Except for MODE, setting an attribute twice seems harmless. */
2341 if ((attrset[1] & FATTR4_WORD1_MODE))
2342 sattr->ia_valid &= ~ATTR_MODE;
2343
2344 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2345 *label = NULL;
2346}
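/*
 * A worked example (hypothetical reply, for illustration only): if the
 * server stored the EXCLUSIVE4 verifier in atime and mtime, the reply's
 * attrset[1] has FATTR4_WORD1_TIME_ACCESS and FATTR4_WORD1_TIME_MODIFY
 * set, so the helper above adds ATTR_ATIME and ATTR_MTIME to
 * sattr->ia_valid (unless the caller already asked for explicit
 * timestamps) and the follow-up SETATTR issued from _nfs4_do_open()
 * overwrites the verifier with sane values.
 */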
2347
2348static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2349 fmode_t fmode,
2350 int flags,
2351 struct nfs_open_context *ctx)
2352{
2353 struct nfs4_state_owner *sp = opendata->owner;
2354 struct nfs_server *server = sp->so_server;
2355 struct dentry *dentry;
2356 struct nfs4_state *state;
2357 unsigned int seq;
2358 int ret;
2359
2360 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2361
2362 ret = _nfs4_proc_open(opendata);
2363 if (ret != 0)
2364 goto out;
2365
2366 state = nfs4_opendata_to_nfs4_state(opendata);
2367 ret = PTR_ERR(state);
2368 if (IS_ERR(state))
2369 goto out;
2370 if (server->caps & NFS_CAP_POSIX_LOCK)
2371 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2372
2373 dentry = opendata->dentry;
2374 if (d_really_is_negative(dentry)) {
2375 /* FIXME: Is this d_drop() ever needed? */
2376 d_drop(dentry);
2377 dentry = d_add_unique(dentry, igrab(state->inode));
2378 if (dentry == NULL) {
2379 dentry = opendata->dentry;
2380 } else if (dentry != ctx->dentry) {
2381 dput(ctx->dentry);
2382 ctx->dentry = dget(dentry);
2383 }
2384 nfs_set_verifier(dentry,
2385 nfs_save_change_attribute(d_inode(opendata->dir)));
2386 }
2387
2388 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2389 if (ret != 0)
2390 goto out;
2391
2392 ctx->state = state;
2393 if (d_inode(dentry) == state->inode) {
2394 nfs_inode_attach_open_context(ctx);
2395 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2396 nfs4_schedule_stateid_recovery(server, state);
2397 }
2398out:
2399 return ret;
2400}
2401
2402/*
2403 * Returns a referenced nfs4_state
2404 */
2405static int _nfs4_do_open(struct inode *dir,
2406 struct nfs_open_context *ctx,
2407 int flags,
2408 struct iattr *sattr,
2409 struct nfs4_label *label,
2410 int *opened)
2411{
2412 struct nfs4_state_owner *sp;
2413 struct nfs4_state *state = NULL;
2414 struct nfs_server *server = NFS_SERVER(dir);
2415 struct nfs4_opendata *opendata;
2416 struct dentry *dentry = ctx->dentry;
2417 struct rpc_cred *cred = ctx->cred;
2418 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2419 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2420 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2421 struct nfs4_label *olabel = NULL;
2422 int status;
2423
2424 /* Protect against reboot recovery conflicts */
2425 status = -ENOMEM;
2426 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2427 if (sp == NULL) {
2428 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2429 goto out_err;
2430 }
2431 status = nfs4_recover_expired_lease(server);
2432 if (status != 0)
2433 goto err_put_state_owner;
2434 if (d_really_is_positive(dentry))
2435 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2436 status = -ENOMEM;
2437 if (d_really_is_positive(dentry))
2438 claim = NFS4_OPEN_CLAIM_FH;
2439 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2440 label, claim, GFP_KERNEL);
2441 if (opendata == NULL)
2442 goto err_put_state_owner;
2443
2444 if (label) {
2445 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2446 if (IS_ERR(olabel)) {
2447 status = PTR_ERR(olabel);
2448 goto err_opendata_put;
2449 }
2450 }
2451
2452 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2453 if (!opendata->f_attr.mdsthreshold) {
2454 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2455 if (!opendata->f_attr.mdsthreshold)
2456 goto err_free_label;
2457 }
2458 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2459 }
2460 if (d_really_is_positive(dentry))
2461 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2462
2463 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2464 if (status != 0)
2465 goto err_free_label;
2466 state = ctx->state;
2467
2468 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2469 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2470 nfs4_exclusive_attrset(opendata, sattr, &label);
2471
2472 nfs_fattr_init(opendata->o_res.f_attr);
2473 status = nfs4_do_setattr(state->inode, cred,
2474 opendata->o_res.f_attr, sattr,
2475 state, label, olabel);
2476 if (status == 0) {
2477 nfs_setattr_update_inode(state->inode, sattr,
2478 opendata->o_res.f_attr);
2479 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2480 }
2481 }
2482 if (opened && opendata->file_created)
2483 *opened |= FILE_CREATED;
2484
2485 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2486 *ctx_th = opendata->f_attr.mdsthreshold;
2487 opendata->f_attr.mdsthreshold = NULL;
2488 }
2489
2490 nfs4_label_free(olabel);
2491
2492 nfs4_opendata_put(opendata);
2493 nfs4_put_state_owner(sp);
2494 return 0;
2495err_free_label:
2496 nfs4_label_free(olabel);
2497err_opendata_put:
2498 nfs4_opendata_put(opendata);
2499err_put_state_owner:
2500 nfs4_put_state_owner(sp);
2501out_err:
2502 return status;
2503}
2504
2505
2506static struct nfs4_state *nfs4_do_open(struct inode *dir,
2507 struct nfs_open_context *ctx,
2508 int flags,
2509 struct iattr *sattr,
2510 struct nfs4_label *label,
2511 int *opened)
2512{
2513 struct nfs_server *server = NFS_SERVER(dir);
2514 struct nfs4_exception exception = { };
2515 struct nfs4_state *res;
2516 int status;
2517
2518 do {
2519 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2520 res = ctx->state;
2521 trace_nfs4_open_file(ctx, flags, status);
2522 if (status == 0)
2523 break;
2524 /* NOTE: BAD_SEQID means the server and client disagree about the
2525 * book-keeping w.r.t. state-changing operations
2526 * (OPEN/CLOSE/LOCK/LOCKU...)
2527 * It is actually a sign of a bug on the client or on the server.
2528 *
2529 * If we receive a BAD_SEQID error in the particular case of
2530 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2531 * have unhashed the old state_owner for us, and that we can
2532 * therefore safely retry using a new one. We should still warn
2533 * the user though...
2534 */
2535 if (status == -NFS4ERR_BAD_SEQID) {
2536 pr_warn_ratelimited("NFS: v4 server %s "
2537				"returned a bad sequence-id error!\n",
2538 NFS_SERVER(dir)->nfs_client->cl_hostname);
2539 exception.retry = 1;
2540 continue;
2541 }
2542 /*
2543 * BAD_STATEID on OPEN means that the server cancelled our
2544 * state before it received the OPEN_CONFIRM.
2545 * Recover by retrying the request as per the discussion
2546 * on Page 181 of RFC3530.
2547 */
2548 if (status == -NFS4ERR_BAD_STATEID) {
2549 exception.retry = 1;
2550 continue;
2551 }
2552 if (status == -EAGAIN) {
2553 /* We must have found a delegation */
2554 exception.retry = 1;
2555 continue;
2556 }
2557 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2558 continue;
2559 res = ERR_PTR(nfs4_handle_exception(server,
2560 status, &exception));
2561 } while (exception.retry);
2562 return res;
2563}
2564
2565static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2566 struct nfs_fattr *fattr, struct iattr *sattr,
2567 struct nfs4_state *state, struct nfs4_label *ilabel,
2568 struct nfs4_label *olabel)
2569{
2570 struct nfs_server *server = NFS_SERVER(inode);
2571 struct nfs_setattrargs arg = {
2572 .fh = NFS_FH(inode),
2573 .iap = sattr,
2574 .server = server,
2575 .bitmask = server->attr_bitmask,
2576 .label = ilabel,
2577 };
2578 struct nfs_setattrres res = {
2579 .fattr = fattr,
2580 .label = olabel,
2581 .server = server,
2582 };
2583 struct rpc_message msg = {
2584 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2585 .rpc_argp = &arg,
2586 .rpc_resp = &res,
2587 .rpc_cred = cred,
2588 };
2589 unsigned long timestamp = jiffies;
2590 fmode_t fmode;
2591 bool truncate;
2592 int status;
2593
2594 arg.bitmask = nfs4_bitmask(server, ilabel);
2595 if (ilabel)
2596 arg.bitmask = nfs4_bitmask(server, olabel);
2597
2598 nfs_fattr_init(fattr);
2599
2600 /* Servers should only apply open mode checks for file size changes */
2601 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2602 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2603
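	/*
	 * Pick the most specific stateid we hold: a delegation stateid if
	 * one exists, else an open/lock stateid when truncating, else the
	 * anonymous (zero) stateid.
	 */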
2604 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2605 /* Use that stateid */
2606 } else if (truncate && state != NULL) {
2607 struct nfs_lockowner lockowner = {
2608 .l_owner = current->files,
2609 .l_pid = current->tgid,
2610 };
2611 if (!nfs4_valid_open_stateid(state))
2612 return -EBADF;
2613 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2614 &lockowner) == -EIO)
2615 return -EBADF;
2616 } else
2617 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2618
2619 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2620 if (status == 0 && state != NULL)
2621 renew_lease(server, timestamp);
2622 return status;
2623}
2624
2625static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2626 struct nfs_fattr *fattr, struct iattr *sattr,
2627 struct nfs4_state *state, struct nfs4_label *ilabel,
2628 struct nfs4_label *olabel)
2629{
2630 struct nfs_server *server = NFS_SERVER(inode);
2631 struct nfs4_exception exception = {
2632 .state = state,
2633 .inode = inode,
2634 };
2635 int err;
2636 do {
2637 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2638 trace_nfs4_setattr(inode, err);
2639 switch (err) {
2640 case -NFS4ERR_OPENMODE:
2641 if (!(sattr->ia_valid & ATTR_SIZE)) {
2642 pr_warn_once("NFSv4: server %s is incorrectly "
2643 "applying open mode checks to "
2644 "a SETATTR that is not "
2645 "changing file size.\n",
2646 server->nfs_client->cl_hostname);
2647 }
2648 if (state && !(state->state & FMODE_WRITE)) {
2649 err = -EBADF;
2650 if (sattr->ia_valid & ATTR_OPEN)
2651 err = -EACCES;
2652 goto out;
2653 }
2654 }
2655 err = nfs4_handle_exception(server, err, &exception);
2656 } while (exception.retry);
2657out:
2658 return err;
2659}
2660
2661struct nfs4_closedata {
2662 struct inode *inode;
2663 struct nfs4_state *state;
2664 struct nfs_closeargs arg;
2665 struct nfs_closeres res;
2666 struct nfs_fattr fattr;
2667 unsigned long timestamp;
2668 bool roc;
2669 u32 roc_barrier;
2670};
2671
2672static void nfs4_free_closedata(void *data)
2673{
2674 struct nfs4_closedata *calldata = data;
2675 struct nfs4_state_owner *sp = calldata->state->owner;
2676 struct super_block *sb = calldata->state->inode->i_sb;
2677
2678 if (calldata->roc)
2679 pnfs_roc_release(calldata->state->inode);
2680 nfs4_put_open_state(calldata->state);
2681 nfs_free_seqid(calldata->arg.seqid);
2682 nfs4_put_state_owner(sp);
2683 nfs_sb_deactive(sb);
2684 kfree(calldata);
2685}
2686
2687static void nfs4_close_done(struct rpc_task *task, void *data)
2688{
2689 struct nfs4_closedata *calldata = data;
2690 struct nfs4_state *state = calldata->state;
2691 struct nfs_server *server = NFS_SERVER(calldata->inode);
2692 nfs4_stateid *res_stateid = NULL;
2693
2694 dprintk("%s: begin!\n", __func__);
2695 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2696 return;
2697 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2698	/* Hmm. We are done with the inode, and in the process of freeing
2699	 * the state_owner. We keep this around to process errors.
2700 */
2701 switch (task->tk_status) {
2702 case 0:
2703 res_stateid = &calldata->res.stateid;
2704 if (calldata->roc)
2705 pnfs_roc_set_barrier(state->inode,
2706 calldata->roc_barrier);
2707 renew_lease(server, calldata->timestamp);
2708 break;
2709 case -NFS4ERR_ADMIN_REVOKED:
2710 case -NFS4ERR_STALE_STATEID:
2711 case -NFS4ERR_OLD_STATEID:
2712 case -NFS4ERR_BAD_STATEID:
2713 case -NFS4ERR_EXPIRED:
2714 if (!nfs4_stateid_match(&calldata->arg.stateid,
2715 &state->open_stateid)) {
2716 rpc_restart_call_prepare(task);
2717 goto out_release;
2718 }
2719 if (calldata->arg.fmode == 0)
2720 break;
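		/* Fall through: treat a failed OPEN_DOWNGRADE like any other error */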
2721 default:
2722 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2723 rpc_restart_call_prepare(task);
2724 goto out_release;
2725 }
2726 }
2727 nfs_clear_open_stateid(state, &calldata->arg.stateid,
2728 res_stateid, calldata->arg.fmode);
2729out_release:
2730 nfs_release_seqid(calldata->arg.seqid);
2731 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2732 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2733}
2734
2735static void nfs4_close_prepare(struct rpc_task *task, void *data)
2736{
2737 struct nfs4_closedata *calldata = data;
2738 struct nfs4_state *state = calldata->state;
2739 struct inode *inode = calldata->inode;
2740 bool is_rdonly, is_wronly, is_rdwr;
2741 int call_close = 0;
2742
2743 dprintk("%s: begin!\n", __func__);
2744 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2745 goto out_wait;
2746
2747 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2748 spin_lock(&state->owner->so_lock);
2749 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2750 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2751 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2752 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2753 /* Calculate the change in open mode */
2754 calldata->arg.fmode = 0;
2755 if (state->n_rdwr == 0) {
2756 if (state->n_rdonly == 0)
2757 call_close |= is_rdonly;
2758 else if (is_rdonly)
2759 calldata->arg.fmode |= FMODE_READ;
2760 if (state->n_wronly == 0)
2761 call_close |= is_wronly;
2762 else if (is_wronly)
2763 calldata->arg.fmode |= FMODE_WRITE;
2764 } else if (is_rdwr)
2765 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2766
2767 if (calldata->arg.fmode == 0)
2768 call_close |= is_rdwr;
2769
2770 if (!nfs4_valid_open_stateid(state))
2771 call_close = 0;
2772 spin_unlock(&state->owner->so_lock);
2773
2774 if (!call_close) {
2775 /* Note: exit _without_ calling nfs4_close_done */
2776 goto out_no_action;
2777 }
2778
2779 if (calldata->arg.fmode == 0)
2780 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2781 if (calldata->roc)
2782 pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
2783
2784 calldata->arg.share_access =
2785 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2786 calldata->arg.fmode, 0);
2787
2788 nfs_fattr_init(calldata->res.fattr);
2789 calldata->timestamp = jiffies;
2790 if (nfs4_setup_sequence(NFS_SERVER(inode),
2791 &calldata->arg.seq_args,
2792 &calldata->res.seq_res,
2793 task) != 0)
2794 nfs_release_seqid(calldata->arg.seqid);
2795 dprintk("%s: done!\n", __func__);
2796 return;
2797out_no_action:
2798 task->tk_action = NULL;
2799out_wait:
2800 nfs4_sequence_done(task, &calldata->res.seq_res);
2801}
2802
2803static const struct rpc_call_ops nfs4_close_ops = {
2804 .rpc_call_prepare = nfs4_close_prepare,
2805 .rpc_call_done = nfs4_close_done,
2806 .rpc_release = nfs4_free_closedata,
2807};
2808
2809static bool nfs4_roc(struct inode *inode)
2810{
2811 if (!nfs_have_layout(inode))
2812 return false;
2813 return pnfs_roc(inode);
2814}
2815
2816/*
2817 * It is possible for data to be read/written from a mem-mapped file
2818 * after the sys_close call (which hits the vfs layer as a flush).
2819 * This means that we can't safely call NFSv4 CLOSE on a file until
2820 * the inode is cleared. This in turn means that we are not good
2821 * NFSv4 citizens - we do not tell the server to update the file's
2822 * share state even when we are done with one of the three share
2823 * stateids in the inode.
2824 *
2825 * NOTE: Caller must be holding the sp->so_owner semaphore!
2826 */
2827int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2828{
2829 struct nfs_server *server = NFS_SERVER(state->inode);
2830 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2831 struct nfs4_closedata *calldata;
2832 struct nfs4_state_owner *sp = state->owner;
2833 struct rpc_task *task;
2834 struct rpc_message msg = {
2835 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2836 .rpc_cred = state->owner->so_cred,
2837 };
2838 struct rpc_task_setup task_setup_data = {
2839 .rpc_client = server->client,
2840 .rpc_message = &msg,
2841 .callback_ops = &nfs4_close_ops,
2842 .workqueue = nfsiod_workqueue,
2843 .flags = RPC_TASK_ASYNC,
2844 };
2845 int status = -ENOMEM;
2846
2847 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2848 &task_setup_data.rpc_client, &msg);
2849
2850 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2851 if (calldata == NULL)
2852 goto out;
2853 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2854 calldata->inode = state->inode;
2855 calldata->state = state;
2856 calldata->arg.fh = NFS_FH(state->inode);
2857 /* Serialization for the sequence id */
2858 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2859 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2860 if (IS_ERR(calldata->arg.seqid))
2861 goto out_free_calldata;
2862 calldata->arg.fmode = 0;
2863 calldata->arg.bitmask = server->cache_consistency_bitmask;
2864 calldata->res.fattr = &calldata->fattr;
2865 calldata->res.seqid = calldata->arg.seqid;
2866 calldata->res.server = server;
2867 calldata->roc = nfs4_roc(state->inode);
2868 nfs_sb_active(calldata->inode->i_sb);
2869
2870 msg.rpc_argp = &calldata->arg;
2871 msg.rpc_resp = &calldata->res;
2872 task_setup_data.callback_data = calldata;
2873 task = rpc_run_task(&task_setup_data);
2874 if (IS_ERR(task))
2875 return PTR_ERR(task);
2876 status = 0;
2877 if (wait)
2878 status = rpc_wait_for_completion_task(task);
2879 rpc_put_task(task);
2880 return status;
2881out_free_calldata:
2882 kfree(calldata);
2883out:
2884 nfs4_put_open_state(state);
2885 nfs4_put_state_owner(sp);
2886 return status;
2887}
2888
2889static struct inode *
2890nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2891 int open_flags, struct iattr *attr, int *opened)
2892{
2893 struct nfs4_state *state;
2894 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2895
2896 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2897
2898 /* Protect against concurrent sillydeletes */
2899 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2900
2901 nfs4_label_release_security(label);
2902
2903 if (IS_ERR(state))
2904 return ERR_CAST(state);
2905 return state->inode;
2906}
2907
2908static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2909{
2910 if (ctx->state == NULL)
2911 return;
2912 if (is_sync)
2913 nfs4_close_sync(ctx->state, ctx->mode);
2914 else
2915 nfs4_close_state(ctx->state, ctx->mode);
2916}
2917
2918#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
2919#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
2920#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
2921
2922static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2923{
2924 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
2925 struct nfs4_server_caps_arg args = {
2926 .fhandle = fhandle,
2927 .bitmask = bitmask,
2928 };
2929 struct nfs4_server_caps_res res = {};
2930 struct rpc_message msg = {
2931 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2932 .rpc_argp = &args,
2933 .rpc_resp = &res,
2934 };
2935 int status;
2936
2937 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
2938 FATTR4_WORD0_FH_EXPIRE_TYPE |
2939 FATTR4_WORD0_LINK_SUPPORT |
2940 FATTR4_WORD0_SYMLINK_SUPPORT |
2941 FATTR4_WORD0_ACLSUPPORT;
2942 if (minorversion)
2943 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
2944
2945 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2946 if (status == 0) {
2947 /* Sanity check the server answers */
2948 switch (minorversion) {
2949 case 0:
2950 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
2951 res.attr_bitmask[2] = 0;
2952 break;
2953 case 1:
2954 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
2955 break;
2956 case 2:
2957 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
2958 }
2959 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2960 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2961 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2962 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2963 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2964 NFS_CAP_CTIME|NFS_CAP_MTIME|
2965 NFS_CAP_SECURITY_LABEL);
2966 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
2967 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
2968 server->caps |= NFS_CAP_ACLS;
2969 if (res.has_links != 0)
2970 server->caps |= NFS_CAP_HARDLINKS;
2971 if (res.has_symlinks != 0)
2972 server->caps |= NFS_CAP_SYMLINKS;
2973 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2974 server->caps |= NFS_CAP_FILEID;
2975 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2976 server->caps |= NFS_CAP_MODE;
2977 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2978 server->caps |= NFS_CAP_NLINK;
2979 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2980 server->caps |= NFS_CAP_OWNER;
2981 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2982 server->caps |= NFS_CAP_OWNER_GROUP;
2983 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2984 server->caps |= NFS_CAP_ATIME;
2985 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2986 server->caps |= NFS_CAP_CTIME;
2987 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2988 server->caps |= NFS_CAP_MTIME;
2989#ifdef CONFIG_NFS_V4_SECURITY_LABEL
2990 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
2991 server->caps |= NFS_CAP_SECURITY_LABEL;
2992#endif
2993 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
2994 sizeof(server->attr_bitmask));
2995 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
2996
2997 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2998 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2999 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3000 server->cache_consistency_bitmask[2] = 0;
3001 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3002 sizeof(server->exclcreat_bitmask));
3003 server->acl_bitmask = res.acl_bitmask;
3004 server->fh_expire_type = res.fh_expire_type;
3005 }
3006
3007 return status;
3008}
3009
3010int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3011{
3012 struct nfs4_exception exception = { };
3013 int err;
3014 do {
3015 err = nfs4_handle_exception(server,
3016 _nfs4_server_capabilities(server, fhandle),
3017 &exception);
3018 } while (exception.retry);
3019 return err;
3020}
3021
3022static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3023 struct nfs_fsinfo *info)
3024{
3025 u32 bitmask[3];
3026 struct nfs4_lookup_root_arg args = {
3027 .bitmask = bitmask,
3028 };
3029 struct nfs4_lookup_res res = {
3030 .server = server,
3031 .fattr = info->fattr,
3032 .fh = fhandle,
3033 };
3034 struct rpc_message msg = {
3035 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3036 .rpc_argp = &args,
3037 .rpc_resp = &res,
3038 };
3039
3040 bitmask[0] = nfs4_fattr_bitmap[0];
3041 bitmask[1] = nfs4_fattr_bitmap[1];
3042 /*
3043 * Process the label in the upcoming getfattr
3044 */
3045 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3046
3047 nfs_fattr_init(info->fattr);
3048 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3049}
3050
3051static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3052 struct nfs_fsinfo *info)
3053{
3054 struct nfs4_exception exception = { };
3055 int err;
3056 do {
3057 err = _nfs4_lookup_root(server, fhandle, info);
3058 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3059 switch (err) {
3060 case 0:
3061 case -NFS4ERR_WRONGSEC:
3062 goto out;
3063 default:
3064 err = nfs4_handle_exception(server, err, &exception);
3065 }
3066 } while (exception.retry);
3067out:
3068 return err;
3069}
3070
3071static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3072 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3073{
3074 struct rpc_auth_create_args auth_args = {
3075 .pseudoflavor = flavor,
3076 };
3077 struct rpc_auth *auth;
3078 int ret;
3079
3080 auth = rpcauth_create(&auth_args, server->client);
3081 if (IS_ERR(auth)) {
3082 ret = -EACCES;
3083 goto out;
3084 }
3085 ret = nfs4_lookup_root(server, fhandle, info);
3086out:
3087 return ret;
3088}
3089
3090/*
3091 * Retry pseudoroot lookup with various security flavors. We do this when:
3092 *
3093 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3094 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3095 *
3096 * Returns zero on success, or a negative NFS4ERR value, or a
3097 * negative errno value.
3098 */
3099static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3100 struct nfs_fsinfo *info)
3101{
3102 /* Per 3530bis 15.33.5 */
3103 static const rpc_authflavor_t flav_array[] = {
3104 RPC_AUTH_GSS_KRB5P,
3105 RPC_AUTH_GSS_KRB5I,
3106 RPC_AUTH_GSS_KRB5,
3107 RPC_AUTH_UNIX, /* courtesy */
3108 RPC_AUTH_NULL,
3109 };
3110 int status = -EPERM;
3111 size_t i;
3112
3113 if (server->auth_info.flavor_len > 0) {
3114 /* try each flavor specified by user */
3115 for (i = 0; i < server->auth_info.flavor_len; i++) {
3116 status = nfs4_lookup_root_sec(server, fhandle, info,
3117 server->auth_info.flavors[i]);
3118 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3119 continue;
3120 break;
3121 }
3122 } else {
3123 /* no flavors specified by user, try default list */
3124 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3125 status = nfs4_lookup_root_sec(server, fhandle, info,
3126 flav_array[i]);
3127 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3128 continue;
3129 break;
3130 }
3131 }
3132
3133 /*
3134	 * -EACCES could mean that the user doesn't have correct permissions
3135 * to access the mount. It could also mean that we tried to mount
3136 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3137 * existing mount programs don't handle -EACCES very well so it should
3138 * be mapped to -EPERM instead.
3139 */
3140 if (status == -EACCES)
3141 status = -EPERM;
3142 return status;
3143}
3144
3145static int nfs4_do_find_root_sec(struct nfs_server *server,
3146 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3147{
3148 int mv = server->nfs_client->cl_minorversion;
3149 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3150}
3151
3152/**
3153 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3154 * @server: initialized nfs_server handle
3155 * @fhandle: we fill in the pseudo-fs root file handle
3156 * @info: we fill in an FSINFO struct
3157 * @auth_probe: probe the auth flavours
3158 *
3159 * Returns zero on success, or a negative errno.
3160 */
3161int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3162 struct nfs_fsinfo *info,
3163 bool auth_probe)
3164{
3165 int status = 0;
3166
3167 if (!auth_probe)
3168 status = nfs4_lookup_root(server, fhandle, info);
3169
3170 if (auth_probe || status == NFS4ERR_WRONGSEC)
3171 status = nfs4_do_find_root_sec(server, fhandle, info);
3172
3173 if (status == 0)
3174 status = nfs4_server_capabilities(server, fhandle);
3175 if (status == 0)
3176 status = nfs4_do_fsinfo(server, fhandle, info);
3177
3178 return nfs4_map_errors(status);
3179}
3180
3181static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3182 struct nfs_fsinfo *info)
3183{
3184 int error;
3185 struct nfs_fattr *fattr = info->fattr;
3186 struct nfs4_label *label = NULL;
3187
3188 error = nfs4_server_capabilities(server, mntfh);
3189 if (error < 0) {
3190 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3191 return error;
3192 }
3193
3194 label = nfs4_label_alloc(server, GFP_KERNEL);
3195 if (IS_ERR(label))
3196 return PTR_ERR(label);
3197
3198 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3199 if (error < 0) {
3200 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3201 goto err_free_label;
3202 }
3203
3204 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3205 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3206 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3207
3208err_free_label:
3209 nfs4_label_free(label);
3210
3211 return error;
3212}
3213
3214/*
3215 * Get locations and (maybe) other attributes of a referral.
3216 * Note that we'll actually follow the referral later when
3217 * we detect fsid mismatch in inode revalidation
3218 */
3219static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3220 const struct qstr *name, struct nfs_fattr *fattr,
3221 struct nfs_fh *fhandle)
3222{
3223 int status = -ENOMEM;
3224 struct page *page = NULL;
3225 struct nfs4_fs_locations *locations = NULL;
3226
3227 page = alloc_page(GFP_KERNEL);
3228 if (page == NULL)
3229 goto out;
3230 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3231 if (locations == NULL)
3232 goto out;
3233
3234 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3235 if (status != 0)
3236 goto out;
3237
3238 /*
3239 * If the fsid didn't change, this is a migration event, not a
3240 * referral. Cause us to drop into the exception handler, which
3241 * will kick off migration recovery.
3242 */
3243 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3244 dprintk("%s: server did not return a different fsid for"
3245 " a referral at %s\n", __func__, name->name);
3246 status = -NFS4ERR_MOVED;
3247 goto out;
3248 }
3249 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3250 nfs_fixup_referral_attributes(&locations->fattr);
3251
3252 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3253 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3254 memset(fhandle, 0, sizeof(struct nfs_fh));
3255out:
3256 if (page)
3257 __free_page(page);
3258 kfree(locations);
3259 return status;
3260}
3261
3262static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3263 struct nfs_fattr *fattr, struct nfs4_label *label)
3264{
3265 struct nfs4_getattr_arg args = {
3266 .fh = fhandle,
3267 .bitmask = server->attr_bitmask,
3268 };
3269 struct nfs4_getattr_res res = {
3270 .fattr = fattr,
3271 .label = label,
3272 .server = server,
3273 };
3274 struct rpc_message msg = {
3275 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3276 .rpc_argp = &args,
3277 .rpc_resp = &res,
3278 };
3279
3280 args.bitmask = nfs4_bitmask(server, label);
3281
3282 nfs_fattr_init(fattr);
3283 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3284}
3285
3286static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3287 struct nfs_fattr *fattr, struct nfs4_label *label)
3288{
3289 struct nfs4_exception exception = { };
3290 int err;
3291 do {
3292 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3293 trace_nfs4_getattr(server, fhandle, fattr, err);
3294 err = nfs4_handle_exception(server, err,
3295 &exception);
3296 } while (exception.retry);
3297 return err;
3298}
3299
3300/*
3301 * The file is not closed if it is opened due to a request to change
3302 * the size of the file. The open call will not be needed once the
3303 * VFS layer lookup-intents are implemented.
3304 *
3305 * Close is called when the inode is destroyed.
3306 * If we haven't opened the file for O_WRONLY, we
3307 * need to do so in the size_change case to obtain a stateid.
3308 *
3309 * Got race?
3310 * Because OPEN is always done by name in NFSv4, it is
3311 * possible that we opened a different file by the same
3312 * name. We can recognize this race condition, but we
3313 * can't do anything about it besides returning an error.
3314 *
3315 * This will be fixed with VFS changes (lookup-intent).
3316 */
3317static int
3318nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3319 struct iattr *sattr)
3320{
3321 struct inode *inode = d_inode(dentry);
3322 struct rpc_cred *cred = NULL;
3323 struct nfs4_state *state = NULL;
3324 struct nfs4_label *label = NULL;
3325 int status;
3326
3327 if (pnfs_ld_layoutret_on_setattr(inode) &&
3328 sattr->ia_valid & ATTR_SIZE &&
3329 sattr->ia_size < i_size_read(inode))
3330 pnfs_commit_and_return_layout(inode);
3331
3332 nfs_fattr_init(fattr);
3333
3334 /* Deal with open(O_TRUNC) */
3335 if (sattr->ia_valid & ATTR_OPEN)
3336 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3337
3338 /* Optimization: if the end result is no change, don't RPC */
3339 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3340 return 0;
3341
3342 /* Search for an existing open(O_WRITE) file */
3343 if (sattr->ia_valid & ATTR_FILE) {
3344 struct nfs_open_context *ctx;
3345
3346 ctx = nfs_file_open_context(sattr->ia_file);
3347 if (ctx) {
3348 cred = ctx->cred;
3349 state = ctx->state;
3350 }
3351 }
3352
3353 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3354 if (IS_ERR(label))
3355 return PTR_ERR(label);
3356
3357 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3358 if (status == 0) {
3359 nfs_setattr_update_inode(inode, sattr, fattr);
3360 nfs_setsecurity(inode, fattr, label);
3361 }
3362 nfs4_label_free(label);
3363 return status;
3364}
3365
3366static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3367 const struct qstr *name, struct nfs_fh *fhandle,
3368 struct nfs_fattr *fattr, struct nfs4_label *label)
3369{
3370 struct nfs_server *server = NFS_SERVER(dir);
3371 int status;
3372 struct nfs4_lookup_arg args = {
3373 .bitmask = server->attr_bitmask,
3374 .dir_fh = NFS_FH(dir),
3375 .name = name,
3376 };
3377 struct nfs4_lookup_res res = {
3378 .server = server,
3379 .fattr = fattr,
3380 .label = label,
3381 .fh = fhandle,
3382 };
3383 struct rpc_message msg = {
3384 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3385 .rpc_argp = &args,
3386 .rpc_resp = &res,
3387 };
3388
3389 args.bitmask = nfs4_bitmask(server, label);
3390
3391 nfs_fattr_init(fattr);
3392
3393 dprintk("NFS call lookup %s\n", name->name);
3394 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3395 dprintk("NFS reply lookup: %d\n", status);
3396 return status;
3397}
3398
3399static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3400{
3401 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3402 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3403 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3404 fattr->nlink = 2;
3405}
3406
3407static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3408 struct qstr *name, struct nfs_fh *fhandle,
3409 struct nfs_fattr *fattr, struct nfs4_label *label)
3410{
3411 struct nfs4_exception exception = { };
3412 struct rpc_clnt *client = *clnt;
3413 int err;
3414 do {
3415 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3416 trace_nfs4_lookup(dir, name, err);
3417 switch (err) {
3418 case -NFS4ERR_BADNAME:
3419 err = -ENOENT;
3420 goto out;
3421 case -NFS4ERR_MOVED:
3422 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3423 if (err == -NFS4ERR_MOVED)
3424 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3425 goto out;
3426 case -NFS4ERR_WRONGSEC:
3427 err = -EPERM;
3428 if (client != *clnt)
3429 goto out;
3430 client = nfs4_negotiate_security(client, dir, name);
3431 if (IS_ERR(client))
3432 return PTR_ERR(client);
3433
3434 exception.retry = 1;
3435 break;
3436 default:
3437 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3438 }
3439 } while (exception.retry);
3440
3441out:
3442 if (err == 0)
3443 *clnt = client;
3444 else if (client != *clnt)
3445 rpc_shutdown_client(client);
3446
3447 return err;
3448}
3449
3450static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3451 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3452 struct nfs4_label *label)
3453{
3454 int status;
3455 struct rpc_clnt *client = NFS_CLIENT(dir);
3456
3457 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3458 if (client != NFS_CLIENT(dir)) {
3459 rpc_shutdown_client(client);
3460 nfs_fixup_secinfo_attributes(fattr);
3461 }
3462 return status;
3463}
3464
3465struct rpc_clnt *
3466nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3467 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3468{
3469 struct rpc_clnt *client = NFS_CLIENT(dir);
3470 int status;
3471
3472 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3473 if (status < 0)
3474 return ERR_PTR(status);
3475 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3476}
3477
3478static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3479{
3480 struct nfs_server *server = NFS_SERVER(inode);
3481 struct nfs4_accessargs args = {
3482 .fh = NFS_FH(inode),
3483 .bitmask = server->cache_consistency_bitmask,
3484 };
3485 struct nfs4_accessres res = {
3486 .server = server,
3487 };
3488 struct rpc_message msg = {
3489 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3490 .rpc_argp = &args,
3491 .rpc_resp = &res,
3492 .rpc_cred = entry->cred,
3493 };
3494 int mode = entry->mask;
3495 int status = 0;
3496
3497 /*
3498 * Determine which access bits we want to ask for...
3499 */
3500 if (mode & MAY_READ)
3501 args.access |= NFS4_ACCESS_READ;
3502 if (S_ISDIR(inode->i_mode)) {
3503 if (mode & MAY_WRITE)
3504 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3505 if (mode & MAY_EXEC)
3506 args.access |= NFS4_ACCESS_LOOKUP;
3507 } else {
3508 if (mode & MAY_WRITE)
3509 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3510 if (mode & MAY_EXEC)
3511 args.access |= NFS4_ACCESS_EXECUTE;
3512 }
3513
3514 res.fattr = nfs_alloc_fattr();
3515 if (res.fattr == NULL)
3516 return -ENOMEM;
3517
3518 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3519 if (!status) {
3520 nfs_access_set_mask(entry, res.access);
3521 nfs_refresh_inode(inode, res.fattr);
3522 }
3523 nfs_free_fattr(res.fattr);
3524 return status;
3525}
3526
3527static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3528{
3529 struct nfs4_exception exception = { };
3530 int err;
3531 do {
3532 err = _nfs4_proc_access(inode, entry);
3533 trace_nfs4_access(inode, err);
3534 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3535 &exception);
3536 } while (exception.retry);
3537 return err;
3538}
3539
3540/*
3541 * TODO: For the time being, we don't try to get any attributes
3542 * along with any of the zero-copy operations READ, READDIR,
3543 * READLINK, WRITE.
3544 *
3545 * In the case of the first three, we want to put the GETATTR
3546 * after the read-type operation -- this is because it is hard
3547 * to predict the length of a GETATTR response in v4, and thus
3548 * hard to align the READ data correctly.  This means that the GETATTR
3549 * may end up partially falling into the page cache, and we should
3550 * shift it into the 'tail' of the xdr_buf before processing.
3551 * To do this efficiently, we need to know the total length
3552 * of data received, which doesn't seem to be available outside
3553 * of the RPC layer.
3554 *
3555 * In the case of WRITE, we also want to put the GETATTR after
3556 * the operation -- in this case because we want to make sure
3557 * we get the post-operation mtime and size.
3558 *
3559 * Both of these changes to the XDR layer would in fact be quite
3560 * minor, but I decided to leave them for a subsequent patch.
3561 */
3562static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3563 unsigned int pgbase, unsigned int pglen)
3564{
3565 struct nfs4_readlink args = {
3566 .fh = NFS_FH(inode),
3567 .pgbase = pgbase,
3568 .pglen = pglen,
3569 .pages = &page,
3570 };
3571 struct nfs4_readlink_res res;
3572 struct rpc_message msg = {
3573 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3574 .rpc_argp = &args,
3575 .rpc_resp = &res,
3576 };
3577
3578 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3579}
3580
3581static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3582 unsigned int pgbase, unsigned int pglen)
3583{
3584 struct nfs4_exception exception = { };
3585 int err;
3586 do {
3587 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3588 trace_nfs4_readlink(inode, err);
3589 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3590 &exception);
3591 } while (exception.retry);
3592 return err;
3593}
3594
3595/*
3596 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3597 */
3598static int
3599nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3600 int flags)
3601{
3602 struct nfs4_label l, *ilabel = NULL;
3603 struct nfs_open_context *ctx;
3604 struct nfs4_state *state;
3605 int status = 0;
3606
3607 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3608 if (IS_ERR(ctx))
3609 return PTR_ERR(ctx);
3610
3611 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3612
3613 sattr->ia_mode &= ~current_umask();
3614 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
3615 if (IS_ERR(state)) {
3616 status = PTR_ERR(state);
3617 goto out;
3618 }
3619out:
3620 nfs4_label_release_security(ilabel);
3621 put_nfs_open_context(ctx);
3622 return status;
3623}
3624
3625static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3626{
3627 struct nfs_server *server = NFS_SERVER(dir);
3628 struct nfs_removeargs args = {
3629 .fh = NFS_FH(dir),
3630 .name = *name,
3631 };
3632 struct nfs_removeres res = {
3633 .server = server,
3634 };
3635 struct rpc_message msg = {
3636 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3637 .rpc_argp = &args,
3638 .rpc_resp = &res,
3639 };
3640 int status;
3641
3642 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3643 if (status == 0)
3644 update_changeattr(dir, &res.cinfo);
3645 return status;
3646}
3647
3648static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3649{
3650 struct nfs4_exception exception = { };
3651 int err;
3652 do {
3653 err = _nfs4_proc_remove(dir, name);
3654 trace_nfs4_remove(dir, name, err);
3655 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3656 &exception);
3657 } while (exception.retry);
3658 return err;
3659}
3660
3661static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3662{
3663 struct nfs_server *server = NFS_SERVER(dir);
3664 struct nfs_removeargs *args = msg->rpc_argp;
3665 struct nfs_removeres *res = msg->rpc_resp;
3666
3667 res->server = server;
3668 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3669 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3670
3671 nfs_fattr_init(res->dir_attr);
3672}
3673
3674static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3675{
3676 nfs4_setup_sequence(NFS_SERVER(data->dir),
3677 &data->args.seq_args,
3678 &data->res.seq_res,
3679 task);
3680}
3681
3682static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3683{
3684 struct nfs_unlinkdata *data = task->tk_calldata;
3685 struct nfs_removeres *res = &data->res;
3686
3687 if (!nfs4_sequence_done(task, &res->seq_res))
3688 return 0;
3689 if (nfs4_async_handle_error(task, res->server, NULL,
3690 &data->timeout) == -EAGAIN)
3691 return 0;
3692 update_changeattr(dir, &res->cinfo);
3693 return 1;
3694}
3695
3696static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3697{
3698 struct nfs_server *server = NFS_SERVER(dir);
3699 struct nfs_renameargs *arg = msg->rpc_argp;
3700 struct nfs_renameres *res = msg->rpc_resp;
3701
3702 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3703 res->server = server;
3704 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3705}
3706
3707static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3708{
3709 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3710 &data->args.seq_args,
3711 &data->res.seq_res,
3712 task);
3713}
3714
3715static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3716 struct inode *new_dir)
3717{
3718 struct nfs_renamedata *data = task->tk_calldata;
3719 struct nfs_renameres *res = &data->res;
3720
3721 if (!nfs4_sequence_done(task, &res->seq_res))
3722 return 0;
3723 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3724 return 0;
3725
3726 update_changeattr(old_dir, &res->old_cinfo);
3727 update_changeattr(new_dir, &res->new_cinfo);
3728 return 1;
3729}
3730
3731static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3732{
3733 struct nfs_server *server = NFS_SERVER(inode);
3734 struct nfs4_link_arg arg = {
3735 .fh = NFS_FH(inode),
3736 .dir_fh = NFS_FH(dir),
3737 .name = name,
3738 .bitmask = server->attr_bitmask,
3739 };
3740 struct nfs4_link_res res = {
3741 .server = server,
3742 .label = NULL,
3743 };
3744 struct rpc_message msg = {
3745 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3746 .rpc_argp = &arg,
3747 .rpc_resp = &res,
3748 };
3749 int status = -ENOMEM;
3750
3751 res.fattr = nfs_alloc_fattr();
3752 if (res.fattr == NULL)
3753 goto out;
3754
3755 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3756 if (IS_ERR(res.label)) {
3757 status = PTR_ERR(res.label);
3758 goto out;
3759 }
3760 arg.bitmask = nfs4_bitmask(server, res.label);
3761
3762 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3763 if (!status) {
3764 update_changeattr(dir, &res.cinfo);
3765 status = nfs_post_op_update_inode(inode, res.fattr);
3766 if (!status)
3767 nfs_setsecurity(inode, res.fattr, res.label);
3768 }
3769
3770
3771 nfs4_label_free(res.label);
3772
3773out:
3774 nfs_free_fattr(res.fattr);
3775 return status;
3776}
3777
3778static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3779{
3780 struct nfs4_exception exception = { };
3781 int err;
3782 do {
3783 err = nfs4_handle_exception(NFS_SERVER(inode),
3784 _nfs4_proc_link(inode, dir, name),
3785 &exception);
3786 } while (exception.retry);
3787 return err;
3788}
3789
3790struct nfs4_createdata {
3791 struct rpc_message msg;
3792 struct nfs4_create_arg arg;
3793 struct nfs4_create_res res;
3794 struct nfs_fh fh;
3795 struct nfs_fattr fattr;
3796 struct nfs4_label *label;
3797};
3798
3799static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3800 struct qstr *name, struct iattr *sattr, u32 ftype)
3801{
3802 struct nfs4_createdata *data;
3803
3804 data = kzalloc(sizeof(*data), GFP_KERNEL);
3805 if (data != NULL) {
3806 struct nfs_server *server = NFS_SERVER(dir);
3807
3808 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3809 if (IS_ERR(data->label))
3810 goto out_free;
3811
3812 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3813 data->msg.rpc_argp = &data->arg;
3814 data->msg.rpc_resp = &data->res;
3815 data->arg.dir_fh = NFS_FH(dir);
3816 data->arg.server = server;
3817 data->arg.name = name;
3818 data->arg.attrs = sattr;
3819 data->arg.ftype = ftype;
3820 data->arg.bitmask = nfs4_bitmask(server, data->label);
3821 data->res.server = server;
3822 data->res.fh = &data->fh;
3823 data->res.fattr = &data->fattr;
3824 data->res.label = data->label;
3825 nfs_fattr_init(data->res.fattr);
3826 }
3827 return data;
3828out_free:
3829 kfree(data);
3830 return NULL;
3831}
3832
3833static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3834{
3835 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3836 &data->arg.seq_args, &data->res.seq_res, 1);
3837 if (status == 0) {
3838 update_changeattr(dir, &data->res.dir_cinfo);
3839 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3840 }
3841 return status;
3842}
3843
3844static void nfs4_free_createdata(struct nfs4_createdata *data)
3845{
3846 nfs4_label_free(data->label);
3847 kfree(data);
3848}
3849
3850static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3851 struct page *page, unsigned int len, struct iattr *sattr,
3852 struct nfs4_label *label)
3853{
3854 struct nfs4_createdata *data;
3855 int status = -ENAMETOOLONG;
3856
3857 if (len > NFS4_MAXPATHLEN)
3858 goto out;
3859
3860 status = -ENOMEM;
3861 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3862 if (data == NULL)
3863 goto out;
3864
3865 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3866 data->arg.u.symlink.pages = &page;
3867 data->arg.u.symlink.len = len;
3868 data->arg.label = label;
3869
3870 status = nfs4_do_create(dir, dentry, data);
3871
3872 nfs4_free_createdata(data);
3873out:
3874 return status;
3875}
3876
3877static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3878 struct page *page, unsigned int len, struct iattr *sattr)
3879{
3880 struct nfs4_exception exception = { };
3881 struct nfs4_label l, *label = NULL;
3882 int err;
3883
3884 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3885
3886 do {
3887 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3888 trace_nfs4_symlink(dir, &dentry->d_name, err);
3889 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3890 &exception);
3891 } while (exception.retry);
3892
3893 nfs4_label_release_security(label);
3894 return err;
3895}
3896
3897static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3898 struct iattr *sattr, struct nfs4_label *label)
3899{
3900 struct nfs4_createdata *data;
3901 int status = -ENOMEM;
3902
3903 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3904 if (data == NULL)
3905 goto out;
3906
3907 data->arg.label = label;
3908 status = nfs4_do_create(dir, dentry, data);
3909
3910 nfs4_free_createdata(data);
3911out:
3912 return status;
3913}
3914
3915static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3916 struct iattr *sattr)
3917{
3918 struct nfs4_exception exception = { };
3919 struct nfs4_label l, *label = NULL;
3920 int err;
3921
3922 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3923
3924 sattr->ia_mode &= ~current_umask();
3925 do {
3926 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
3927 trace_nfs4_mkdir(dir, &dentry->d_name, err);
3928 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3929 &exception);
3930 } while (exception.retry);
3931 nfs4_label_release_security(label);
3932
3933 return err;
3934}
3935
3936static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3937 u64 cookie, struct page **pages, unsigned int count, int plus)
3938{
3939 struct inode *dir = d_inode(dentry);
3940 struct nfs4_readdir_arg args = {
3941 .fh = NFS_FH(dir),
3942 .pages = pages,
3943 .pgbase = 0,
3944 .count = count,
3945 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
3946 .plus = plus,
3947 };
3948 struct nfs4_readdir_res res;
3949 struct rpc_message msg = {
3950 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3951 .rpc_argp = &args,
3952 .rpc_resp = &res,
3953 .rpc_cred = cred,
3954 };
3955 int status;
3956
3957 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
3958 dentry,
3959 (unsigned long long)cookie);
3960 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3961 res.pgbase = args.pgbase;
3962 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3963 if (status >= 0) {
3964 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3965 status += args.pgbase;
3966 }
3967
3968 nfs_invalidate_atime(dir);
3969
3970 dprintk("%s: returns %d\n", __func__, status);
3971 return status;
3972}
3973
3974static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3975 u64 cookie, struct page **pages, unsigned int count, int plus)
3976{
3977 struct nfs4_exception exception = { };
3978 int err;
3979 do {
3980 err = _nfs4_proc_readdir(dentry, cred, cookie,
3981 pages, count, plus);
3982 trace_nfs4_readdir(d_inode(dentry), err);
3983 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
3984 &exception);
3985 } while (exception.retry);
3986 return err;
3987}
3988
3989static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3990 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
3991{
3992 struct nfs4_createdata *data;
3993 int mode = sattr->ia_mode;
3994 int status = -ENOMEM;
3995
3996 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3997 if (data == NULL)
3998 goto out;
3999
4000 if (S_ISFIFO(mode))
4001 data->arg.ftype = NF4FIFO;
4002 else if (S_ISBLK(mode)) {
4003 data->arg.ftype = NF4BLK;
4004 data->arg.u.device.specdata1 = MAJOR(rdev);
4005 data->arg.u.device.specdata2 = MINOR(rdev);
4006 }
4007 else if (S_ISCHR(mode)) {
4008 data->arg.ftype = NF4CHR;
4009 data->arg.u.device.specdata1 = MAJOR(rdev);
4010 data->arg.u.device.specdata2 = MINOR(rdev);
4011 } else if (!S_ISSOCK(mode)) {
4012 status = -EINVAL;
4013 goto out_free;
4014 }
4015
4016 data->arg.label = label;
4017 status = nfs4_do_create(dir, dentry, data);
4018out_free:
4019 nfs4_free_createdata(data);
4020out:
4021 return status;
4022}
4023
4024static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4025 struct iattr *sattr, dev_t rdev)
4026{
4027 struct nfs4_exception exception = { };
4028 struct nfs4_label l, *label = NULL;
4029 int err;
4030
4031 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4032
4033 sattr->ia_mode &= ~current_umask();
4034 do {
4035 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4036 trace_nfs4_mknod(dir, &dentry->d_name, err);
4037 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4038 &exception);
4039 } while (exception.retry);
4040
4041 nfs4_label_release_security(label);
4042
4043 return err;
4044}
4045
4046static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4047 struct nfs_fsstat *fsstat)
4048{
4049 struct nfs4_statfs_arg args = {
4050 .fh = fhandle,
4051 .bitmask = server->attr_bitmask,
4052 };
4053 struct nfs4_statfs_res res = {
4054 .fsstat = fsstat,
4055 };
4056 struct rpc_message msg = {
4057 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4058 .rpc_argp = &args,
4059 .rpc_resp = &res,
4060 };
4061
4062 nfs_fattr_init(fsstat->fattr);
4063 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4064}
4065
4066static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4067{
4068 struct nfs4_exception exception = { };
4069 int err;
4070 do {
4071 err = nfs4_handle_exception(server,
4072 _nfs4_proc_statfs(server, fhandle, fsstat),
4073 &exception);
4074 } while (exception.retry);
4075 return err;
4076}
4077
4078static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4079 struct nfs_fsinfo *fsinfo)
4080{
4081 struct nfs4_fsinfo_arg args = {
4082 .fh = fhandle,
4083 .bitmask = server->attr_bitmask,
4084 };
4085 struct nfs4_fsinfo_res res = {
4086 .fsinfo = fsinfo,
4087 };
4088 struct rpc_message msg = {
4089 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4090 .rpc_argp = &args,
4091 .rpc_resp = &res,
4092 };
4093
4094 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4095}
4096
4097static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4098{
4099 struct nfs4_exception exception = { };
4100 unsigned long now = jiffies;
4101 int err;
4102
4103 do {
4104 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4105 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4106 if (err == 0) {
4107 struct nfs_client *clp = server->nfs_client;
4108
4109 spin_lock(&clp->cl_lock);
4110 clp->cl_lease_time = fsinfo->lease_time * HZ;
4111 clp->cl_last_renewal = now;
4112 spin_unlock(&clp->cl_lock);
4113 break;
4114 }
4115 err = nfs4_handle_exception(server, err, &exception);
4116 } while (exception.retry);
4117 return err;
4118}
4119
4120static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4121{
4122 int error;
4123
4124 nfs_fattr_init(fsinfo->fattr);
4125 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4126 if (error == 0) {
4127 /* block layout checks this! */
4128 server->pnfs_blksize = fsinfo->blksize;
4129 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4130 }
4131
4132 return error;
4133}
4134
4135static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4136 struct nfs_pathconf *pathconf)
4137{
4138 struct nfs4_pathconf_arg args = {
4139 .fh = fhandle,
4140 .bitmask = server->attr_bitmask,
4141 };
4142 struct nfs4_pathconf_res res = {
4143 .pathconf = pathconf,
4144 };
4145 struct rpc_message msg = {
4146 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4147 .rpc_argp = &args,
4148 .rpc_resp = &res,
4149 };
4150
4151 /* None of the pathconf attributes are mandatory to implement */
4152 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4153 memset(pathconf, 0, sizeof(*pathconf));
4154 return 0;
4155 }
4156
4157 nfs_fattr_init(pathconf->fattr);
4158 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4159}
4160
4161static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4162 struct nfs_pathconf *pathconf)
4163{
4164 struct nfs4_exception exception = { };
4165 int err;
4166
4167 do {
4168 err = nfs4_handle_exception(server,
4169 _nfs4_proc_pathconf(server, fhandle, pathconf),
4170 &exception);
4171 } while (exception.retry);
4172 return err;
4173}
4174
4175int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4176 const struct nfs_open_context *ctx,
4177 const struct nfs_lock_context *l_ctx,
4178 fmode_t fmode)
4179{
4180 const struct nfs_lockowner *lockowner = NULL;
4181
4182 if (l_ctx != NULL)
4183 lockowner = &l_ctx->lockowner;
4184 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4185}
4186EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4187
4188static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4189 const struct nfs_open_context *ctx,
4190 const struct nfs_lock_context *l_ctx,
4191 fmode_t fmode)
4192{
4193 nfs4_stateid current_stateid;
4194
4195 /* If the current stateid represents a lost lock, then exit */
4196 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4197 return true;
4198 return nfs4_stateid_match(stateid, &current_stateid);
4199}
4200
4201static bool nfs4_error_stateid_expired(int err)
4202{
4203 switch (err) {
4204 case -NFS4ERR_DELEG_REVOKED:
4205 case -NFS4ERR_ADMIN_REVOKED:
4206 case -NFS4ERR_BAD_STATEID:
4207 case -NFS4ERR_STALE_STATEID:
4208 case -NFS4ERR_OLD_STATEID:
4209 case -NFS4ERR_OPENMODE:
4210 case -NFS4ERR_EXPIRED:
4211 return true;
4212 }
4213 return false;
4214}
4215
4216void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4217{
4218 nfs_invalidate_atime(hdr->inode);
4219}
4220
4221static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4222{
4223 struct nfs_server *server = NFS_SERVER(hdr->inode);
4224
4225 trace_nfs4_read(hdr, task->tk_status);
4226 if (nfs4_async_handle_error(task, server,
4227 hdr->args.context->state,
4228 NULL) == -EAGAIN) {
4229 rpc_restart_call_prepare(task);
4230 return -EAGAIN;
4231 }
4232
4233 __nfs4_read_done_cb(hdr);
4234 if (task->tk_status > 0)
4235 renew_lease(server, hdr->timestamp);
4236 return 0;
4237}
4238
4239static bool nfs4_read_stateid_changed(struct rpc_task *task,
4240 struct nfs_pgio_args *args)
4241{
4242
4243 if (!nfs4_error_stateid_expired(task->tk_status) ||
4244 nfs4_stateid_is_current(&args->stateid,
4245 args->context,
4246 args->lock_context,
4247 FMODE_READ))
4248 return false;
4249 rpc_restart_call_prepare(task);
4250 return true;
4251}
4252
4253static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4254{
4255
4256 dprintk("--> %s\n", __func__);
4257
4258 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4259 return -EAGAIN;
4260 if (nfs4_read_stateid_changed(task, &hdr->args))
4261 return -EAGAIN;
4262 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4263 nfs4_read_done_cb(task, hdr);
4264}
4265
4266static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4267 struct rpc_message *msg)
4268{
4269 hdr->timestamp = jiffies;
4270 hdr->pgio_done_cb = nfs4_read_done_cb;
4271 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4272 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4273}
4274
4275static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4276 struct nfs_pgio_header *hdr)
4277{
4278 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4279 &hdr->args.seq_args,
4280 &hdr->res.seq_res,
4281 task))
4282 return 0;
4283 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4284 hdr->args.lock_context,
4285 hdr->rw_ops->rw_mode) == -EIO)
4286 return -EIO;
4287 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4288 return -EIO;
4289 return 0;
4290}
4291
4292static int nfs4_write_done_cb(struct rpc_task *task,
4293 struct nfs_pgio_header *hdr)
4294{
4295 struct inode *inode = hdr->inode;
4296
4297 trace_nfs4_write(hdr, task->tk_status);
4298 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4299 hdr->args.context->state,
4300 NULL) == -EAGAIN) {
4301 rpc_restart_call_prepare(task);
4302 return -EAGAIN;
4303 }
4304 if (task->tk_status >= 0) {
4305 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4306 nfs_writeback_update_inode(hdr);
4307 }
4308 return 0;
4309}
4310
4311static bool nfs4_write_stateid_changed(struct rpc_task *task,
4312 struct nfs_pgio_args *args)
4313{
4314
4315 if (!nfs4_error_stateid_expired(task->tk_status) ||
4316 nfs4_stateid_is_current(&args->stateid,
4317 args->context,
4318 args->lock_context,
4319 FMODE_WRITE))
4320 return false;
4321 rpc_restart_call_prepare(task);
4322 return true;
4323}
4324
4325static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4326{
4327 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4328 return -EAGAIN;
4329 if (nfs4_write_stateid_changed(task, &hdr->args))
4330 return -EAGAIN;
4331 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4332 nfs4_write_done_cb(task, hdr);
4333}
4334
4335static
4336bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4337{
4338 /* Don't request attributes for pNFS or O_DIRECT writes */
4339 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4340 return false;
4341 /* Otherwise, request attributes if and only if we don't hold
4342 * a delegation
4343 */
4344 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4345}
4346
4347static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4348 struct rpc_message *msg)
4349{
4350 struct nfs_server *server = NFS_SERVER(hdr->inode);
4351
4352 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4353 hdr->args.bitmask = NULL;
4354 hdr->res.fattr = NULL;
4355 } else
4356 hdr->args.bitmask = server->cache_consistency_bitmask;
4357
4358 if (!hdr->pgio_done_cb)
4359 hdr->pgio_done_cb = nfs4_write_done_cb;
4360 hdr->res.server = server;
4361 hdr->timestamp = jiffies;
4362
4363 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4364 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4365}
4366
4367static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4368{
4369 nfs4_setup_sequence(NFS_SERVER(data->inode),
4370 &data->args.seq_args,
4371 &data->res.seq_res,
4372 task);
4373}
4374
4375static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4376{
4377 struct inode *inode = data->inode;
4378
4379 trace_nfs4_commit(data, task->tk_status);
4380 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4381 NULL, NULL) == -EAGAIN) {
4382 rpc_restart_call_prepare(task);
4383 return -EAGAIN;
4384 }
4385 return 0;
4386}
4387
4388static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4389{
4390 if (!nfs4_sequence_done(task, &data->res.seq_res))
4391 return -EAGAIN;
4392 return data->commit_done_cb(task, data);
4393}
4394
4395static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4396{
4397 struct nfs_server *server = NFS_SERVER(data->inode);
4398
4399 if (data->commit_done_cb == NULL)
4400 data->commit_done_cb = nfs4_commit_done_cb;
4401 data->res.server = server;
4402 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4403 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4404}
4405
4406struct nfs4_renewdata {
4407 struct nfs_client *client;
4408 unsigned long timestamp;
4409};
4410
4411/*
4412 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4413 * standalone procedure for queueing an asynchronous RENEW.
4414 */
4415static void nfs4_renew_release(void *calldata)
4416{
4417 struct nfs4_renewdata *data = calldata;
4418 struct nfs_client *clp = data->client;
4419
4420 if (atomic_read(&clp->cl_count) > 1)
4421 nfs4_schedule_state_renewal(clp);
4422 nfs_put_client(clp);
4423 kfree(data);
4424}
4425
4426static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4427{
4428 struct nfs4_renewdata *data = calldata;
4429 struct nfs_client *clp = data->client;
4430 unsigned long timestamp = data->timestamp;
4431
4432 trace_nfs4_renew_async(clp, task->tk_status);
4433 switch (task->tk_status) {
4434 case 0:
4435 break;
4436 case -NFS4ERR_LEASE_MOVED:
4437 nfs4_schedule_lease_moved_recovery(clp);
4438 break;
4439 default:
4440 /* Unless we're shutting down, schedule state recovery! */
4441 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4442 return;
4443 if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
4444 nfs4_schedule_lease_recovery(clp);
4445 return;
4446 }
4447 nfs4_schedule_path_down_recovery(clp);
4448 }
4449 do_renew_lease(clp, timestamp);
4450}
4451
4452static const struct rpc_call_ops nfs4_renew_ops = {
4453 .rpc_call_done = nfs4_renew_done,
4454 .rpc_release = nfs4_renew_release,
4455};
4456
4457static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4458{
4459 struct rpc_message msg = {
4460 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4461 .rpc_argp = clp,
4462 .rpc_cred = cred,
4463 };
4464 struct nfs4_renewdata *data;
4465
4466 if (renew_flags == 0)
4467 return 0;
4468 if (!atomic_inc_not_zero(&clp->cl_count))
4469 return -EIO;
4470 data = kmalloc(sizeof(*data), GFP_NOFS);
4471 if (data == NULL)
4472 return -ENOMEM;
4473 data->client = clp;
4474 data->timestamp = jiffies;
4475 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4476 &nfs4_renew_ops, data);
4477}
4478
4479static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4480{
4481 struct rpc_message msg = {
4482 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4483 .rpc_argp = clp,
4484 .rpc_cred = cred,
4485 };
4486 unsigned long now = jiffies;
4487 int status;
4488
4489 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4490 if (status < 0)
4491 return status;
4492 do_renew_lease(clp, now);
4493 return 0;
4494}
4495
4496static inline int nfs4_server_supports_acls(struct nfs_server *server)
4497{
4498 return server->caps & NFS_CAP_ACLS;
4499}
4500
4501/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4502 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4503 * the stack.
4504 */
4505#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
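/*
 * A rough worked example, assuming the usual XATTR_SIZE_MAX of 65536
 * bytes and a 4096-byte PAGE_SIZE: DIV_ROUND_UP(65536, 4096) == 16,
 * so at most 16 page pointers land in the on-stack arrays below.
 */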
4506
4507static int buf_to_pages_noslab(const void *buf, size_t buflen,
4508 struct page **pages, unsigned int *pgbase)
4509{
4510 struct page *newpage, **spages;
4511 int rc = 0;
4512 size_t len;
4513 spages = pages;
4514
4515 do {
4516 len = min_t(size_t, PAGE_SIZE, buflen);
4517 newpage = alloc_page(GFP_KERNEL);
4518
4519 if (newpage == NULL)
4520 goto unwind;
4521 memcpy(page_address(newpage), buf, len);
4522 buf += len;
4523 buflen -= len;
4524 *pages++ = newpage;
4525 rc++;
4526 } while (buflen != 0);
4527
4528 return rc;
4529
4530unwind:
4531 for(; rc > 0; rc--)
4532 __free_page(spages[rc-1]);
4533 return -ENOMEM;
4534}
4535
4536struct nfs4_cached_acl {
4537 int cached;
4538 size_t len;
4539 char data[0];
4540};
4541
4542static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4543{
4544 struct nfs_inode *nfsi = NFS_I(inode);
4545
4546 spin_lock(&inode->i_lock);
4547 kfree(nfsi->nfs4_acl);
4548 nfsi->nfs4_acl = acl;
4549 spin_unlock(&inode->i_lock);
4550}
4551
4552static void nfs4_zap_acl_attr(struct inode *inode)
4553{
4554 nfs4_set_cached_acl(inode, NULL);
4555}
4556
4557static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4558{
4559 struct nfs_inode *nfsi = NFS_I(inode);
4560 struct nfs4_cached_acl *acl;
4561 int ret = -ENOENT;
4562
4563 spin_lock(&inode->i_lock);
4564 acl = nfsi->nfs4_acl;
4565 if (acl == NULL)
4566 goto out;
4567 if (buf == NULL) /* user is just asking for length */
4568 goto out_len;
4569 if (acl->cached == 0)
4570 goto out;
4571 ret = -ERANGE; /* see getxattr(2) man page */
4572 if (acl->len > buflen)
4573 goto out;
4574 memcpy(buf, acl->data, acl->len);
4575out_len:
4576 ret = acl->len;
4577out:
4578 spin_unlock(&inode->i_lock);
4579 return ret;
4580}
4581
4582static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4583{
4584 struct nfs4_cached_acl *acl;
4585 size_t buflen = sizeof(*acl) + acl_len;
4586
4587 if (buflen <= PAGE_SIZE) {
4588 acl = kmalloc(buflen, GFP_KERNEL);
4589 if (acl == NULL)
4590 goto out;
4591 acl->cached = 1;
4592 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4593 } else {
4594 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4595 if (acl == NULL)
4596 goto out;
4597 acl->cached = 0;
4598 }
4599 acl->len = acl_len;
4600out:
4601 nfs4_set_cached_acl(inode, acl);
4602}
4603
4604/*
4605 * The getxattr API returns the required buffer length when called with a
4606 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4607 * the required buf. On a NULL buf, we supply a page-sized buffer to the
4608 * server, guessing that the ACL request can be serviced by a single page.
4609 * If so, we cache up to a page of ACL data, and the second getxattr call
4610 * is serviced from the cache. If not, we throw away the page and cache only
4611 * the required length. The next getxattr call then makes another round trip
4612 * to the server, this time with an input buf of the required size.
4613 */
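/*
 * A minimal userspace sketch of that two-call pattern, assuming the
 * standard getxattr(2) interface and the "system.nfs4_acl" attribute
 * name (error handling omitted):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	char *acl = malloc(len);
 *	if (acl != NULL)
 *		len = getxattr(path, "system.nfs4_acl", acl, len);
 *
 * The first call is normally answered from the length (or data) cached
 * below; only an ACL larger than the one-page guess costs the second
 * call another GETACL round trip.
 */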
4614static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4615{
4616 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4617 struct nfs_getaclargs args = {
4618 .fh = NFS_FH(inode),
4619 .acl_pages = pages,
4620 .acl_len = buflen,
4621 };
4622 struct nfs_getaclres res = {
4623 .acl_len = buflen,
4624 };
4625 struct rpc_message msg = {
4626 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4627 .rpc_argp = &args,
4628 .rpc_resp = &res,
4629 };
4630 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4631 int ret = -ENOMEM, i;
4632
4633 /* As long as we're doing a round trip to the server anyway,
4634 * let's be prepared for a page of acl data. */
4635 if (npages == 0)
4636 npages = 1;
4637 if (npages > ARRAY_SIZE(pages))
4638 return -ERANGE;
4639
4640 for (i = 0; i < npages; i++) {
4641 pages[i] = alloc_page(GFP_KERNEL);
4642 if (!pages[i])
4643 goto out_free;
4644 }
4645
4646 /* for decoding across pages */
4647 res.acl_scratch = alloc_page(GFP_KERNEL);
4648 if (!res.acl_scratch)
4649 goto out_free;
4650
4651 args.acl_len = npages * PAGE_SIZE;
4652 args.acl_pgbase = 0;
4653
4654 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4655 __func__, buf, buflen, npages, args.acl_len);
4656 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4657 &msg, &args.seq_args, &res.seq_res, 0);
4658 if (ret)
4659 goto out_free;
4660
4661 /* Handle the case where the passed-in buffer is too short */
4662 if (res.acl_flags & NFS4_ACL_TRUNC) {
4663 /* Did the user only issue a request for the acl length? */
4664 if (buf == NULL)
4665 goto out_ok;
4666 ret = -ERANGE;
4667 goto out_free;
4668 }
4669 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4670 if (buf) {
4671 if (res.acl_len > buflen) {
4672 ret = -ERANGE;
4673 goto out_free;
4674 }
4675 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4676 }
4677out_ok:
4678 ret = res.acl_len;
4679out_free:
4680 for (i = 0; i < npages; i++)
4681 if (pages[i])
4682 __free_page(pages[i]);
4683 if (res.acl_scratch)
4684 __free_page(res.acl_scratch);
4685 return ret;
4686}
4687
4688static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4689{
4690 struct nfs4_exception exception = { };
4691 ssize_t ret;
4692 do {
4693 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4694 trace_nfs4_get_acl(inode, ret);
4695 if (ret >= 0)
4696 break;
4697 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4698 } while (exception.retry);
4699 return ret;
4700}
4701
4702static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4703{
4704 struct nfs_server *server = NFS_SERVER(inode);
4705 int ret;
4706
4707 if (!nfs4_server_supports_acls(server))
4708 return -EOPNOTSUPP;
4709 ret = nfs_revalidate_inode(server, inode);
4710 if (ret < 0)
4711 return ret;
4712 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4713 nfs_zap_acl_cache(inode);
4714 ret = nfs4_read_cached_acl(inode, buf, buflen);
4715 if (ret != -ENOENT)
4716 /* -ENOENT is returned if there is no ACL or if there is an ACL
4717 * but no cached acl data, just the acl length */
4718 return ret;
4719 return nfs4_get_acl_uncached(inode, buf, buflen);
4720}
4721
4722static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4723{
4724 struct nfs_server *server = NFS_SERVER(inode);
4725 struct page *pages[NFS4ACL_MAXPAGES];
4726 struct nfs_setaclargs arg = {
4727 .fh = NFS_FH(inode),
4728 .acl_pages = pages,
4729 .acl_len = buflen,
4730 };
4731 struct nfs_setaclres res;
4732 struct rpc_message msg = {
4733 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4734 .rpc_argp = &arg,
4735 .rpc_resp = &res,
4736 };
4737 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4738 int ret, i;
4739
4740 if (!nfs4_server_supports_acls(server))
4741 return -EOPNOTSUPP;
4742 if (npages > ARRAY_SIZE(pages))
4743 return -ERANGE;
4744 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4745 if (i < 0)
4746 return i;
4747 nfs4_inode_return_delegation(inode);
4748 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4749
4750 /*
4751 * Free each page after tx, so the only ref left is
4752 * held by the network stack
4753 */
4754 for (; i > 0; i--)
4755 put_page(pages[i-1]);
4756
4757 /*
4758 * Acl update can result in inode attribute update,
4759 * so mark the attribute cache invalid.
4760 */
4761 spin_lock(&inode->i_lock);
4762 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4763 spin_unlock(&inode->i_lock);
4764 nfs_access_zap_cache(inode);
4765 nfs_zap_acl_cache(inode);
4766 return ret;
4767}
4768
4769static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4770{
4771 struct nfs4_exception exception = { };
4772 int err;
4773 do {
4774 err = __nfs4_proc_set_acl(inode, buf, buflen);
4775 trace_nfs4_set_acl(inode, err);
4776 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4777 &exception);
4778 } while (exception.retry);
4779 return err;
4780}
4781
4782#ifdef CONFIG_NFS_V4_SECURITY_LABEL
4783static int _nfs4_get_security_label(struct inode *inode, void *buf,
4784 size_t buflen)
4785{
4786 struct nfs_server *server = NFS_SERVER(inode);
4787 struct nfs_fattr fattr;
4788 struct nfs4_label label = {0, 0, buflen, buf};
4789
4790 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4791 struct nfs4_getattr_arg arg = {
4792 .fh = NFS_FH(inode),
4793 .bitmask = bitmask,
4794 };
4795 struct nfs4_getattr_res res = {
4796 .fattr = &fattr,
4797 .label = &label,
4798 .server = server,
4799 };
4800 struct rpc_message msg = {
4801 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4802 .rpc_argp = &arg,
4803 .rpc_resp = &res,
4804 };
4805 int ret;
4806
4807 nfs_fattr_init(&fattr);
4808
4809 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4810 if (ret)
4811 return ret;
4812 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4813 return -ENOENT;
4814 if (buflen < label.len)
4815 return -ERANGE;
4816 return 0;
4817}
4818
4819static int nfs4_get_security_label(struct inode *inode, void *buf,
4820 size_t buflen)
4821{
4822 struct nfs4_exception exception = { };
4823 int err;
4824
4825 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4826 return -EOPNOTSUPP;
4827
4828 do {
4829 err = _nfs4_get_security_label(inode, buf, buflen);
4830 trace_nfs4_get_security_label(inode, err);
4831 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4832 &exception);
4833 } while (exception.retry);
4834 return err;
4835}
4836
4837static int _nfs4_do_set_security_label(struct inode *inode,
4838 struct nfs4_label *ilabel,
4839 struct nfs_fattr *fattr,
4840 struct nfs4_label *olabel)
4841{
4842
4843 struct iattr sattr = {0};
4844 struct nfs_server *server = NFS_SERVER(inode);
4845 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4846 struct nfs_setattrargs arg = {
4847 .fh = NFS_FH(inode),
4848 .iap = &sattr,
4849 .server = server,
4850 .bitmask = bitmask,
4851 .label = ilabel,
4852 };
4853 struct nfs_setattrres res = {
4854 .fattr = fattr,
4855 .label = olabel,
4856 .server = server,
4857 };
4858 struct rpc_message msg = {
4859 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4860 .rpc_argp = &arg,
4861 .rpc_resp = &res,
4862 };
4863 int status;
4864
4865 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4866
4867 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4868 if (status)
4869 dprintk("%s failed: %d\n", __func__, status);
4870
4871 return status;
4872}
4873
4874static int nfs4_do_set_security_label(struct inode *inode,
4875 struct nfs4_label *ilabel,
4876 struct nfs_fattr *fattr,
4877 struct nfs4_label *olabel)
4878{
4879 struct nfs4_exception exception = { };
4880 int err;
4881
4882 do {
4883 err = _nfs4_do_set_security_label(inode, ilabel,
4884 fattr, olabel);
4885 trace_nfs4_set_security_label(inode, err);
4886 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4887 &exception);
4888 } while (exception.retry);
4889 return err;
4890}
4891
4892static int
4893nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4894{
4895 struct nfs4_label ilabel, *olabel = NULL;
4896 struct nfs_fattr fattr;
4897 struct rpc_cred *cred;
4898 struct inode *inode = d_inode(dentry);
4899 int status;
4900
4901 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4902 return -EOPNOTSUPP;
4903
4904 nfs_fattr_init(&fattr);
4905
4906 ilabel.pi = 0;
4907 ilabel.lfs = 0;
4908 ilabel.label = (char *)buf;
4909 ilabel.len = buflen;
4910
4911 cred = rpc_lookup_cred();
4912 if (IS_ERR(cred))
4913 return PTR_ERR(cred);
4914
4915 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4916 if (IS_ERR(olabel)) {
4917 status = PTR_ERR(olabel);
4918 goto out;
4919 }
4920
4921 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
4922 if (status == 0)
4923 nfs_setsecurity(inode, &fattr, olabel);
4924
4925 nfs4_label_free(olabel);
4926out:
4927 put_rpccred(cred);
4928 return status;
4929}
4930#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
4931
4932
4933static int
4934nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4935 struct nfs4_state *state, long *timeout)
4936{
4937 struct nfs_client *clp = server->nfs_client;
4938
4939 if (task->tk_status >= 0)
4940 return 0;
4941 switch(task->tk_status) {
4942 case -NFS4ERR_DELEG_REVOKED:
4943 case -NFS4ERR_ADMIN_REVOKED:
4944 case -NFS4ERR_BAD_STATEID:
4945 case -NFS4ERR_OPENMODE:
4946 if (state == NULL)
4947 break;
4948 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4949 goto recovery_failed;
4950 goto wait_on_recovery;
4951 case -NFS4ERR_EXPIRED:
4952 if (state != NULL) {
4953 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4954 goto recovery_failed;
4955 }
4956 case -NFS4ERR_STALE_STATEID:
4957 case -NFS4ERR_STALE_CLIENTID:
4958 nfs4_schedule_lease_recovery(clp);
4959 goto wait_on_recovery;
4960 case -NFS4ERR_MOVED:
4961 if (nfs4_schedule_migration_recovery(server) < 0)
4962 goto recovery_failed;
4963 goto wait_on_recovery;
4964 case -NFS4ERR_LEASE_MOVED:
4965 nfs4_schedule_lease_moved_recovery(clp);
4966 goto wait_on_recovery;
4967#if defined(CONFIG_NFS_V4_1)
4968 case -NFS4ERR_BADSESSION:
4969 case -NFS4ERR_BADSLOT:
4970 case -NFS4ERR_BAD_HIGH_SLOT:
4971 case -NFS4ERR_DEADSESSION:
4972 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4973 case -NFS4ERR_SEQ_FALSE_RETRY:
4974 case -NFS4ERR_SEQ_MISORDERED:
4975 dprintk("%s ERROR %d, Reset session\n", __func__,
4976 task->tk_status);
4977 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4978 goto wait_on_recovery;
4979#endif /* CONFIG_NFS_V4_1 */
4980 case -NFS4ERR_DELAY:
4981 nfs_inc_server_stats(server, NFSIOS_DELAY);
4982 rpc_delay(task, nfs4_update_delay(timeout));
4983 goto restart_call;
4984 case -NFS4ERR_GRACE:
4985 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4986 case -NFS4ERR_RETRY_UNCACHED_REP:
4987 case -NFS4ERR_OLD_STATEID:
4988 goto restart_call;
4989 }
4990 task->tk_status = nfs4_map_errors(task->tk_status);
4991 return 0;
4992recovery_failed:
4993 task->tk_status = -EIO;
4994 return 0;
4995wait_on_recovery:
4996 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4997 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4998 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4999 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
5000 goto recovery_failed;
5001restart_call:
5002 task->tk_status = 0;
5003 return -EAGAIN;
5004}
5005
5006static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5007 nfs4_verifier *bootverf)
5008{
5009 __be32 verf[2];
5010
5011 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5012 /* An impossible timestamp guarantees this value
5013 * will never match a generated boot time. */
5014 verf[0] = 0;
5015 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
5016 } else {
5017 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5018 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
5019 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
5020 }
5021 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5022}
5023
5024static int
5025nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5026{
5027 int result;
5028 size_t len;
5029 char *str;
5030
5031 if (clp->cl_owner_id != NULL)
5032 return 0;
5033
5034 rcu_read_lock();
5035 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5036 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5037 1 +
5038 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5039 1;
5040 rcu_read_unlock();
5041
5042 if (len > NFS4_OPAQUE_LIMIT + 1)
5043 return -EINVAL;
5044
5045 /*
5046 * Since this string is allocated at mount time, and held until the
5047 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5048 * about a memory-reclaim deadlock.
5049 */
5050 str = kmalloc(len, GFP_KERNEL);
5051 if (!str)
5052 return -ENOMEM;
5053
5054 rcu_read_lock();
5055 result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5056 clp->cl_ipaddr,
5057 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5058 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5059 rcu_read_unlock();
5060
5061 clp->cl_owner_id = str;
5062 return 0;
5063}
5064
5065static int
5066nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5067{
5068 int result;
5069 size_t len;
5070 char *str;
5071
5072 len = 10 + 10 + 1 + 10 + 1 +
5073 strlen(nfs4_client_id_uniquifier) + 1 +
5074 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5075
5076 if (len > NFS4_OPAQUE_LIMIT + 1)
5077 return -EINVAL;
5078
5079 /*
5080 * Since this string is allocated at mount time, and held until the
5081 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5082 * about a memory-reclaim deadlock.
5083 */
5084 str = kmalloc(len, GFP_KERNEL);
5085 if (!str)
5086 return -ENOMEM;
5087
5088 result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5089 clp->rpc_ops->version, clp->cl_minorversion,
5090 nfs4_client_id_uniquifier,
5091 clp->cl_rpcclient->cl_nodename);
5092 clp->cl_owner_id = str;
5093 return 0;
5094}
5095
5096static int
5097nfs4_init_uniform_client_string(struct nfs_client *clp)
5098{
5099 int result;
5100 size_t len;
5101 char *str;
5102
5103 if (clp->cl_owner_id != NULL)
5104 return 0;
5105
5106 if (nfs4_client_id_uniquifier[0] != '\0')
5107 return nfs4_init_uniquifier_client_string(clp);
5108
5109 len = 10 + 10 + 1 + 10 + 1 +
5110 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5111
5112 if (len > NFS4_OPAQUE_LIMIT + 1)
5113 return -EINVAL;
5114
5115 /*
5116 * Since this string is allocated at mount time, and held until the
5117 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5118 * about a memory-reclaim deadlock.
5119 */
5120 str = kmalloc(len, GFP_KERNEL);
5121 if (!str)
5122 return -ENOMEM;
5123
5124 result = scnprintf(str, len, "Linux NFSv%u.%u %s",
5125 clp->rpc_ops->version, clp->cl_minorversion,
5126 clp->cl_rpcclient->cl_nodename);
5127 clp->cl_owner_id = str;
5128 return 0;
5129}
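/*
 * For illustration (hypothetical node name): on an NFSv4.1 mount from a
 * host named "client.example.com", the format above yields the client
 * identifier string "Linux NFSv4.1 client.example.com".
 */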
5130
5131/*
5132 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5133 * services. Advertise one based on the address family of the
5134 * clientaddr.
5135 */
5136static unsigned int
5137nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5138{
5139 if (strchr(clp->cl_ipaddr, ':') != NULL)
5140 return scnprintf(buf, len, "tcp6");
5141 else
5142 return scnprintf(buf, len, "tcp");
5143}
5144
5145static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5146{
5147 struct nfs4_setclientid *sc = calldata;
5148
5149 if (task->tk_status == 0)
5150 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5151}
5152
5153static const struct rpc_call_ops nfs4_setclientid_ops = {
5154 .rpc_call_done = nfs4_setclientid_done,
5155};
5156
5157/**
5158 * nfs4_proc_setclientid - Negotiate client ID
5159 * @clp: state data structure
5160 * @program: RPC program for NFSv4 callback service
5161 * @port: IP port number for NFS4 callback service
5162 * @cred: RPC credential to use for this call
5163 * @res: where to place the result
5164 *
5165 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5166 */
5167int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5168 unsigned short port, struct rpc_cred *cred,
5169 struct nfs4_setclientid_res *res)
5170{
5171 nfs4_verifier sc_verifier;
5172 struct nfs4_setclientid setclientid = {
5173 .sc_verifier = &sc_verifier,
5174 .sc_prog = program,
5175 .sc_clnt = clp,
5176 };
5177 struct rpc_message msg = {
5178 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5179 .rpc_argp = &setclientid,
5180 .rpc_resp = res,
5181 .rpc_cred = cred,
5182 };
5183 struct rpc_task *task;
5184 struct rpc_task_setup task_setup_data = {
5185 .rpc_client = clp->cl_rpcclient,
5186 .rpc_message = &msg,
5187 .callback_ops = &nfs4_setclientid_ops,
5188 .callback_data = &setclientid,
5189 .flags = RPC_TASK_TIMEOUT,
5190 };
5191 int status;
5192
5193 /* nfs_client_id4 */
5194 nfs4_init_boot_verifier(clp, &sc_verifier);
5195
5196 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5197 status = nfs4_init_uniform_client_string(clp);
5198 else
5199 status = nfs4_init_nonuniform_client_string(clp);
5200
5201 if (status)
5202 goto out;
5203
5204 /* cb_client4 */
5205 setclientid.sc_netid_len =
5206 nfs4_init_callback_netid(clp,
5207 setclientid.sc_netid,
5208 sizeof(setclientid.sc_netid));
5209 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5210 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5211 clp->cl_ipaddr, port >> 8, port & 255);
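	/*
	 * A worked example with hypothetical values: for cl_ipaddr
	 * "192.0.2.10" and callback port 45119, the universal address
	 * built above is "192.0.2.10.176.63" (45119 == 176 * 256 + 63).
	 */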
5212
5213 dprintk("NFS call setclientid auth=%s, '%s'\n",
5214 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5215 clp->cl_owner_id);
5216 task = rpc_run_task(&task_setup_data);
5217 if (IS_ERR(task)) {
5218 status = PTR_ERR(task);
5219 goto out;
5220 }
5221 status = task->tk_status;
5222 if (setclientid.sc_cred) {
5223 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5224 put_rpccred(setclientid.sc_cred);
5225 }
5226 rpc_put_task(task);
5227out:
5228 trace_nfs4_setclientid(clp, status);
5229 dprintk("NFS reply setclientid: %d\n", status);
5230 return status;
5231}
5232
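/*
 * An illustrative sketch (not the actual call site, which lives in the
 * state manager) of how the SETCLIENTID call above is typically paired
 * with the confirmation step below when establishing a lease:
 *
 *	struct nfs4_setclientid_res res = { };
 *
 *	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &res);
 *	if (status == 0)
 *		status = nfs4_proc_setclientid_confirm(clp, &res, cred);
 */
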
5233/**
5234 * nfs4_proc_setclientid_confirm - Confirm client ID
5235 * @clp: state data structure
5236 * @res: result of a previous SETCLIENTID
5237 * @cred: RPC credential to use for this call
5238 *
5239 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5240 */
5241int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5242 struct nfs4_setclientid_res *arg,
5243 struct rpc_cred *cred)
5244{
5245 struct rpc_message msg = {
5246 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5247 .rpc_argp = arg,
5248 .rpc_cred = cred,
5249 };
5250 int status;
5251
5252 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5253 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5254 clp->cl_clientid);
5255 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5256 trace_nfs4_setclientid_confirm(clp, status);
5257 dprintk("NFS reply setclientid_confirm: %d\n", status);
5258 return status;
5259}
5260
5261struct nfs4_delegreturndata {
5262 struct nfs4_delegreturnargs args;
5263 struct nfs4_delegreturnres res;
5264 struct nfs_fh fh;
5265 nfs4_stateid stateid;
5266 unsigned long timestamp;
5267 struct nfs_fattr fattr;
5268 int rpc_status;
5269 struct inode *inode;
5270 bool roc;
5271 u32 roc_barrier;
5272};
5273
5274static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5275{
5276 struct nfs4_delegreturndata *data = calldata;
5277
5278 if (!nfs4_sequence_done(task, &data->res.seq_res))
5279 return;
5280
5281 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5282 switch (task->tk_status) {
5283 case 0:
5284 renew_lease(data->res.server, data->timestamp);
5285 case -NFS4ERR_ADMIN_REVOKED:
5286 case -NFS4ERR_DELEG_REVOKED:
5287 case -NFS4ERR_BAD_STATEID:
5288 case -NFS4ERR_OLD_STATEID:
5289 case -NFS4ERR_STALE_STATEID:
5290 case -NFS4ERR_EXPIRED:
5291 task->tk_status = 0;
5292 if (data->roc)
5293 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5294 break;
5295 default:
5296 if (nfs4_async_handle_error(task, data->res.server,
5297 NULL, NULL) == -EAGAIN) {
5298 rpc_restart_call_prepare(task);
5299 return;
5300 }
5301 }
5302 data->rpc_status = task->tk_status;
5303}
5304
5305static void nfs4_delegreturn_release(void *calldata)
5306{
5307 struct nfs4_delegreturndata *data = calldata;
5308 struct inode *inode = data->inode;
5309
5310 if (inode) {
5311 if (data->roc)
5312 pnfs_roc_release(inode);
5313 nfs_iput_and_deactive(inode);
5314 }
5315 kfree(calldata);
5316}
5317
5318static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5319{
5320 struct nfs4_delegreturndata *d_data;
5321
5322 d_data = (struct nfs4_delegreturndata *)data;
5323
5324 if (d_data->roc)
5325 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
5326
5327 nfs4_setup_sequence(d_data->res.server,
5328 &d_data->args.seq_args,
5329 &d_data->res.seq_res,
5330 task);
5331}
5332
5333static const struct rpc_call_ops nfs4_delegreturn_ops = {
5334 .rpc_call_prepare = nfs4_delegreturn_prepare,
5335 .rpc_call_done = nfs4_delegreturn_done,
5336 .rpc_release = nfs4_delegreturn_release,
5337};
5338
5339static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5340{
5341 struct nfs4_delegreturndata *data;
5342 struct nfs_server *server = NFS_SERVER(inode);
5343 struct rpc_task *task;
5344 struct rpc_message msg = {
5345 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5346 .rpc_cred = cred,
5347 };
5348 struct rpc_task_setup task_setup_data = {
5349 .rpc_client = server->client,
5350 .rpc_message = &msg,
5351 .callback_ops = &nfs4_delegreturn_ops,
5352 .flags = RPC_TASK_ASYNC,
5353 };
5354 int status = 0;
5355
5356 data = kzalloc(sizeof(*data), GFP_NOFS);
5357 if (data == NULL)
5358 return -ENOMEM;
5359 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5360 data->args.fhandle = &data->fh;
5361 data->args.stateid = &data->stateid;
5362 data->args.bitmask = server->cache_consistency_bitmask;
5363 nfs_copy_fh(&data->fh, NFS_FH(inode));
5364 nfs4_stateid_copy(&data->stateid, stateid);
5365 data->res.fattr = &data->fattr;
5366 data->res.server = server;
5367 nfs_fattr_init(data->res.fattr);
5368 data->timestamp = jiffies;
5369 data->rpc_status = 0;
5370 data->inode = nfs_igrab_and_active(inode);
5371 if (data->inode)
5372 data->roc = nfs4_roc(inode);
5373
5374 task_setup_data.callback_data = data;
5375 msg.rpc_argp = &data->args;
5376 msg.rpc_resp = &data->res;
5377 task = rpc_run_task(&task_setup_data);
5378 if (IS_ERR(task))
5379 return PTR_ERR(task);
5380 if (!issync)
5381 goto out;
5382 status = nfs4_wait_for_completion_rpc_task(task);
5383 if (status != 0)
5384 goto out;
5385 status = data->rpc_status;
5386 if (status == 0)
5387 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5388 else
5389 nfs_refresh_inode(inode, &data->fattr);
5390out:
5391 rpc_put_task(task);
5392 return status;
5393}
5394
5395int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5396{
5397 struct nfs_server *server = NFS_SERVER(inode);
5398 struct nfs4_exception exception = { };
5399 int err;
5400 do {
5401 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5402 trace_nfs4_delegreturn(inode, err);
5403 switch (err) {
5404 case -NFS4ERR_STALE_STATEID:
5405 case -NFS4ERR_EXPIRED:
5406 case 0:
5407 return 0;
5408 }
5409 err = nfs4_handle_exception(server, err, &exception);
5410 } while (exception.retry);
5411 return err;
5412}
5413
5414#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5415#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5416
5417/*
5418 * Sleep, with exponential backoff, and retry the LOCK operation.
5419 */
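/*
 * With NFS4_LOCK_MINTIMEOUT of 1*HZ and NFS4_LOCK_MAXTIMEOUT of 30*HZ,
 * a caller starting from the minimum therefore sleeps for roughly 1s,
 * 2s, 4s, 8s and 16s on successive retries, and then 30s per retry once
 * the doubled value exceeds the cap.
 */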
5420static unsigned long
5421nfs4_set_lock_task_retry(unsigned long timeout)
5422{
5423 freezable_schedule_timeout_killable_unsafe(timeout);
5424 timeout <<= 1;
5425 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5426 return NFS4_LOCK_MAXTIMEOUT;
5427 return timeout;
5428}
5429
5430static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5431{
5432 struct inode *inode = state->inode;
5433 struct nfs_server *server = NFS_SERVER(inode);
5434 struct nfs_client *clp = server->nfs_client;
5435 struct nfs_lockt_args arg = {
5436 .fh = NFS_FH(inode),
5437 .fl = request,
5438 };
5439 struct nfs_lockt_res res = {
5440 .denied = request,
5441 };
5442 struct rpc_message msg = {
5443 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5444 .rpc_argp = &arg,
5445 .rpc_resp = &res,
5446 .rpc_cred = state->owner->so_cred,
5447 };
5448 struct nfs4_lock_state *lsp;
5449 int status;
5450
5451 arg.lock_owner.clientid = clp->cl_clientid;
5452 status = nfs4_set_lock_state(state, request);
5453 if (status != 0)
5454 goto out;
5455 lsp = request->fl_u.nfs4_fl.owner;
5456 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5457 arg.lock_owner.s_dev = server->s_dev;
5458 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5459 switch (status) {
5460 case 0:
5461 request->fl_type = F_UNLCK;
5462 break;
5463 case -NFS4ERR_DENIED:
5464 status = 0;
5465 }
5466 request->fl_ops->fl_release_private(request);
5467 request->fl_ops = NULL;
5468out:
5469 return status;
5470}
5471
5472static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5473{
5474 struct nfs4_exception exception = { };
5475 int err;
5476
5477 do {
5478 err = _nfs4_proc_getlk(state, cmd, request);
5479 trace_nfs4_get_lock(request, state, cmd, err);
5480 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5481 &exception);
5482 } while (exception.retry);
5483 return err;
5484}
5485
5486static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5487{
5488 int res = 0;
5489 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
5490 case FL_POSIX:
5491 res = posix_lock_inode_wait(inode, fl);
5492 break;
5493 case FL_FLOCK:
5494 res = flock_lock_inode_wait(inode, fl);
5495 break;
5496 default:
5497 BUG();
5498 }
5499 return res;
5500}
5501
5502struct nfs4_unlockdata {
5503 struct nfs_locku_args arg;
5504 struct nfs_locku_res res;
5505 struct nfs4_lock_state *lsp;
5506 struct nfs_open_context *ctx;
5507 struct file_lock fl;
5508 const struct nfs_server *server;
5509 unsigned long timestamp;
5510};
5511
5512static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5513 struct nfs_open_context *ctx,
5514 struct nfs4_lock_state *lsp,
5515 struct nfs_seqid *seqid)
5516{
5517 struct nfs4_unlockdata *p;
5518 struct inode *inode = lsp->ls_state->inode;
5519
5520 p = kzalloc(sizeof(*p), GFP_NOFS);
5521 if (p == NULL)
5522 return NULL;
5523 p->arg.fh = NFS_FH(inode);
5524 p->arg.fl = &p->fl;
5525 p->arg.seqid = seqid;
5526 p->res.seqid = seqid;
5527 p->lsp = lsp;
5528 atomic_inc(&lsp->ls_count);
5529 /* Ensure we don't close the file until we're done freeing locks! */
5530 p->ctx = get_nfs_open_context(ctx);
5531 memcpy(&p->fl, fl, sizeof(p->fl));
5532 p->server = NFS_SERVER(inode);
5533 return p;
5534}
5535
5536static void nfs4_locku_release_calldata(void *data)
5537{
5538 struct nfs4_unlockdata *calldata = data;
5539 nfs_free_seqid(calldata->arg.seqid);
5540 nfs4_put_lock_state(calldata->lsp);
5541 put_nfs_open_context(calldata->ctx);
5542 kfree(calldata);
5543}
5544
5545static void nfs4_locku_done(struct rpc_task *task, void *data)
5546{
5547 struct nfs4_unlockdata *calldata = data;
5548
5549 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5550 return;
5551 switch (task->tk_status) {
5552 case 0:
5553 renew_lease(calldata->server, calldata->timestamp);
5554 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5555 if (nfs4_update_lock_stateid(calldata->lsp,
5556 &calldata->res.stateid))
5557 break;
5558 case -NFS4ERR_BAD_STATEID:
5559 case -NFS4ERR_OLD_STATEID:
5560 case -NFS4ERR_STALE_STATEID:
5561 case -NFS4ERR_EXPIRED:
5562 if (!nfs4_stateid_match(&calldata->arg.stateid,
5563 &calldata->lsp->ls_stateid))
5564 rpc_restart_call_prepare(task);
5565 break;
5566 default:
5567 if (nfs4_async_handle_error(task, calldata->server,
5568 NULL, NULL) == -EAGAIN)
5569 rpc_restart_call_prepare(task);
5570 }
5571 nfs_release_seqid(calldata->arg.seqid);
5572}
5573
5574static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5575{
5576 struct nfs4_unlockdata *calldata = data;
5577
5578 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5579 goto out_wait;
5580 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5581 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5582 /* Note: exit _without_ running nfs4_locku_done */
5583 goto out_no_action;
5584 }
5585 calldata->timestamp = jiffies;
5586 if (nfs4_setup_sequence(calldata->server,
5587 &calldata->arg.seq_args,
5588 &calldata->res.seq_res,
5589 task) != 0)
5590 nfs_release_seqid(calldata->arg.seqid);
5591 return;
5592out_no_action:
5593 task->tk_action = NULL;
5594out_wait:
5595 nfs4_sequence_done(task, &calldata->res.seq_res);
5596}
5597
5598static const struct rpc_call_ops nfs4_locku_ops = {
5599 .rpc_call_prepare = nfs4_locku_prepare,
5600 .rpc_call_done = nfs4_locku_done,
5601 .rpc_release = nfs4_locku_release_calldata,
5602};
5603
5604static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5605 struct nfs_open_context *ctx,
5606 struct nfs4_lock_state *lsp,
5607 struct nfs_seqid *seqid)
5608{
5609 struct nfs4_unlockdata *data;
5610 struct rpc_message msg = {
5611 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5612 .rpc_cred = ctx->cred,
5613 };
5614 struct rpc_task_setup task_setup_data = {
5615 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5616 .rpc_message = &msg,
5617 .callback_ops = &nfs4_locku_ops,
5618 .workqueue = nfsiod_workqueue,
5619 .flags = RPC_TASK_ASYNC,
5620 };
5621
5622 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5623 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5624
5625 /* Ensure this is an unlock - when canceling a lock, the
5626 * canceled lock is passed in, and it won't be an unlock.
5627 */
5628 fl->fl_type = F_UNLCK;
5629
5630 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5631 if (data == NULL) {
5632 nfs_free_seqid(seqid);
5633 return ERR_PTR(-ENOMEM);
5634 }
5635
5636 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5637 msg.rpc_argp = &data->arg;
5638 msg.rpc_resp = &data->res;
5639 task_setup_data.callback_data = data;
5640 return rpc_run_task(&task_setup_data);
5641}
5642
5643static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5644{
5645 struct inode *inode = state->inode;
5646 struct nfs4_state_owner *sp = state->owner;
5647 struct nfs_inode *nfsi = NFS_I(inode);
5648 struct nfs_seqid *seqid;
5649 struct nfs4_lock_state *lsp;
5650 struct rpc_task *task;
5651 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5652 int status = 0;
5653 unsigned char fl_flags = request->fl_flags;
5654
5655 status = nfs4_set_lock_state(state, request);
5656 /* Unlock _before_ we do the RPC call */
5657 request->fl_flags |= FL_EXISTS;
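/* With FL_EXISTS set, the local unlock below is expected to return
 * -ENOENT when no matching lock was held (per the usual fs/locks.c
 * semantics), in which case there is nothing to release on the server
 * either and the LOCKU can be skipped. */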
5658 /* Exclude nfs_delegation_claim_locks() */
5659 mutex_lock(&sp->so_delegreturn_mutex);
5660 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5661 down_read(&nfsi->rwsem);
5662 if (do_vfs_lock(inode, request) == -ENOENT) {
5663 up_read(&nfsi->rwsem);
5664 mutex_unlock(&sp->so_delegreturn_mutex);
5665 goto out;
5666 }
5667 up_read(&nfsi->rwsem);
5668 mutex_unlock(&sp->so_delegreturn_mutex);
5669 if (status != 0)
5670 goto out;
5671 /* Is this a delegated lock? */
5672 lsp = request->fl_u.nfs4_fl.owner;
5673 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5674 goto out;
5675 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5676 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5677 status = -ENOMEM;
5678 if (IS_ERR(seqid))
5679 goto out;
5680 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5681 status = PTR_ERR(task);
5682 if (IS_ERR(task))
5683 goto out;
5684 status = nfs4_wait_for_completion_rpc_task(task);
5685 rpc_put_task(task);
5686out:
5687 request->fl_flags = fl_flags;
5688 trace_nfs4_unlock(request, state, F_SETLK, status);
5689 return status;
5690}
5691
5692struct nfs4_lockdata {
5693 struct nfs_lock_args arg;
5694 struct nfs_lock_res res;
5695 struct nfs4_lock_state *lsp;
5696 struct nfs_open_context *ctx;
5697 struct file_lock fl;
5698 unsigned long timestamp;
5699 int rpc_status;
5700 int cancelled;
5701 struct nfs_server *server;
5702};
5703
5704static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5705 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5706 gfp_t gfp_mask)
5707{
5708 struct nfs4_lockdata *p;
5709 struct inode *inode = lsp->ls_state->inode;
5710 struct nfs_server *server = NFS_SERVER(inode);
5711 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5712
5713 p = kzalloc(sizeof(*p), gfp_mask);
5714 if (p == NULL)
5715 return NULL;
5716
5717 p->arg.fh = NFS_FH(inode);
5718 p->arg.fl = &p->fl;
5719 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5720 if (IS_ERR(p->arg.open_seqid))
5721 goto out_free;
5722 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5723 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5724 if (IS_ERR(p->arg.lock_seqid))
5725 goto out_free_seqid;
5726 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5727 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5728 p->arg.lock_owner.s_dev = server->s_dev;
5729 p->res.lock_seqid = p->arg.lock_seqid;
5730 p->lsp = lsp;
5731 p->server = server;
5732 atomic_inc(&lsp->ls_count);
5733 p->ctx = get_nfs_open_context(ctx);
5734 get_file(fl->fl_file);
5735 memcpy(&p->fl, fl, sizeof(p->fl));
5736 return p;
5737out_free_seqid:
5738 nfs_free_seqid(p->arg.open_seqid);
5739out_free:
5740 kfree(p);
5741 return NULL;
5742}
5743
5744static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5745{
5746 struct nfs4_lockdata *data = calldata;
5747 struct nfs4_state *state = data->lsp->ls_state;
5748
5749 dprintk("%s: begin!\n", __func__);
5750 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5751 goto out_wait;
5752 /* Do we need to do an open_to_lock_owner? */
5753 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5754 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5755 goto out_release_lock_seqid;
5756 }
5757 nfs4_stateid_copy(&data->arg.open_stateid,
5758 &state->open_stateid);
5759 data->arg.new_lock_owner = 1;
5760 data->res.open_seqid = data->arg.open_seqid;
5761 } else {
5762 data->arg.new_lock_owner = 0;
5763 nfs4_stateid_copy(&data->arg.lock_stateid,
5764 &data->lsp->ls_stateid);
5765 }
5766 if (!nfs4_valid_open_stateid(state)) {
5767 data->rpc_status = -EBADF;
5768 task->tk_action = NULL;
5769 goto out_release_open_seqid;
5770 }
5771 data->timestamp = jiffies;
5772 if (nfs4_setup_sequence(data->server,
5773 &data->arg.seq_args,
5774 &data->res.seq_res,
5775 task) == 0)
5776 return;
5777out_release_open_seqid:
5778 nfs_release_seqid(data->arg.open_seqid);
5779out_release_lock_seqid:
5780 nfs_release_seqid(data->arg.lock_seqid);
5781out_wait:
5782 nfs4_sequence_done(task, &data->res.seq_res);
5783 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5784}
5785
5786static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5787{
5788 struct nfs4_lockdata *data = calldata;
5789 struct nfs4_lock_state *lsp = data->lsp;
5790
5791 dprintk("%s: begin!\n", __func__);
5792
5793 if (!nfs4_sequence_done(task, &data->res.seq_res))
5794 return;
5795
5796 data->rpc_status = task->tk_status;
5797 switch (task->tk_status) {
5798 case 0:
5799 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5800 data->timestamp);
5801 if (data->arg.new_lock) {
5802 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5803 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5804 rpc_restart_call_prepare(task);
5805 break;
5806 }
5807 }
5808 if (data->arg.new_lock_owner != 0) {
5809 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5810 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5811 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5812 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5813 rpc_restart_call_prepare(task);
5814 break;
5815 case -NFS4ERR_BAD_STATEID:
5816 case -NFS4ERR_OLD_STATEID:
5817 case -NFS4ERR_STALE_STATEID:
5818 case -NFS4ERR_EXPIRED:
5819 if (data->arg.new_lock_owner != 0) {
5820 if (!nfs4_stateid_match(&data->arg.open_stateid,
5821 &lsp->ls_state->open_stateid))
5822 rpc_restart_call_prepare(task);
5823 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5824 &lsp->ls_stateid))
5825 rpc_restart_call_prepare(task);
5826 }
5827 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5828}
5829
5830static void nfs4_lock_release(void *calldata)
5831{
5832 struct nfs4_lockdata *data = calldata;
5833
5834 dprintk("%s: begin!\n", __func__);
5835 nfs_free_seqid(data->arg.open_seqid);
5836 if (data->cancelled != 0) {
5837 struct rpc_task *task;
5838 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5839 data->arg.lock_seqid);
5840 if (!IS_ERR(task))
5841 rpc_put_task_async(task);
5842 dprintk("%s: cancelling lock!\n", __func__);
5843 } else
5844 nfs_free_seqid(data->arg.lock_seqid);
5845 nfs4_put_lock_state(data->lsp);
5846 put_nfs_open_context(data->ctx);
5847 fput(data->fl.fl_file);
5848 kfree(data);
5849 dprintk("%s: done!\n", __func__);
5850}
5851
5852static const struct rpc_call_ops nfs4_lock_ops = {
5853 .rpc_call_prepare = nfs4_lock_prepare,
5854 .rpc_call_done = nfs4_lock_done,
5855 .rpc_release = nfs4_lock_release,
5856};
5857
5858static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5859{
5860 switch (error) {
5861 case -NFS4ERR_ADMIN_REVOKED:
5862 case -NFS4ERR_BAD_STATEID:
5863 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5864 if (new_lock_owner != 0 ||
5865 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5866 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5867 break;
5868 case -NFS4ERR_STALE_STATEID:
5869 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
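/* fall through */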
5870 case -NFS4ERR_EXPIRED:
5871 nfs4_schedule_lease_recovery(server->nfs_client);
5872 }
5873}
5874
5875static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5876{
5877 struct nfs4_lockdata *data;
5878 struct rpc_task *task;
5879 struct rpc_message msg = {
5880 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5881 .rpc_cred = state->owner->so_cred,
5882 };
5883 struct rpc_task_setup task_setup_data = {
5884 .rpc_client = NFS_CLIENT(state->inode),
5885 .rpc_message = &msg,
5886 .callback_ops = &nfs4_lock_ops,
5887 .workqueue = nfsiod_workqueue,
5888 .flags = RPC_TASK_ASYNC,
5889 };
5890 int ret;
5891
5892 dprintk("%s: begin!\n", __func__);
5893 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5894 fl->fl_u.nfs4_fl.owner,
5895 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5896 if (data == NULL)
5897 return -ENOMEM;
5898 if (IS_SETLKW(cmd))
5899 data->arg.block = 1;
5900 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5901 msg.rpc_argp = &data->arg;
5902 msg.rpc_resp = &data->res;
5903 task_setup_data.callback_data = data;
5904 if (recovery_type > NFS_LOCK_NEW) {
5905 if (recovery_type == NFS_LOCK_RECLAIM)
5906 data->arg.reclaim = NFS_LOCK_RECLAIM;
5907 nfs4_set_sequence_privileged(&data->arg.seq_args);
5908 } else
5909 data->arg.new_lock = 1;
5910 task = rpc_run_task(&task_setup_data);
5911 if (IS_ERR(task))
5912 return PTR_ERR(task);
5913 ret = nfs4_wait_for_completion_rpc_task(task);
5914 if (ret == 0) {
5915 ret = data->rpc_status;
5916 if (ret)
5917 nfs4_handle_setlk_error(data->server, data->lsp,
5918 data->arg.new_lock_owner, ret);
5919 } else
5920 data->cancelled = 1;
5921 rpc_put_task(task);
5922 dprintk("%s: done, ret = %d!\n", __func__, ret);
5923 return ret;
5924}
5925
5926static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5927{
5928 struct nfs_server *server = NFS_SERVER(state->inode);
5929 struct nfs4_exception exception = {
5930 .inode = state->inode,
5931 };
5932 int err;
5933
5934 do {
5935 /* Cache the lock if possible... */
5936 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5937 return 0;
5938 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5939 trace_nfs4_lock_reclaim(request, state, F_SETLK, err);
5940 if (err != -NFS4ERR_DELAY)
5941 break;
5942 nfs4_handle_exception(server, err, &exception);
5943 } while (exception.retry);
5944 return err;
5945}
5946
5947static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5948{
5949 struct nfs_server *server = NFS_SERVER(state->inode);
5950 struct nfs4_exception exception = {
5951 .inode = state->inode,
5952 };
5953 int err;
5954
5955 err = nfs4_set_lock_state(state, request);
5956 if (err != 0)
5957 return err;
5958 if (!recover_lost_locks) {
5959 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5960 return 0;
5961 }
5962 do {
5963 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5964 return 0;
5965 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5966 trace_nfs4_lock_expired(request, state, F_SETLK, err);
5967 switch (err) {
5968 default:
5969 goto out;
5970 case -NFS4ERR_GRACE:
5971 case -NFS4ERR_DELAY:
5972 nfs4_handle_exception(server, err, &exception);
5973 err = 0;
5974 }
5975 } while (exception.retry);
5976out:
5977 return err;
5978}
5979
5980#if defined(CONFIG_NFS_V4_1)
5981/**
5982 * nfs41_check_expired_locks - possibly free a lock stateid
5983 *
5984 * @state: NFSv4 state for an inode
5985 *
5986 * Returns NFS_OK if recovery for this stateid is now finished.
5987 * Otherwise a negative NFS4ERR value is returned.
5988 */
5989static int nfs41_check_expired_locks(struct nfs4_state *state)
5990{
5991 int status, ret = -NFS4ERR_BAD_STATEID;
5992 struct nfs4_lock_state *lsp;
5993 struct nfs_server *server = NFS_SERVER(state->inode);
5994
5995 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
5996 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
5997 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
5998
5999 status = nfs41_test_stateid(server,
6000 &lsp->ls_stateid,
6001 cred);
6002 trace_nfs4_test_lock_stateid(state, lsp, status);
6003 if (status != NFS_OK) {
6004 /* Free the stateid unless the server
6005 * informs us the stateid is unrecognized. */
6006 if (status != -NFS4ERR_BAD_STATEID)
6007 nfs41_free_stateid(server,
6008 &lsp->ls_stateid,
6009 cred);
6010 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6011 ret = status;
6012 }
6013 }
6014 }
6015
6016 return ret;
6017}
6018
6019static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6020{
6021 int status = NFS_OK;
6022
6023 if (test_bit(LK_STATE_IN_USE, &state->flags))
6024 status = nfs41_check_expired_locks(state);
6025 if (status != NFS_OK)
6026 status = nfs4_lock_expired(state, request);
6027 return status;
6028}
6029#endif
6030
6031static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6032{
6033 struct nfs_inode *nfsi = NFS_I(state->inode);
6034 unsigned char fl_flags = request->fl_flags;
6035 int status = -ENOLCK;
6036
6037 if ((fl_flags & FL_POSIX) &&
6038 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6039 goto out;
6040 /* Is this a delegated open? */
6041 status = nfs4_set_lock_state(state, request);
6042 if (status != 0)
6043 goto out;
6044 request->fl_flags |= FL_ACCESS;
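/* FL_ACCESS means "not trying to lock, just looking" (per its definition
 * in fs.h), so this first pass only probes for a conflicting local lock
 * before we decide whether to cache the lock or go to the server. */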
6045 status = do_vfs_lock(state->inode, request);
6046 if (status < 0)
6047 goto out;
6048 down_read(&nfsi->rwsem);
6049 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6050 /* Yes: cache locks! */
6051 /* ...but avoid races with delegation recall... */
6052 request->fl_flags = fl_flags & ~FL_SLEEP;
6053 status = do_vfs_lock(state->inode, request);
6054 up_read(&nfsi->rwsem);
6055 goto out;
6056 }
6057 up_read(&nfsi->rwsem);
6058 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6059out:
6060 request->fl_flags = fl_flags;
6061 return status;
6062}
6063
6064static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6065{
6066 struct nfs4_exception exception = {
6067 .state = state,
6068 .inode = state->inode,
6069 };
6070 int err;
6071
6072 do {
6073 err = _nfs4_proc_setlk(state, cmd, request);
6074 trace_nfs4_set_lock(request, state, cmd, err);
6075 if (err == -NFS4ERR_DENIED)
6076 err = -EAGAIN;
6077 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6078 err, &exception);
6079 } while (exception.retry);
6080 return err;
6081}
6082
6083static int
6084nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6085{
6086 struct nfs_open_context *ctx;
6087 struct nfs4_state *state;
6088 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6089 int status;
6090
6091 /* verify open state */
6092 ctx = nfs_file_open_context(filp);
6093 state = ctx->state;
6094
6095 if (request->fl_start < 0 || request->fl_end < 0)
6096 return -EINVAL;
6097
6098 if (IS_GETLK(cmd)) {
6099 if (state != NULL)
6100 return nfs4_proc_getlk(state, F_GETLK, request);
6101 return 0;
6102 }
6103
6104 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6105 return -EINVAL;
6106
6107 if (request->fl_type == F_UNLCK) {
6108 if (state != NULL)
6109 return nfs4_proc_unlck(state, cmd, request);
6110 return 0;
6111 }
6112
6113 if (state == NULL)
6114 return -ENOLCK;
6115 /*
6116 * Don't rely on the VFS having checked the file open mode,
6117 * since it won't do this for flock() locks.
6118 */
6119 switch (request->fl_type) {
6120 case F_RDLCK:
6121 if (!(filp->f_mode & FMODE_READ))
6122 return -EBADF;
6123 break;
6124 case F_WRLCK:
6125 if (!(filp->f_mode & FMODE_WRITE))
6126 return -EBADF;
6127 }
6128
6129 do {
6130 status = nfs4_proc_setlk(state, cmd, request);
6131 if ((status != -EAGAIN) || IS_SETLK(cmd))
6132 break;
6133 timeout = nfs4_set_lock_task_retry(timeout);
6134 status = -ERESTARTSYS;
6135 if (signalled())
6136 break;
6137 } while(status < 0);
6138 return status;
6139}
6140
6141int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6142{
6143 struct nfs_server *server = NFS_SERVER(state->inode);
6144 int err;
6145
6146 err = nfs4_set_lock_state(state, fl);
6147 if (err != 0)
6148 return err;
6149 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6150 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6151}
6152
6153struct nfs_release_lockowner_data {
6154 struct nfs4_lock_state *lsp;
6155 struct nfs_server *server;
6156 struct nfs_release_lockowner_args args;
6157 struct nfs_release_lockowner_res res;
6158 unsigned long timestamp;
6159};
6160
6161static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6162{
6163 struct nfs_release_lockowner_data *data = calldata;
6164 struct nfs_server *server = data->server;
6165 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6166 &data->args.seq_args, &data->res.seq_res, task);
6167 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6168 data->timestamp = jiffies;
6169}
6170
6171static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6172{
6173 struct nfs_release_lockowner_data *data = calldata;
6174 struct nfs_server *server = data->server;
6175
6176 nfs40_sequence_done(task, &data->res.seq_res);
6177
6178 switch (task->tk_status) {
6179 case 0:
6180 renew_lease(server, data->timestamp);
6181 break;
6182 case -NFS4ERR_STALE_CLIENTID:
6183 case -NFS4ERR_EXPIRED:
6184 nfs4_schedule_lease_recovery(server->nfs_client);
6185 break;
6186 case -NFS4ERR_LEASE_MOVED:
6187 case -NFS4ERR_DELAY:
6188 if (nfs4_async_handle_error(task, server,
6189 NULL, NULL) == -EAGAIN)
6190 rpc_restart_call_prepare(task);
6191 }
6192}
6193
6194static void nfs4_release_lockowner_release(void *calldata)
6195{
6196 struct nfs_release_lockowner_data *data = calldata;
6197 nfs4_free_lock_state(data->server, data->lsp);
6198 kfree(calldata);
6199}
6200
6201static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6202 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6203 .rpc_call_done = nfs4_release_lockowner_done,
6204 .rpc_release = nfs4_release_lockowner_release,
6205};
6206
6207static void
6208nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6209{
6210 struct nfs_release_lockowner_data *data;
6211 struct rpc_message msg = {
6212 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6213 };
6214
6215 if (server->nfs_client->cl_mvops->minor_version != 0)
6216 return;
6217
6218 data = kmalloc(sizeof(*data), GFP_NOFS);
6219 if (!data)
6220 return;
6221 data->lsp = lsp;
6222 data->server = server;
6223 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6224 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6225 data->args.lock_owner.s_dev = server->s_dev;
6226
6227 msg.rpc_argp = &data->args;
6228 msg.rpc_resp = &data->res;
6229 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6230 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6231}
6232
6233#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6234
6235static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
6236 const void *buf, size_t buflen,
6237 int flags, int type)
6238{
6239 if (strcmp(key, "") != 0)
6240 return -EINVAL;
6241
6242 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6243}
6244
6245static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
6246 void *buf, size_t buflen, int type)
6247{
6248 if (strcmp(key, "") != 0)
6249 return -EINVAL;
6250
6251 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6252}
6253
6254static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
6255 size_t list_len, const char *name,
6256 size_t name_len, int type)
6257{
6258 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6259
6260 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
6261 return 0;
6262
6263 if (list && len <= list_len)
6264 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6265 return len;
6266}
6267
6268#ifdef CONFIG_NFS_V4_SECURITY_LABEL
6269static inline int nfs4_server_supports_labels(struct nfs_server *server)
6270{
6271 return server->caps & NFS_CAP_SECURITY_LABEL;
6272}
6273
6274static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key,
6275 const void *buf, size_t buflen,
6276 int flags, int type)
6277{
6278 if (security_ismaclabel(key))
6279 return nfs4_set_security_label(dentry, buf, buflen);
6280
6281 return -EOPNOTSUPP;
6282}
6283
6284static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key,
6285 void *buf, size_t buflen, int type)
6286{
6287 if (security_ismaclabel(key))
6288 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6289 return -EOPNOTSUPP;
6290}
6291
6292static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list,
6293 size_t list_len, const char *name,
6294 size_t name_len, int type)
6295{
6296 size_t len = 0;
6297
6298 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
6299 len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
6300 if (list && len <= list_len)
6301 security_inode_listsecurity(d_inode(dentry), list, len);
6302 }
6303 return len;
6304}
6305
6306static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6307 .prefix = XATTR_SECURITY_PREFIX,
6308 .list = nfs4_xattr_list_nfs4_label,
6309 .get = nfs4_xattr_get_nfs4_label,
6310 .set = nfs4_xattr_set_nfs4_label,
6311};
6312#endif
6313
6314
6315/*
6316 * nfs_fhget will use either the mounted_on_fileid or the fileid
6317 */
6318static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6319{
6320 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6321 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6322 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6323 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6324 return;
6325
6326 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6327 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6328 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6329 fattr->nlink = 2;
6330}
6331
6332static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6333 const struct qstr *name,
6334 struct nfs4_fs_locations *fs_locations,
6335 struct page *page)
6336{
6337 struct nfs_server *server = NFS_SERVER(dir);
6338 u32 bitmask[3] = {
6339 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6340 };
6341 struct nfs4_fs_locations_arg args = {
6342 .dir_fh = NFS_FH(dir),
6343 .name = name,
6344 .page = page,
6345 .bitmask = bitmask,
6346 };
6347 struct nfs4_fs_locations_res res = {
6348 .fs_locations = fs_locations,
6349 };
6350 struct rpc_message msg = {
6351 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6352 .rpc_argp = &args,
6353 .rpc_resp = &res,
6354 };
6355 int status;
6356
6357 dprintk("%s: start\n", __func__);
6358
6359 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6360 * is not supported */
6361 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6362 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6363 else
6364 bitmask[0] |= FATTR4_WORD0_FILEID;
6365
6366 nfs_fattr_init(&fs_locations->fattr);
6367 fs_locations->server = server;
6368 fs_locations->nlocations = 0;
6369 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6370 dprintk("%s: returned status = %d\n", __func__, status);
6371 return status;
6372}
6373
6374int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6375 const struct qstr *name,
6376 struct nfs4_fs_locations *fs_locations,
6377 struct page *page)
6378{
6379 struct nfs4_exception exception = { };
6380 int err;
6381 do {
6382 err = _nfs4_proc_fs_locations(client, dir, name,
6383 fs_locations, page);
6384 trace_nfs4_get_fs_locations(dir, name, err);
6385 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6386 &exception);
6387 } while (exception.retry);
6388 return err;
6389}
6390
6391/*
6392 * This operation also signals the server that this client is
6393 * performing migration recovery. The server can stop returning
6394 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6395 * appended to this compound to identify the client ID which is
6396 * performing recovery.
6397 */
6398static int _nfs40_proc_get_locations(struct inode *inode,
6399 struct nfs4_fs_locations *locations,
6400 struct page *page, struct rpc_cred *cred)
6401{
6402 struct nfs_server *server = NFS_SERVER(inode);
6403 struct rpc_clnt *clnt = server->client;
6404 u32 bitmask[2] = {
6405 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6406 };
6407 struct nfs4_fs_locations_arg args = {
6408 .clientid = server->nfs_client->cl_clientid,
6409 .fh = NFS_FH(inode),
6410 .page = page,
6411 .bitmask = bitmask,
6412 .migration = 1, /* skip LOOKUP */
6413 .renew = 1, /* append RENEW */
6414 };
6415 struct nfs4_fs_locations_res res = {
6416 .fs_locations = locations,
6417 .migration = 1,
6418 .renew = 1,
6419 };
6420 struct rpc_message msg = {
6421 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6422 .rpc_argp = &args,
6423 .rpc_resp = &res,
6424 .rpc_cred = cred,
6425 };
6426 unsigned long now = jiffies;
6427 int status;
6428
6429 nfs_fattr_init(&locations->fattr);
6430 locations->server = server;
6431 locations->nlocations = 0;
6432
6433 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6434 nfs4_set_sequence_privileged(&args.seq_args);
6435 status = nfs4_call_sync_sequence(clnt, server, &msg,
6436 &args.seq_args, &res.seq_res);
6437 if (status)
6438 return status;
6439
6440 renew_lease(server, now);
6441 return 0;
6442}
6443
6444#ifdef CONFIG_NFS_V4_1
6445
6446/*
6447 * This operation also signals the server that this client is
6448 * performing migration recovery. The server can stop asserting
6449 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6450 * performing this operation is identified in the SEQUENCE
6451 * operation in this compound.
6452 *
6453 * When the client supports GETATTR(fs_locations_info), it can
6454 * be plumbed in here.
6455 */
6456static int _nfs41_proc_get_locations(struct inode *inode,
6457 struct nfs4_fs_locations *locations,
6458 struct page *page, struct rpc_cred *cred)
6459{
6460 struct nfs_server *server = NFS_SERVER(inode);
6461 struct rpc_clnt *clnt = server->client;
6462 u32 bitmask[2] = {
6463 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6464 };
6465 struct nfs4_fs_locations_arg args = {
6466 .fh = NFS_FH(inode),
6467 .page = page,
6468 .bitmask = bitmask,
6469 .migration = 1, /* skip LOOKUP */
6470 };
6471 struct nfs4_fs_locations_res res = {
6472 .fs_locations = locations,
6473 .migration = 1,
6474 };
6475 struct rpc_message msg = {
6476 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6477 .rpc_argp = &args,
6478 .rpc_resp = &res,
6479 .rpc_cred = cred,
6480 };
6481 int status;
6482
6483 nfs_fattr_init(&locations->fattr);
6484 locations->server = server;
6485 locations->nlocations = 0;
6486
6487 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6488 nfs4_set_sequence_privileged(&args.seq_args);
6489 status = nfs4_call_sync_sequence(clnt, server, &msg,
6490 &args.seq_args, &res.seq_res);
6491 if (status == NFS4_OK &&
6492 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6493 status = -NFS4ERR_LEASE_MOVED;
6494 return status;
6495}
6496
6497#endif /* CONFIG_NFS_V4_1 */
6498
6499/**
6500 * nfs4_proc_get_locations - discover locations for a migrated FSID
6501 * @inode: inode on FSID that is migrating
6502 * @locations: result of query
6503 * @page: buffer
6504 * @cred: credential to use for this operation
6505 *
6506 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6507 * operation failed, or a negative errno if a local error occurred.
6508 *
6509 * On success, "locations" is filled in, but if the server has
6510 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6511 * asserted.
6512 *
6513 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6514 * from this client that require migration recovery.
6515 */
6516int nfs4_proc_get_locations(struct inode *inode,
6517 struct nfs4_fs_locations *locations,
6518 struct page *page, struct rpc_cred *cred)
6519{
6520 struct nfs_server *server = NFS_SERVER(inode);
6521 struct nfs_client *clp = server->nfs_client;
6522 const struct nfs4_mig_recovery_ops *ops =
6523 clp->cl_mvops->mig_recovery_ops;
6524 struct nfs4_exception exception = { };
6525 int status;
6526
6527 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6528 (unsigned long long)server->fsid.major,
6529 (unsigned long long)server->fsid.minor,
6530 clp->cl_hostname);
6531 nfs_display_fhandle(NFS_FH(inode), __func__);
6532
6533 do {
6534 status = ops->get_locations(inode, locations, page, cred);
6535 if (status != -NFS4ERR_DELAY)
6536 break;
6537 nfs4_handle_exception(server, status, &exception);
6538 } while (exception.retry);
6539 return status;
6540}
6541
6542/*
6543 * This operation also signals the server that this client is
6544 * performing "lease moved" recovery. The server can stop
6545 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6546 * is appended to this compound to identify the client ID which is
6547 * performing recovery.
6548 */
6549static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6550{
6551 struct nfs_server *server = NFS_SERVER(inode);
6552 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6553 struct rpc_clnt *clnt = server->client;
6554 struct nfs4_fsid_present_arg args = {
6555 .fh = NFS_FH(inode),
6556 .clientid = clp->cl_clientid,
6557 .renew = 1, /* append RENEW */
6558 };
6559 struct nfs4_fsid_present_res res = {
6560 .renew = 1,
6561 };
6562 struct rpc_message msg = {
6563 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6564 .rpc_argp = &args,
6565 .rpc_resp = &res,
6566 .rpc_cred = cred,
6567 };
6568 unsigned long now = jiffies;
6569 int status;
6570
6571 res.fh = nfs_alloc_fhandle();
6572 if (res.fh == NULL)
6573 return -ENOMEM;
6574
6575 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6576 nfs4_set_sequence_privileged(&args.seq_args);
6577 status = nfs4_call_sync_sequence(clnt, server, &msg,
6578 &args.seq_args, &res.seq_res);
6579 nfs_free_fhandle(res.fh);
6580 if (status)
6581 return status;
6582
6583 do_renew_lease(clp, now);
6584 return 0;
6585}
6586
6587#ifdef CONFIG_NFS_V4_1
6588
6589/*
6590 * This operation also signals the server that this client is
6591 * performing "lease moved" recovery. The server can stop asserting
6592 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6593 * this operation is identified in the SEQUENCE operation in this
6594 * compound.
6595 */
6596static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6597{
6598 struct nfs_server *server = NFS_SERVER(inode);
6599 struct rpc_clnt *clnt = server->client;
6600 struct nfs4_fsid_present_arg args = {
6601 .fh = NFS_FH(inode),
6602 };
6603 struct nfs4_fsid_present_res res = {
6604 };
6605 struct rpc_message msg = {
6606 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6607 .rpc_argp = &args,
6608 .rpc_resp = &res,
6609 .rpc_cred = cred,
6610 };
6611 int status;
6612
6613 res.fh = nfs_alloc_fhandle();
6614 if (res.fh == NULL)
6615 return -ENOMEM;
6616
6617 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6618 nfs4_set_sequence_privileged(&args.seq_args);
6619 status = nfs4_call_sync_sequence(clnt, server, &msg,
6620 &args.seq_args, &res.seq_res);
6621 nfs_free_fhandle(res.fh);
6622 if (status == NFS4_OK &&
6623 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6624 status = -NFS4ERR_LEASE_MOVED;
6625 return status;
6626}
6627
6628#endif /* CONFIG_NFS_V4_1 */
6629
6630/**
6631 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6632 * @inode: inode on FSID to check
6633 * @cred: credential to use for this operation
6634 *
6635 * Server indicates whether the FSID is present, moved, or not
6636 * recognized. This operation is necessary to clear a LEASE_MOVED
6637 * condition for this client ID.
6638 *
6639 * Returns NFS4_OK if the FSID is present on this server,
6640 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6641 * NFS4ERR code if some error occurred on the server, or a
6642 * negative errno if a local failure occurred.
6643 */
6644int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6645{
6646 struct nfs_server *server = NFS_SERVER(inode);
6647 struct nfs_client *clp = server->nfs_client;
6648 const struct nfs4_mig_recovery_ops *ops =
6649 clp->cl_mvops->mig_recovery_ops;
6650 struct nfs4_exception exception = { };
6651 int status;
6652
6653 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6654 (unsigned long long)server->fsid.major,
6655 (unsigned long long)server->fsid.minor,
6656 clp->cl_hostname);
6657 nfs_display_fhandle(NFS_FH(inode), __func__);
6658
6659 do {
6660 status = ops->fsid_present(inode, cred);
6661 if (status != -NFS4ERR_DELAY)
6662 break;
6663 nfs4_handle_exception(server, status, &exception);
6664 } while (exception.retry);
6665 return status;
6666}
6667
6668/**
6669 * If 'use_integrity' is true and the state management nfs_client
6670 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6671 * and the machine credential as per RFC3530bis and RFC5661 Security
6672 * Considerations sections. Otherwise, just use the user cred with the
6673 * filesystem's rpc_client.
6674 */
6675static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6676{
6677 int status;
6678 struct nfs4_secinfo_arg args = {
6679 .dir_fh = NFS_FH(dir),
6680 .name = name,
6681 };
6682 struct nfs4_secinfo_res res = {
6683 .flavors = flavors,
6684 };
6685 struct rpc_message msg = {
6686 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6687 .rpc_argp = &args,
6688 .rpc_resp = &res,
6689 };
6690 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6691 struct rpc_cred *cred = NULL;
6692
6693 if (use_integrity) {
6694 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6695 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6696 msg.rpc_cred = cred;
6697 }
6698
6699 dprintk("NFS call secinfo %s\n", name->name);
6700
6701 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6702 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6703
6704 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6705 &res.seq_res, 0);
6706 dprintk("NFS reply secinfo: %d\n", status);
6707
6708 if (cred)
6709 put_rpccred(cred);
6710
6711 return status;
6712}
6713
6714int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6715 struct nfs4_secinfo_flavors *flavors)
6716{
6717 struct nfs4_exception exception = { };
6718 int err;
6719 do {
6720 err = -NFS4ERR_WRONGSEC;
6721
6722 /* try to use integrity protection with machine cred */
6723 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6724 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6725
6726 /*
6727 * if unable to use integrity protection, or SECINFO with
6728 * integrity protection returns NFS4ERR_WRONGSEC (which is
6729 * disallowed by spec, but exists in deployed servers), use
6730 * the current filesystem's rpc_client and the user cred.
6731 */
6732 if (err == -NFS4ERR_WRONGSEC)
6733 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6734
6735 trace_nfs4_secinfo(dir, name, err);
6736 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6737 &exception);
6738 } while (exception.retry);
6739 return err;
6740}
6741
6742#ifdef CONFIG_NFS_V4_1
6743/*
6744 * Check the exchange flags returned by the server for invalid combinations:
6745 * bits outside the valid mask, both the PNFS and NON_PNFS flags set, or none
6746 * of the NON_PNFS, PNFS, or DS flags set.
6747 */
6748static int nfs4_check_cl_exchange_flags(u32 flags)
6749{
6750 if (flags & ~EXCHGID4_FLAG_MASK_R)
6751 goto out_inval;
6752 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6753 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6754 goto out_inval;
6755 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6756 goto out_inval;
6757 return NFS_OK;
6758out_inval:
6759 return -NFS4ERR_INVAL;
6760}
6761
6762static bool
6763nfs41_same_server_scope(struct nfs41_server_scope *a,
6764 struct nfs41_server_scope *b)
6765{
6766 if (a->server_scope_sz == b->server_scope_sz &&
6767 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6768 return true;
6769
6770 return false;
6771}
6772
6773/*
6774 * nfs4_proc_bind_conn_to_session()
6775 *
6776 * The 4.1 client currently uses the same TCP connection for the
6777 * fore and backchannel.
6778 */
6779int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6780{
6781 int status;
6782 struct nfs41_bind_conn_to_session_args args = {
6783 .client = clp,
6784 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6785 };
6786 struct nfs41_bind_conn_to_session_res res;
6787 struct rpc_message msg = {
6788 .rpc_proc =
6789 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6790 .rpc_argp = &args,
6791 .rpc_resp = &res,
6792 .rpc_cred = cred,
6793 };
6794
6795 dprintk("--> %s\n", __func__);
6796
6797 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6798 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6799 args.dir = NFS4_CDFC4_FORE;
6800
6801 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6802 trace_nfs4_bind_conn_to_session(clp, status);
6803 if (status == 0) {
6804 if (memcmp(res.sessionid.data,
6805 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6806 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6807 status = -EIO;
6808 goto out;
6809 }
6810 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6811 dprintk("NFS: %s: Unexpected direction from server\n",
6812 __func__);
6813 status = -EIO;
6814 goto out;
6815 }
6816 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6817 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6818 __func__);
6819 status = -EIO;
6820 goto out;
6821 }
6822 }
6823out:
6824 dprintk("<-- %s status= %d\n", __func__, status);
6825 return status;
6826}
6827
6828/*
6829 * Minimum set of SP4_MACH_CRED operations (from RFC 5661) to put in the enforce
6830 * map, plus the operations we'd like to see in the allow map to enable certain features
6831 */
6832static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6833 .how = SP4_MACH_CRED,
6834 .enforce.u.words = {
6835 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6836 1 << (OP_EXCHANGE_ID - 32) |
6837 1 << (OP_CREATE_SESSION - 32) |
6838 1 << (OP_DESTROY_SESSION - 32) |
6839 1 << (OP_DESTROY_CLIENTID - 32)
6840 },
6841 .allow.u.words = {
6842 [0] = 1 << (OP_CLOSE) |
6843 1 << (OP_LOCKU) |
6844 1 << (OP_COMMIT),
6845 [1] = 1 << (OP_SECINFO - 32) |
6846 1 << (OP_SECINFO_NO_NAME - 32) |
6847 1 << (OP_TEST_STATEID - 32) |
6848 1 << (OP_FREE_STATEID - 32) |
6849 1 << (OP_WRITE - 32)
6850 }
6851};
6852
6853/*
6854 * Select the state protection mode for client `clp' given the server results
6855 * from exchange_id in `sp'.
6856 *
6857 * Returns 0 on success, negative errno otherwise.
6858 */
6859static int nfs4_sp4_select_mode(struct nfs_client *clp,
6860 struct nfs41_state_protection *sp)
6861{
6862 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6863 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6864 1 << (OP_EXCHANGE_ID - 32) |
6865 1 << (OP_CREATE_SESSION - 32) |
6866 1 << (OP_DESTROY_SESSION - 32) |
6867 1 << (OP_DESTROY_CLIENTID - 32)
6868 };
6869 unsigned int i;
6870
6871 if (sp->how == SP4_MACH_CRED) {
6872 /* Print state protect result */
6873 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6874 for (i = 0; i <= LAST_NFS4_OP; i++) {
6875 if (test_bit(i, sp->enforce.u.longs))
6876 dfprintk(MOUNT, " enforce op %d\n", i);
6877 if (test_bit(i, sp->allow.u.longs))
6878 dfprintk(MOUNT, " allow op %d\n", i);
6879 }
6880
6881 /* make sure nothing is on the enforce list that isn't supported */
6882 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6883 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6884 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6885 return -EINVAL;
6886 }
6887 }
6888
6889 /*
6890 * Minimal mode - state operations are allowed to use machine
6891 * credential. Note this already happens by default, so the
6892 * client doesn't have to do anything more than the negotiation.
6893 *
6894 * NOTE: we don't care if EXCHANGE_ID is in the list -
6895 * we're already using the machine cred for exchange_id
6896 * and will never use a different cred.
6897 */
6898 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6899 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6900 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6901 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6902 dfprintk(MOUNT, "sp4_mach_cred:\n");
6903 dfprintk(MOUNT, " minimal mode enabled\n");
6904 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6905 } else {
6906 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6907 return -EINVAL;
6908 }
6909
6910 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6911 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6912 dfprintk(MOUNT, " cleanup mode enabled\n");
6913 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6914 }
6915
6916 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6917 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6918 dfprintk(MOUNT, " secinfo mode enabled\n");
6919 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6920 }
6921
6922 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6923 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6924 dfprintk(MOUNT, " stateid mode enabled\n");
6925 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6926 }
6927
6928 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6929 dfprintk(MOUNT, " write mode enabled\n");
6930 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6931 }
6932
6933 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6934 dfprintk(MOUNT, " commit mode enabled\n");
6935 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6936 }
6937 }
6938
6939 return 0;
6940}
6941
6942/*
6943 * _nfs4_proc_exchange_id()
6944 *
6945 * Wrapper for EXCHANGE_ID operation.
6946 */
6947static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6948 u32 sp4_how)
6949{
6950 nfs4_verifier verifier;
6951 struct nfs41_exchange_id_args args = {
6952 .verifier = &verifier,
6953 .client = clp,
6954#ifdef CONFIG_NFS_V4_1_MIGRATION
6955 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6956 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6957 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6958#else
6959 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6960 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6961#endif
6962 };
6963 struct nfs41_exchange_id_res res = {
6964 0
6965 };
6966 int status;
6967 struct rpc_message msg = {
6968 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6969 .rpc_argp = &args,
6970 .rpc_resp = &res,
6971 .rpc_cred = cred,
6972 };
6973
6974 nfs4_init_boot_verifier(clp, &verifier);
6975
6976 status = nfs4_init_uniform_client_string(clp);
6977 if (status)
6978 goto out;
6979
6980 dprintk("NFS call exchange_id auth=%s, '%s'\n",
6981 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6982 clp->cl_owner_id);
6983
6984 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
6985 GFP_NOFS);
6986 if (unlikely(res.server_owner == NULL)) {
6987 status = -ENOMEM;
6988 goto out;
6989 }
6990
6991 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
6992 GFP_NOFS);
6993 if (unlikely(res.server_scope == NULL)) {
6994 status = -ENOMEM;
6995 goto out_server_owner;
6996 }
6997
6998 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
6999 if (unlikely(res.impl_id == NULL)) {
7000 status = -ENOMEM;
7001 goto out_server_scope;
7002 }
7003
7004 switch (sp4_how) {
7005 case SP4_NONE:
7006 args.state_protect.how = SP4_NONE;
7007 break;
7008
7009 case SP4_MACH_CRED:
7010 args.state_protect = nfs4_sp4_mach_cred_request;
7011 break;
7012
7013 default:
7014 /* unsupported! */
7015 WARN_ON_ONCE(1);
7016 status = -EINVAL;
7017 goto out_impl_id;
7018 }
7019
7020 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7021 trace_nfs4_exchange_id(clp, status);
7022 if (status == 0)
7023 status = nfs4_check_cl_exchange_flags(res.flags);
7024
7025 if (status == 0)
7026 status = nfs4_sp4_select_mode(clp, &res.state_protect);
7027
7028 if (status == 0) {
7029 clp->cl_clientid = res.clientid;
7030 clp->cl_exchange_flags = res.flags;
7031 /* Client ID is not confirmed */
7032 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
7033 clear_bit(NFS4_SESSION_ESTABLISHED,
7034 &clp->cl_session->session_state);
7035 clp->cl_seqid = res.seqid;
7036 }
7037
7038 kfree(clp->cl_serverowner);
7039 clp->cl_serverowner = res.server_owner;
7040 res.server_owner = NULL;
7041
7042 /* use the most recent implementation id */
7043 kfree(clp->cl_implid);
7044 clp->cl_implid = res.impl_id;
7045 res.impl_id = NULL;
7046
7047 if (clp->cl_serverscope != NULL &&
7048 !nfs41_same_server_scope(clp->cl_serverscope,
7049 res.server_scope)) {
7050 dprintk("%s: server_scope mismatch detected\n",
7051 __func__);
7052 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7053 kfree(clp->cl_serverscope);
7054 clp->cl_serverscope = NULL;
7055 }
7056
7057 if (clp->cl_serverscope == NULL) {
7058 clp->cl_serverscope = res.server_scope;
7059 res.server_scope = NULL;
7060 }
7061 }
7062
7063out_impl_id:
7064 kfree(res.impl_id);
7065out_server_scope:
7066 kfree(res.server_scope);
7067out_server_owner:
7068 kfree(res.server_owner);
7069out:
7070 if (clp->cl_implid != NULL)
7071 dprintk("NFS reply exchange_id: Server Implementation ID: "
7072 "domain: %s, name: %s, date: %llu,%u\n",
7073 clp->cl_implid->domain, clp->cl_implid->name,
7074 clp->cl_implid->date.seconds,
7075 clp->cl_implid->date.nseconds);
7076 dprintk("NFS reply exchange_id: %d\n", status);
7077 return status;
7078}
7079
7080/*
7081 * nfs4_proc_exchange_id()
7082 *
7083 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7084 *
7085 * Since the clientid has expired, all compounds using sessions
7086 * associated with the stale clientid will be returning
7087 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7088 * be in some phase of session reset.
7089 *
7090 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7091 */
7092int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7093{
7094 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7095 int status;
7096
7097 /* try SP4_MACH_CRED if krb5i/p */
7098 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7099 authflavor == RPC_AUTH_GSS_KRB5P) {
7100 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7101 if (!status)
7102 return 0;
7103 }
7104
7105 /* try SP4_NONE */
7106 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7107}
7108
7109static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7110 struct rpc_cred *cred)
7111{
7112 struct rpc_message msg = {
7113 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7114 .rpc_argp = clp,
7115 .rpc_cred = cred,
7116 };
7117 int status;
7118
7119 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7120 trace_nfs4_destroy_clientid(clp, status);
7121 if (status)
7122 dprintk("NFS: Got error %d from the server %s on "
7123 "DESTROY_CLIENTID.\n", status, clp->cl_hostname);
7124 return status;
7125}
7126
7127static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7128 struct rpc_cred *cred)
7129{
7130 unsigned int loop;
7131 int ret;
7132
7133 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7134 ret = _nfs4_proc_destroy_clientid(clp, cred);
7135 switch (ret) {
7136 case -NFS4ERR_DELAY:
7137 case -NFS4ERR_CLIENTID_BUSY:
7138 ssleep(1);
7139 break;
7140 default:
7141 return ret;
7142 }
7143 }
7144 return 0;
7145}
7146
7147int nfs4_destroy_clientid(struct nfs_client *clp)
7148{
7149 struct rpc_cred *cred;
7150 int ret = 0;
7151
7152 if (clp->cl_mvops->minor_version < 1)
7153 goto out;
7154 if (clp->cl_exchange_flags == 0)
7155 goto out;
7156 if (clp->cl_preserve_clid)
7157 goto out;
7158 cred = nfs4_get_clid_cred(clp);
7159 ret = nfs4_proc_destroy_clientid(clp, cred);
7160 if (cred)
7161 put_rpccred(cred);
7162 switch (ret) {
7163 case 0:
7164 case -NFS4ERR_STALE_CLIENTID:
7165 clp->cl_exchange_flags = 0;
7166 }
7167out:
7168 return ret;
7169}
7170
7171struct nfs4_get_lease_time_data {
7172 struct nfs4_get_lease_time_args *args;
7173 struct nfs4_get_lease_time_res *res;
7174 struct nfs_client *clp;
7175};
7176
7177static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7178 void *calldata)
7179{
7180 struct nfs4_get_lease_time_data *data =
7181 (struct nfs4_get_lease_time_data *)calldata;
7182
7183 dprintk("--> %s\n", __func__);
7184 /* just set up the sequence; do not trigger session recovery
7185 since we're invoked within one */
7186 nfs41_setup_sequence(data->clp->cl_session,
7187 &data->args->la_seq_args,
7188 &data->res->lr_seq_res,
7189 task);
7190 dprintk("<-- %s\n", __func__);
7191}
7192
7193/*
7194 * Called from nfs4_state_manager thread for session setup, so don't recover
7195 * from sequence operation or clientid errors.
7196 */
7197static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7198{
7199 struct nfs4_get_lease_time_data *data =
7200 (struct nfs4_get_lease_time_data *)calldata;
7201
7202 dprintk("--> %s\n", __func__);
7203 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7204 return;
7205 switch (task->tk_status) {
7206 case -NFS4ERR_DELAY:
7207 case -NFS4ERR_GRACE:
7208 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7209 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7210 task->tk_status = 0;
7211 /* fall through */
7212 case -NFS4ERR_RETRY_UNCACHED_REP:
7213 rpc_restart_call_prepare(task);
7214 return;
7215 }
7216 dprintk("<-- %s\n", __func__);
7217}
7218
7219static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7220 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7221 .rpc_call_done = nfs4_get_lease_time_done,
7222};
7223
7224int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7225{
7226 struct rpc_task *task;
7227 struct nfs4_get_lease_time_args args;
7228 struct nfs4_get_lease_time_res res = {
7229 .lr_fsinfo = fsinfo,
7230 };
7231 struct nfs4_get_lease_time_data data = {
7232 .args = &args,
7233 .res = &res,
7234 .clp = clp,
7235 };
7236 struct rpc_message msg = {
7237 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7238 .rpc_argp = &args,
7239 .rpc_resp = &res,
7240 };
7241 struct rpc_task_setup task_setup = {
7242 .rpc_client = clp->cl_rpcclient,
7243 .rpc_message = &msg,
7244 .callback_ops = &nfs4_get_lease_time_ops,
7245 .callback_data = &data,
7246 .flags = RPC_TASK_TIMEOUT,
7247 };
7248 int status;
7249
7250 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7251 nfs4_set_sequence_privileged(&args.la_seq_args);
7252 dprintk("--> %s\n", __func__);
7253 task = rpc_run_task(&task_setup);
7254
7255 if (IS_ERR(task))
7256 status = PTR_ERR(task);
7257 else {
7258 status = task->tk_status;
7259 rpc_put_task(task);
7260 }
7261 dprintk("<-- %s return %d\n", __func__, status);
7262
7263 return status;
7264}
7265
7266/*
7267 * Initialize the values to be used by the client in CREATE_SESSION
7268 * If nfs4_init_session set the fore channel request and response sizes,
7269 * use them.
7270 *
7271 * Set the back channel max_resp_sz_cached to zero to force the client to
7272 * always set csa_cachethis to FALSE because the current implementation
7273 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7274 */
7275static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7276{
7277 unsigned int max_rqst_sz, max_resp_sz;
7278
7279 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7280 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7281
7282 /* Fore channel attributes */
7283 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7284 args->fc_attrs.max_resp_sz = max_resp_sz;
7285 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7286 args->fc_attrs.max_reqs = max_session_slots;
7287
7288 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7289 "max_ops=%u max_reqs=%u\n",
7290 __func__,
7291 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7292 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7293
7294 /* Back channel attributes */
7295 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7296 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7297 args->bc_attrs.max_resp_sz_cached = 0;
7298 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7299 args->bc_attrs.max_reqs = 1;
7300
7301 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7302 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7303 __func__,
7304 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7305 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7306 args->bc_attrs.max_reqs);
7307}
7308
7309static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7310 struct nfs41_create_session_res *res)
7311{
7312 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7313 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7314
7315 if (rcvd->max_resp_sz > sent->max_resp_sz)
7316 return -EINVAL;
7317 /*
7318 * Our requested max_ops is the minimum we need; we're not
7319 * prepared to break up compounds into smaller pieces than that.
7320 * So, no point even trying to continue if the server won't
7321 * cooperate:
7322 */
7323 if (rcvd->max_ops < sent->max_ops)
7324 return -EINVAL;
7325 if (rcvd->max_reqs == 0)
7326 return -EINVAL;
7327 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7328 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7329 return 0;
7330}
7331
7332static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7333 struct nfs41_create_session_res *res)
7334{
7335 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7336 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7337
7338 if (!(res->flags & SESSION4_BACK_CHAN))
7339 goto out;
7340 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7341 return -EINVAL;
7342 if (rcvd->max_resp_sz < sent->max_resp_sz)
7343 return -EINVAL;
7344 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7345 return -EINVAL;
7346 /* These would render the backchannel useless: */
7347 if (rcvd->max_ops != sent->max_ops)
7348 return -EINVAL;
7349 if (rcvd->max_reqs != sent->max_reqs)
7350 return -EINVAL;
7351out:
7352 return 0;
7353}
7354
7355static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7356 struct nfs41_create_session_res *res)
7357{
7358 int ret;
7359
7360 ret = nfs4_verify_fore_channel_attrs(args, res);
7361 if (ret)
7362 return ret;
7363 return nfs4_verify_back_channel_attrs(args, res);
7364}
7365
7366static void nfs4_update_session(struct nfs4_session *session,
7367 struct nfs41_create_session_res *res)
7368{
7369 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7370 /* Mark client id and session as being confirmed */
7371 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7372 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7373 session->flags = res->flags;
7374 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7375 if (res->flags & SESSION4_BACK_CHAN)
7376 memcpy(&session->bc_attrs, &res->bc_attrs,
7377 sizeof(session->bc_attrs));
7378}
7379
7380static int _nfs4_proc_create_session(struct nfs_client *clp,
7381 struct rpc_cred *cred)
7382{
7383 struct nfs4_session *session = clp->cl_session;
7384 struct nfs41_create_session_args args = {
7385 .client = clp,
7386 .clientid = clp->cl_clientid,
7387 .seqid = clp->cl_seqid,
7388 .cb_program = NFS4_CALLBACK,
7389 };
7390 struct nfs41_create_session_res res;
7391
7392 struct rpc_message msg = {
7393 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7394 .rpc_argp = &args,
7395 .rpc_resp = &res,
7396 .rpc_cred = cred,
7397 };
7398 int status;
7399
7400 nfs4_init_channel_attrs(&args);
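	/* Request a persistent session and a back channel */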
7401 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7402
7403 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7404 trace_nfs4_create_session(clp, status);
7405
7406 if (!status) {
7407 /* Verify the session's negotiated channel_attrs values */
7408 status = nfs4_verify_channel_attrs(&args, &res);
7409 /* Increment the clientid slot sequence id */
7410 if (clp->cl_seqid == res.seqid)
7411 clp->cl_seqid++;
7412 if (status)
7413 goto out;
7414 nfs4_update_session(session, &res);
7415 }
7416out:
7417 return status;
7418}
7419
7420/*
7421 * Issues a CREATE_SESSION operation to the server.
7422 * It is the responsibility of the caller to verify the session is
7423 * expired before calling this routine.
7424 */
7425int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7426{
7427 int status;
7428 unsigned *ptr;
7429 struct nfs4_session *session = clp->cl_session;
7430
7431 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7432
7433 status = _nfs4_proc_create_session(clp, cred);
7434 if (status)
7435 goto out;
7436
7437 /* Init or reset the session slot tables */
7438 status = nfs4_setup_session_slot_tables(session);
7439 dprintk("slot table setup returned %d\n", status);
7440 if (status)
7441 goto out;
7442
7443 ptr = (unsigned *)&session->sess_id.data[0];
7444 dprintk("%s client seqid %d sessionid %u:%u:%u:%u\n", __func__,
7445 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7446out:
7447 dprintk("<-- %s\n", __func__);
7448 return status;
7449}
7450
7451/*
7452 * Issue the over-the-wire RPC DESTROY_SESSION.
7453 * The caller must serialize access to this routine.
7454 */
7455int nfs4_proc_destroy_session(struct nfs4_session *session,
7456 struct rpc_cred *cred)
7457{
7458 struct rpc_message msg = {
7459 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7460 .rpc_argp = session,
7461 .rpc_cred = cred,
7462 };
7463 int status = 0;
7464
7465 dprintk("--> nfs4_proc_destroy_session\n");
7466
7467 /* session is still being set up */
7468 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7469 return 0;
7470
7471 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7472 trace_nfs4_destroy_session(session->clp, status);
7473
7474 if (status)
7475 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7476 "Session has been destroyed regardless...\n", status);
7477
7478 dprintk("<-- nfs4_proc_destroy_session\n");
7479 return status;
7480}
7481
7482/*
7483 * Renew the cl_session lease.
7484 */
7485struct nfs4_sequence_data {
7486 struct nfs_client *clp;
7487 struct nfs4_sequence_args args;
7488 struct nfs4_sequence_res res;
7489};
7490
7491static void nfs41_sequence_release(void *data)
7492{
7493 struct nfs4_sequence_data *calldata = data;
7494 struct nfs_client *clp = calldata->clp;
7495
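	/* Only schedule another renewal if we do not hold the last reference to the client */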
7496 if (atomic_read(&clp->cl_count) > 1)
7497 nfs4_schedule_state_renewal(clp);
7498 nfs_put_client(clp);
7499 kfree(calldata);
7500}
7501
7502static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7503{
7504 switch (task->tk_status) {
7505 case -NFS4ERR_DELAY:
7506 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7507 return -EAGAIN;
7508 default:
7509 nfs4_schedule_lease_recovery(clp);
7510 }
7511 return 0;
7512}
7513
7514static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7515{
7516 struct nfs4_sequence_data *calldata = data;
7517 struct nfs_client *clp = calldata->clp;
7518
7519 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7520 return;
7521
7522 trace_nfs4_sequence(clp, task->tk_status);
7523 if (task->tk_status < 0) {
7524 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7525 if (atomic_read(&clp->cl_count) == 1)
7526 goto out;
7527
7528 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7529 rpc_restart_call_prepare(task);
7530 return;
7531 }
7532 }
7533 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7534out:
7535 dprintk("<-- %s\n", __func__);
7536}
7537
7538static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7539{
7540 struct nfs4_sequence_data *calldata = data;
7541 struct nfs_client *clp = calldata->clp;
7542 struct nfs4_sequence_args *args;
7543 struct nfs4_sequence_res *res;
7544
7545 args = task->tk_msg.rpc_argp;
7546 res = task->tk_msg.rpc_resp;
7547
7548 nfs41_setup_sequence(clp->cl_session, args, res, task);
7549}
7550
7551static const struct rpc_call_ops nfs41_sequence_ops = {
7552 .rpc_call_done = nfs41_sequence_call_done,
7553 .rpc_call_prepare = nfs41_sequence_prepare,
7554 .rpc_release = nfs41_sequence_release,
7555};
7556
7557static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7558 struct rpc_cred *cred,
7559 bool is_privileged)
7560{
7561 struct nfs4_sequence_data *calldata;
7562 struct rpc_message msg = {
7563 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7564 .rpc_cred = cred,
7565 };
7566 struct rpc_task_setup task_setup_data = {
7567 .rpc_client = clp->cl_rpcclient,
7568 .rpc_message = &msg,
7569 .callback_ops = &nfs41_sequence_ops,
7570 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7571 };
7572
7573 if (!atomic_inc_not_zero(&clp->cl_count))
7574 return ERR_PTR(-EIO);
7575 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7576 if (calldata == NULL) {
7577 nfs_put_client(clp);
7578 return ERR_PTR(-ENOMEM);
7579 }
7580 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7581 if (is_privileged)
7582 nfs4_set_sequence_privileged(&calldata->args);
7583 msg.rpc_argp = &calldata->args;
7584 msg.rpc_resp = &calldata->res;
7585 calldata->clp = clp;
7586 task_setup_data.callback_data = calldata;
7587
7588 return rpc_run_task(&task_setup_data);
7589}
7590
7591static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7592{
7593 struct rpc_task *task;
7594 int ret = 0;
7595
7596 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7597 return -EAGAIN;
7598 task = _nfs41_proc_sequence(clp, cred, false);
7599 if (IS_ERR(task))
7600 ret = PTR_ERR(task);
7601 else
7602 rpc_put_task_async(task);
7603 dprintk("<-- %s status=%d\n", __func__, ret);
7604 return ret;
7605}
7606
7607static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7608{
7609 struct rpc_task *task;
7610 int ret;
7611
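	/* Privileged call: allowed to proceed even while the session is draining for state recovery */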
7612 task = _nfs41_proc_sequence(clp, cred, true);
7613 if (IS_ERR(task)) {
7614 ret = PTR_ERR(task);
7615 goto out;
7616 }
7617 ret = rpc_wait_for_completion_task(task);
7618 if (!ret)
7619 ret = task->tk_status;
7620 rpc_put_task(task);
7621out:
7622 dprintk("<-- %s status=%d\n", __func__, ret);
7623 return ret;
7624}
7625
7626struct nfs4_reclaim_complete_data {
7627 struct nfs_client *clp;
7628 struct nfs41_reclaim_complete_args arg;
7629 struct nfs41_reclaim_complete_res res;
7630};
7631
7632static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7633{
7634 struct nfs4_reclaim_complete_data *calldata = data;
7635
7636 nfs41_setup_sequence(calldata->clp->cl_session,
7637 &calldata->arg.seq_args,
7638 &calldata->res.seq_res,
7639 task);
7640}
7641
7642static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7643{
7644 switch (task->tk_status) {
7645 case 0:
7646 case -NFS4ERR_COMPLETE_ALREADY:
7647 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7648 break;
7649 case -NFS4ERR_DELAY:
7650 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7651 /* fall through */
7652 case -NFS4ERR_RETRY_UNCACHED_REP:
7653 return -EAGAIN;
7654 default:
7655 nfs4_schedule_lease_recovery(clp);
7656 }
7657 return 0;
7658}
7659
7660static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7661{
7662 struct nfs4_reclaim_complete_data *calldata = data;
7663 struct nfs_client *clp = calldata->clp;
7664 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7665
7666 dprintk("--> %s\n", __func__);
7667 if (!nfs41_sequence_done(task, res))
7668 return;
7669
7670 trace_nfs4_reclaim_complete(clp, task->tk_status);
7671 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7672 rpc_restart_call_prepare(task);
7673 return;
7674 }
7675 dprintk("<-- %s\n", __func__);
7676}
7677
7678static void nfs4_free_reclaim_complete_data(void *data)
7679{
7680 struct nfs4_reclaim_complete_data *calldata = data;
7681
7682 kfree(calldata);
7683}
7684
7685static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7686 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7687 .rpc_call_done = nfs4_reclaim_complete_done,
7688 .rpc_release = nfs4_free_reclaim_complete_data,
7689};
7690
7691/*
7692 * Issue a global reclaim complete.
7693 */
7694static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7695 struct rpc_cred *cred)
7696{
7697 struct nfs4_reclaim_complete_data *calldata;
7698 struct rpc_task *task;
7699 struct rpc_message msg = {
7700 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7701 .rpc_cred = cred,
7702 };
7703 struct rpc_task_setup task_setup_data = {
7704 .rpc_client = clp->cl_rpcclient,
7705 .rpc_message = &msg,
7706 .callback_ops = &nfs4_reclaim_complete_call_ops,
7707 .flags = RPC_TASK_ASYNC,
7708 };
7709 int status = -ENOMEM;
7710
7711 dprintk("--> %s\n", __func__);
7712 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7713 if (calldata == NULL)
7714 goto out;
7715 calldata->clp = clp;
7716 calldata->arg.one_fs = 0;
7717
7718 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7719 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7720 msg.rpc_argp = &calldata->arg;
7721 msg.rpc_resp = &calldata->res;
7722 task_setup_data.callback_data = calldata;
7723 task = rpc_run_task(&task_setup_data);
7724 if (IS_ERR(task)) {
7725 status = PTR_ERR(task);
7726 goto out;
7727 }
7728 status = nfs4_wait_for_completion_rpc_task(task);
7729 if (status == 0)
7730 status = task->tk_status;
7731 rpc_put_task(task);
7732 return 0;
7733out:
7734 dprintk("<-- %s status=%d\n", __func__, status);
7735 return status;
7736}
7737
7738static void
7739nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7740{
7741 struct nfs4_layoutget *lgp = calldata;
7742 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7743 struct nfs4_session *session = nfs4_get_session(server);
7744
7745 dprintk("--> %s\n", __func__);
7746 /* Note that there is a race here: a CB_LAYOUTRECALL can come in
7747 * right now, covering the LAYOUTGET we are about to send.
7748 * However, that is not catastrophic, and there seems
7749 * to be no way to prevent it completely.
7750 */
7751 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7752 &lgp->res.seq_res, task))
7753 return;
7754 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7755 NFS_I(lgp->args.inode)->layout,
7756 &lgp->args.range,
7757 lgp->args.ctx->state)) {
7758 rpc_exit(task, NFS4_OK);
7759 }
7760}
7761
7762static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7763{
7764 struct nfs4_layoutget *lgp = calldata;
7765 struct inode *inode = lgp->args.inode;
7766 struct nfs_server *server = NFS_SERVER(inode);
7767 struct pnfs_layout_hdr *lo;
7768 struct nfs4_state *state = NULL;
7769 unsigned long timeo, now, giveup;
7770
7771 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7772
7773 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7774 goto out;
7775
7776 switch (task->tk_status) {
7777 case 0:
7778 goto out;
7779 /*
7780 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
7781 * at least lgp->args.minlength bytes (see RFC5661 section 18.43.3).
7782 */
7783 case -NFS4ERR_BADLAYOUT:
7784 goto out_overflow;
7785 /*
7786 * NFS4ERR_LAYOUTTRYLATER indicates a conflict with another client
7787 * (or clients) writing to the same RAID stripe, except when
7788 * the minlength argument is 0 (see RFC5661 section 18.43.3).
7789 */
7790 case -NFS4ERR_LAYOUTTRYLATER:
7791 if (lgp->args.minlength == 0)
7792 goto out_overflow;
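		/* fall through */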
7793 /*
7794 * NFS4ERR_RECALLCONFLICT indicates a conflict with this client itself:
7795 * an existing layout must be recalled before a new one can be granted.
7796 */
7797 case -NFS4ERR_RECALLCONFLICT:
7798 timeo = rpc_get_timeout(task->tk_client);
7799 giveup = lgp->args.timestamp + timeo;
7800 now = jiffies;
7801 if (time_after(giveup, now)) {
7802 unsigned long delay;
7803
7804 /* Delay for:
7805 * - not less than NFS4_POLL_RETRY_MIN,
7806 * - no later than one jiffy before the give-up time,
7807 * - exponential backoff (now minus the timestamp of the first attempt).
7808 */
7809 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7810 min((giveup - now - 1),
7811 now - lgp->args.timestamp));
7812
7813 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7814 __func__, delay);
7815 rpc_delay(task, delay);
7816 task->tk_status = 0;
7817 rpc_restart_call_prepare(task);
7818 goto out; /* Do not call nfs4_async_handle_error() */
7819 }
7820 break;
7821 case -NFS4ERR_EXPIRED:
7822 case -NFS4ERR_BAD_STATEID:
7823 spin_lock(&inode->i_lock);
7824 lo = NFS_I(inode)->layout;
7825 if (!lo || list_empty(&lo->plh_segs)) {
7826 spin_unlock(&inode->i_lock);
7827 /* If the open stateid was bad, then recover it. */
7828 state = lgp->args.ctx->state;
7829 } else {
7830 LIST_HEAD(head);
7831
7832 /*
7833 * Mark the bad layout state as invalid, then retry
7834 * with the current stateid.
7835 */
7836 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
7837 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7838 spin_unlock(&inode->i_lock);
7839 pnfs_free_lseg_list(&head);
7840
7841 task->tk_status = 0;
7842 rpc_restart_call_prepare(task);
7843 }
7844 }
7845 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
7846 rpc_restart_call_prepare(task);
7847out:
7848 dprintk("<-- %s\n", __func__);
7849 return;
7850out_overflow:
7851 task->tk_status = -EOVERFLOW;
7852 goto out;
7853}
7854
7855static size_t max_response_pages(struct nfs_server *server)
7856{
7857 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7858 return nfs_page_array_len(0, max_resp_sz);
7859}
7860
7861static void nfs4_free_pages(struct page **pages, size_t size)
7862{
7863 int i;
7864
7865 if (!pages)
7866 return;
7867
7868 for (i = 0; i < size; i++) {
7869 if (!pages[i])
7870 break;
7871 __free_page(pages[i]);
7872 }
7873 kfree(pages);
7874}
7875
7876static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7877{
7878 struct page **pages;
7879 int i;
7880
7881 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7882 if (!pages) {
7883 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7884 return NULL;
7885 }
7886
7887 for (i = 0; i < size; i++) {
7888 pages[i] = alloc_page(gfp_flags);
7889 if (!pages[i]) {
7890 dprintk("%s: failed to allocate page\n", __func__);
7891 nfs4_free_pages(pages, size);
7892 return NULL;
7893 }
7894 }
7895
7896 return pages;
7897}
7898
7899static void nfs4_layoutget_release(void *calldata)
7900{
7901 struct nfs4_layoutget *lgp = calldata;
7902 struct inode *inode = lgp->args.inode;
7903 struct nfs_server *server = NFS_SERVER(inode);
7904 size_t max_pages = max_response_pages(server);
7905
7906 dprintk("--> %s\n", __func__);
7907 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7908 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7909 put_nfs_open_context(lgp->args.ctx);
7910 kfree(calldata);
7911 dprintk("<-- %s\n", __func__);
7912}
7913
7914static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7915 .rpc_call_prepare = nfs4_layoutget_prepare,
7916 .rpc_call_done = nfs4_layoutget_done,
7917 .rpc_release = nfs4_layoutget_release,
7918};
7919
7920struct pnfs_layout_segment *
7921nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7922{
7923 struct inode *inode = lgp->args.inode;
7924 struct nfs_server *server = NFS_SERVER(inode);
7925 size_t max_pages = max_response_pages(server);
7926 struct rpc_task *task;
7927 struct rpc_message msg = {
7928 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7929 .rpc_argp = &lgp->args,
7930 .rpc_resp = &lgp->res,
7931 .rpc_cred = lgp->cred,
7932 };
7933 struct rpc_task_setup task_setup_data = {
7934 .rpc_client = server->client,
7935 .rpc_message = &msg,
7936 .callback_ops = &nfs4_layoutget_call_ops,
7937 .callback_data = lgp,
7938 .flags = RPC_TASK_ASYNC,
7939 };
7940 struct pnfs_layout_segment *lseg = NULL;
7941 int status = 0;
7942
7943 dprintk("--> %s\n", __func__);
7944
7945 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7946 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7947
7948 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7949 if (!lgp->args.layout.pages) {
7950 nfs4_layoutget_release(lgp);
7951 return ERR_PTR(-ENOMEM);
7952 }
7953 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7954 lgp->args.timestamp = jiffies;
7955
7956 lgp->res.layoutp = &lgp->args.layout;
7957 lgp->res.seq_res.sr_slot = NULL;
7958 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7959
7960 task = rpc_run_task(&task_setup_data);
7961 if (IS_ERR(task))
7962 return ERR_CAST(task);
7963 status = nfs4_wait_for_completion_rpc_task(task);
7964 if (status == 0)
7965 status = task->tk_status;
7966 trace_nfs4_layoutget(lgp->args.ctx,
7967 &lgp->args.range,
7968 &lgp->res.range,
7969 status);
7970 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
7971 if (status == 0 && lgp->res.layoutp->len)
7972 lseg = pnfs_layout_process(lgp);
7973 rpc_put_task(task);
7974 dprintk("<-- %s status=%d\n", __func__, status);
7975 if (status)
7976 return ERR_PTR(status);
7977 return lseg;
7978}
7979
7980static void
7981nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
7982{
7983 struct nfs4_layoutreturn *lrp = calldata;
7984
7985 dprintk("--> %s\n", __func__);
7986 nfs41_setup_sequence(lrp->clp->cl_session,
7987 &lrp->args.seq_args,
7988 &lrp->res.seq_res,
7989 task);
7990}
7991
7992static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
7993{
7994 struct nfs4_layoutreturn *lrp = calldata;
7995 struct nfs_server *server;
7996
7997 dprintk("--> %s\n", __func__);
7998
7999 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
8000 return;
8001
8002 server = NFS_SERVER(lrp->args.inode);
8003 switch (task->tk_status) {
8004 default:
8005 task->tk_status = 0;
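		/* fall through */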
8006 case 0:
8007 break;
8008 case -NFS4ERR_DELAY:
8009 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8010 break;
8011 rpc_restart_call_prepare(task);
8012 return;
8013 }
8014 dprintk("<-- %s\n", __func__);
8015}
8016
8017static void nfs4_layoutreturn_release(void *calldata)
8018{
8019 struct nfs4_layoutreturn *lrp = calldata;
8020 struct pnfs_layout_hdr *lo = lrp->args.layout;
8021 LIST_HEAD(freeme);
8022
8023 dprintk("--> %s\n", __func__);
8024 spin_lock(&lo->plh_inode->i_lock);
8025 if (lrp->res.lrs_present)
8026 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
8027 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
8028 pnfs_clear_layoutreturn_waitbit(lo);
8029 lo->plh_block_lgets--;
8030 spin_unlock(&lo->plh_inode->i_lock);
8031 pnfs_free_lseg_list(&freeme);
8032 pnfs_put_layout_hdr(lrp->args.layout);
8033 nfs_iput_and_deactive(lrp->inode);
8034 kfree(calldata);
8035 dprintk("<-- %s\n", __func__);
8036}
8037
8038static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8039 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8040 .rpc_call_done = nfs4_layoutreturn_done,
8041 .rpc_release = nfs4_layoutreturn_release,
8042};
8043
8044int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8045{
8046 struct rpc_task *task;
8047 struct rpc_message msg = {
8048 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8049 .rpc_argp = &lrp->args,
8050 .rpc_resp = &lrp->res,
8051 .rpc_cred = lrp->cred,
8052 };
8053 struct rpc_task_setup task_setup_data = {
8054 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8055 .rpc_message = &msg,
8056 .callback_ops = &nfs4_layoutreturn_call_ops,
8057 .callback_data = lrp,
8058 };
8059 int status = 0;
8060
8061 dprintk("--> %s\n", __func__);
8062 if (!sync) {
8063 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8064 if (!lrp->inode) {
8065 nfs4_layoutreturn_release(lrp);
8066 return -EAGAIN;
8067 }
8068 task_setup_data.flags |= RPC_TASK_ASYNC;
8069 }
8070 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8071 task = rpc_run_task(&task_setup_data);
8072 if (IS_ERR(task))
8073 return PTR_ERR(task);
8074 if (sync)
8075 status = task->tk_status;
8076 trace_nfs4_layoutreturn(lrp->args.inode, status);
8077 dprintk("<-- %s status=%d\n", __func__, status);
8078 rpc_put_task(task);
8079 return status;
8080}
8081
8082static int
8083_nfs4_proc_getdeviceinfo(struct nfs_server *server,
8084 struct pnfs_device *pdev,
8085 struct rpc_cred *cred)
8086{
8087 struct nfs4_getdeviceinfo_args args = {
8088 .pdev = pdev,
8089 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8090 NOTIFY_DEVICEID4_DELETE,
8091 };
8092 struct nfs4_getdeviceinfo_res res = {
8093 .pdev = pdev,
8094 };
8095 struct rpc_message msg = {
8096 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8097 .rpc_argp = &args,
8098 .rpc_resp = &res,
8099 .rpc_cred = cred,
8100 };
8101 int status;
8102
8103 dprintk("--> %s\n", __func__);
8104 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8105 if (res.notification & ~args.notify_types)
8106 dprintk("%s: unsupported notification\n", __func__);
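	/* If the server will not send all requested notifications, do not cache this device */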
8107 if (res.notification != args.notify_types)
8108 pdev->nocache = 1;
8109
8110 dprintk("<-- %s status=%d\n", __func__, status);
8111
8112 return status;
8113}
8114
8115int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8116 struct pnfs_device *pdev,
8117 struct rpc_cred *cred)
8118{
8119 struct nfs4_exception exception = { };
8120 int err;
8121
8122 do {
8123 err = nfs4_handle_exception(server,
8124 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8125 &exception);
8126 } while (exception.retry);
8127 return err;
8128}
8129EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8130
8131static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8132{
8133 struct nfs4_layoutcommit_data *data = calldata;
8134 struct nfs_server *server = NFS_SERVER(data->args.inode);
8135 struct nfs4_session *session = nfs4_get_session(server);
8136
8137 nfs41_setup_sequence(session,
8138 &data->args.seq_args,
8139 &data->res.seq_res,
8140 task);
8141}
8142
8143static void
8144nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8145{
8146 struct nfs4_layoutcommit_data *data = calldata;
8147 struct nfs_server *server = NFS_SERVER(data->args.inode);
8148
8149 if (!nfs41_sequence_done(task, &data->res.seq_res))
8150 return;
8151
8152 switch (task->tk_status) { /* Just ignore these failures */
8153 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8154 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8155 case -NFS4ERR_BADLAYOUT: /* no layout */
8156 case -NFS4ERR_GRACE: /* loca_reclaim is always false */
8157 task->tk_status = 0;
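		/* fall through */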
8158 case 0:
8159 break;
8160 default:
8161 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8162 rpc_restart_call_prepare(task);
8163 return;
8164 }
8165 }
8166}
8167
8168static void nfs4_layoutcommit_release(void *calldata)
8169{
8170 struct nfs4_layoutcommit_data *data = calldata;
8171
8172 pnfs_cleanup_layoutcommit(data);
8173 nfs_post_op_update_inode_force_wcc(data->args.inode,
8174 data->res.fattr);
8175 put_rpccred(data->cred);
8176 nfs_iput_and_deactive(data->inode);
8177 kfree(data);
8178}
8179
8180static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8181 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8182 .rpc_call_done = nfs4_layoutcommit_done,
8183 .rpc_release = nfs4_layoutcommit_release,
8184};
8185
8186int
8187nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8188{
8189 struct rpc_message msg = {
8190 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8191 .rpc_argp = &data->args,
8192 .rpc_resp = &data->res,
8193 .rpc_cred = data->cred,
8194 };
8195 struct rpc_task_setup task_setup_data = {
8196 .task = &data->task,
8197 .rpc_client = NFS_CLIENT(data->args.inode),
8198 .rpc_message = &msg,
8199 .callback_ops = &nfs4_layoutcommit_ops,
8200 .callback_data = data,
8201 };
8202 struct rpc_task *task;
8203 int status = 0;
8204
8205 dprintk("NFS: initiating layoutcommit call. sync %d "
8206 "lbw: %llu inode %lu\n", sync,
8207 data->args.lastbytewritten,
8208 data->args.inode->i_ino);
8209
8210 if (!sync) {
8211 data->inode = nfs_igrab_and_active(data->args.inode);
8212 if (data->inode == NULL) {
8213 nfs4_layoutcommit_release(data);
8214 return -EAGAIN;
8215 }
8216 task_setup_data.flags = RPC_TASK_ASYNC;
8217 }
8218 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8219 task = rpc_run_task(&task_setup_data);
8220 if (IS_ERR(task))
8221 return PTR_ERR(task);
8222 if (sync)
8223 status = task->tk_status;
8224 trace_nfs4_layoutcommit(data->args.inode, status);
8225 dprintk("%s: status %d\n", __func__, status);
8226 rpc_put_task(task);
8227 return status;
8228}
8229
8230/**
8231 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8232 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8233 */
8234static int
8235_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8236 struct nfs_fsinfo *info,
8237 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8238{
8239 struct nfs41_secinfo_no_name_args args = {
8240 .style = SECINFO_STYLE_CURRENT_FH,
8241 };
8242 struct nfs4_secinfo_res res = {
8243 .flavors = flavors,
8244 };
8245 struct rpc_message msg = {
8246 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8247 .rpc_argp = &args,
8248 .rpc_resp = &res,
8249 };
8250 struct rpc_clnt *clnt = server->client;
8251 struct rpc_cred *cred = NULL;
8252 int status;
8253
8254 if (use_integrity) {
8255 clnt = server->nfs_client->cl_rpcclient;
8256 cred = nfs4_get_clid_cred(server->nfs_client);
8257 msg.rpc_cred = cred;
8258 }
8259
8260 dprintk("--> %s\n", __func__);
8261 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8262 &res.seq_res, 0);
8263 dprintk("<-- %s status=%d\n", __func__, status);
8264
8265 if (cred)
8266 put_rpccred(cred);
8267
8268 return status;
8269}
8270
8271static int
8272nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8273 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8274{
8275 struct nfs4_exception exception = { };
8276 int err;
8277 do {
8278 /* first try using integrity protection */
8279 err = -NFS4ERR_WRONGSEC;
8280
8281 /* try to use integrity protection with machine cred */
8282 if (_nfs4_is_integrity_protected(server->nfs_client))
8283 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8284 flavors, true);
8285
8286 /*
8287 * If we are unable to use integrity protection, or if SECINFO
8288 * with integrity protection returns NFS4ERR_WRONGSEC (which is
8289 * disallowed by spec but exists in deployed servers), use
8290 * the current filesystem's rpc_client and the user cred.
8291 */
8292 if (err == -NFS4ERR_WRONGSEC)
8293 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8294 flavors, false);
8295
8296 switch (err) {
8297 case 0:
8298 case -NFS4ERR_WRONGSEC:
8299 case -ENOTSUPP:
8300 goto out;
8301 default:
8302 err = nfs4_handle_exception(server, err, &exception);
8303 }
8304 } while (exception.retry);
8305out:
8306 return err;
8307}
8308
8309static int
8310nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8311 struct nfs_fsinfo *info)
8312{
8313 int err;
8314 struct page *page;
8315 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8316 struct nfs4_secinfo_flavors *flavors;
8317 struct nfs4_secinfo4 *secinfo;
8318 int i;
8319
8320 page = alloc_page(GFP_KERNEL);
8321 if (!page) {
8322 err = -ENOMEM;
8323 goto out;
8324 }
8325
8326 flavors = page_address(page);
8327 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8328
8329 /*
8330 * Fall back on "guess and check" method if
8331 * the server doesn't support SECINFO_NO_NAME
8332 */
8333 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8334 err = nfs4_find_root_sec(server, fhandle, info);
8335 goto out_freepage;
8336 }
8337 if (err)
8338 goto out_freepage;
8339
8340 for (i = 0; i < flavors->num_flavors; i++) {
8341 secinfo = &flavors->flavors[i];
8342
8343 switch (secinfo->flavor) {
8344 case RPC_AUTH_NULL:
8345 case RPC_AUTH_UNIX:
8346 case RPC_AUTH_GSS:
8347 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8348 &secinfo->flavor_info);
8349 break;
8350 default:
8351 flavor = RPC_AUTH_MAXFLAVOR;
8352 break;
8353 }
8354
8355 if (!nfs_auth_info_match(&server->auth_info, flavor))
8356 flavor = RPC_AUTH_MAXFLAVOR;
8357
8358 if (flavor != RPC_AUTH_MAXFLAVOR) {
8359 err = nfs4_lookup_root_sec(server, fhandle,
8360 info, flavor);
8361 if (!err)
8362 break;
8363 }
8364 }
8365
8366 if (flavor == RPC_AUTH_MAXFLAVOR)
8367 err = -EPERM;
8368
8369out_freepage:
8370 put_page(page);
8371 if (err == -EACCES)
8372 return -EPERM;
8373out:
8374 return err;
8375}
8376
8377static int _nfs41_test_stateid(struct nfs_server *server,
8378 nfs4_stateid *stateid,
8379 struct rpc_cred *cred)
8380{
8381 int status;
8382 struct nfs41_test_stateid_args args = {
8383 .stateid = stateid,
8384 };
8385 struct nfs41_test_stateid_res res;
8386 struct rpc_message msg = {
8387 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8388 .rpc_argp = &args,
8389 .rpc_resp = &res,
8390 .rpc_cred = cred,
8391 };
8392 struct rpc_clnt *rpc_client = server->client;
8393
8394 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8395 &rpc_client, &msg);
8396
8397 dprintk("NFS call test_stateid %p\n", stateid);
8398 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8399 nfs4_set_sequence_privileged(&args.seq_args);
8400 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8401 &args.seq_args, &res.seq_res);
8402 if (status != NFS_OK) {
8403 dprintk("NFS reply test_stateid: failed, %d\n", status);
8404 return status;
8405 }
8406 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8407 return -res.status;
8408}
8409
8410/**
8411 * nfs41_test_stateid - perform a TEST_STATEID operation
8412 *
8413 * @server: server / transport on which to perform the operation
8414 * @stateid: state ID to test
8415 * @cred: credential
8416 *
8417 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8418 * Otherwise a negative NFS4ERR value is returned if the operation
8419 * failed or the state ID is not currently valid.
8420 */
8421static int nfs41_test_stateid(struct nfs_server *server,
8422 nfs4_stateid *stateid,
8423 struct rpc_cred *cred)
8424{
8425 struct nfs4_exception exception = { };
8426 int err;
8427 do {
8428 err = _nfs41_test_stateid(server, stateid, cred);
8429 if (err != -NFS4ERR_DELAY)
8430 break;
8431 nfs4_handle_exception(server, err, &exception);
8432 } while (exception.retry);
8433 return err;
8434}
8435
8436struct nfs_free_stateid_data {
8437 struct nfs_server *server;
8438 struct nfs41_free_stateid_args args;
8439 struct nfs41_free_stateid_res res;
8440};
8441
8442static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8443{
8444 struct nfs_free_stateid_data *data = calldata;
8445 nfs41_setup_sequence(nfs4_get_session(data->server),
8446 &data->args.seq_args,
8447 &data->res.seq_res,
8448 task);
8449}
8450
8451static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8452{
8453 struct nfs_free_stateid_data *data = calldata;
8454
8455 nfs41_sequence_done(task, &data->res.seq_res);
8456
8457 switch (task->tk_status) {
8458 case -NFS4ERR_DELAY:
8459 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8460 rpc_restart_call_prepare(task);
8461 }
8462}
8463
8464static void nfs41_free_stateid_release(void *calldata)
8465{
8466 kfree(calldata);
8467}
8468
8469static const struct rpc_call_ops nfs41_free_stateid_ops = {
8470 .rpc_call_prepare = nfs41_free_stateid_prepare,
8471 .rpc_call_done = nfs41_free_stateid_done,
8472 .rpc_release = nfs41_free_stateid_release,
8473};
8474
8475static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8476 nfs4_stateid *stateid,
8477 struct rpc_cred *cred,
8478 bool privileged)
8479{
8480 struct rpc_message msg = {
8481 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8482 .rpc_cred = cred,
8483 };
8484 struct rpc_task_setup task_setup = {
8485 .rpc_client = server->client,
8486 .rpc_message = &msg,
8487 .callback_ops = &nfs41_free_stateid_ops,
8488 .flags = RPC_TASK_ASYNC,
8489 };
8490 struct nfs_free_stateid_data *data;
8491
8492 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8493 &task_setup.rpc_client, &msg);
8494
8495 dprintk("NFS call free_stateid %p\n", stateid);
8496 data = kmalloc(sizeof(*data), GFP_NOFS);
8497 if (!data)
8498 return ERR_PTR(-ENOMEM);
8499 data->server = server;
8500 nfs4_stateid_copy(&data->args.stateid, stateid);
8501
8502 task_setup.callback_data = data;
8503
8504 msg.rpc_argp = &data->args;
8505 msg.rpc_resp = &data->res;
8506 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8507 if (privileged)
8508 nfs4_set_sequence_privileged(&data->args.seq_args);
8509
8510 return rpc_run_task(&task_setup);
8511}
8512
8513/**
8514 * nfs41_free_stateid - perform a FREE_STATEID operation
8515 *
8516 * @server: server / transport on which to perform the operation
8517 * @stateid: state ID to release
8518 * @cred: credential
8519 *
8520 * Returns NFS_OK if the server freed "stateid". Otherwise a
8521 * negative NFS4ERR value is returned.
8522 */
8523static int nfs41_free_stateid(struct nfs_server *server,
8524 nfs4_stateid *stateid,
8525 struct rpc_cred *cred)
8526{
8527 struct rpc_task *task;
8528 int ret;
8529
8530 task = _nfs41_free_stateid(server, stateid, cred, true);
8531 if (IS_ERR(task))
8532 return PTR_ERR(task);
8533 ret = rpc_wait_for_completion_task(task);
8534 if (!ret)
8535 ret = task->tk_status;
8536 rpc_put_task(task);
8537 return ret;
8538}
8539
8540static void
8541nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8542{
8543 struct rpc_task *task;
8544 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8545
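	/* The FREE_STATEID call runs asynchronously; we do not wait for the reply */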
8546 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8547 nfs4_free_lock_state(server, lsp);
8548 if (IS_ERR(task))
8549 return;
8550 rpc_put_task(task);
8551}
8552
8553static bool nfs41_match_stateid(const nfs4_stateid *s1,
8554 const nfs4_stateid *s2)
8555{
8556 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8557 return false;
8558
8559 if (s1->seqid == s2->seqid)
8560 return true;
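	/* Treat a zero seqid as a wildcard that matches any seqid */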
8561 if (s1->seqid == 0 || s2->seqid == 0)
8562 return true;
8563
8564 return false;
8565}
8566
8567#endif /* CONFIG_NFS_V4_1 */
8568
8569static bool nfs4_match_stateid(const nfs4_stateid *s1,
8570 const nfs4_stateid *s2)
8571{
8572 return nfs4_stateid_match(s1, s2);
8573}
8574
8575
8576static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8577 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8578 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8579 .recover_open = nfs4_open_reclaim,
8580 .recover_lock = nfs4_lock_reclaim,
8581 .establish_clid = nfs4_init_clientid,
8582 .detect_trunking = nfs40_discover_server_trunking,
8583};
8584
8585#if defined(CONFIG_NFS_V4_1)
8586static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8587 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8588 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8589 .recover_open = nfs4_open_reclaim,
8590 .recover_lock = nfs4_lock_reclaim,
8591 .establish_clid = nfs41_init_clientid,
8592 .reclaim_complete = nfs41_proc_reclaim_complete,
8593 .detect_trunking = nfs41_discover_server_trunking,
8594};
8595#endif /* CONFIG_NFS_V4_1 */
8596
8597static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8598 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8599 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8600 .recover_open = nfs40_open_expired,
8601 .recover_lock = nfs4_lock_expired,
8602 .establish_clid = nfs4_init_clientid,
8603};
8604
8605#if defined(CONFIG_NFS_V4_1)
8606static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8607 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8608 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8609 .recover_open = nfs41_open_expired,
8610 .recover_lock = nfs41_lock_expired,
8611 .establish_clid = nfs41_init_clientid,
8612};
8613#endif /* CONFIG_NFS_V4_1 */
8614
8615static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8616 .sched_state_renewal = nfs4_proc_async_renew,
8617 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8618 .renew_lease = nfs4_proc_renew,
8619};
8620
8621#if defined(CONFIG_NFS_V4_1)
8622static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8623 .sched_state_renewal = nfs41_proc_async_sequence,
8624 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8625 .renew_lease = nfs4_proc_sequence,
8626};
8627#endif
8628
8629static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8630 .get_locations = _nfs40_proc_get_locations,
8631 .fsid_present = _nfs40_proc_fsid_present,
8632};
8633
8634#if defined(CONFIG_NFS_V4_1)
8635static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8636 .get_locations = _nfs41_proc_get_locations,
8637 .fsid_present = _nfs41_proc_fsid_present,
8638};
8639#endif /* CONFIG_NFS_V4_1 */
8640
8641static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8642 .minor_version = 0,
8643 .init_caps = NFS_CAP_READDIRPLUS
8644 | NFS_CAP_ATOMIC_OPEN
8645 | NFS_CAP_POSIX_LOCK,
8646 .init_client = nfs40_init_client,
8647 .shutdown_client = nfs40_shutdown_client,
8648 .match_stateid = nfs4_match_stateid,
8649 .find_root_sec = nfs4_find_root_sec,
8650 .free_lock_state = nfs4_release_lockowner,
8651 .alloc_seqid = nfs_alloc_seqid,
8652 .call_sync_ops = &nfs40_call_sync_ops,
8653 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8654 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8655 .state_renewal_ops = &nfs40_state_renewal_ops,
8656 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8657};
8658
8659#if defined(CONFIG_NFS_V4_1)
8660static struct nfs_seqid *
8661nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8662{
8663 return NULL;
8664}
8665
8666static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8667 .minor_version = 1,
8668 .init_caps = NFS_CAP_READDIRPLUS
8669 | NFS_CAP_ATOMIC_OPEN
8670 | NFS_CAP_POSIX_LOCK
8671 | NFS_CAP_STATEID_NFSV41
8672 | NFS_CAP_ATOMIC_OPEN_V1,
8673 .init_client = nfs41_init_client,
8674 .shutdown_client = nfs41_shutdown_client,
8675 .match_stateid = nfs41_match_stateid,
8676 .find_root_sec = nfs41_find_root_sec,
8677 .free_lock_state = nfs41_free_lock_state,
8678 .alloc_seqid = nfs_alloc_no_seqid,
8679 .call_sync_ops = &nfs41_call_sync_ops,
8680 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8681 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8682 .state_renewal_ops = &nfs41_state_renewal_ops,
8683 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8684};
8685#endif
8686
8687#if defined(CONFIG_NFS_V4_2)
8688static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8689 .minor_version = 2,
8690 .init_caps = NFS_CAP_READDIRPLUS
8691 | NFS_CAP_ATOMIC_OPEN
8692 | NFS_CAP_POSIX_LOCK
8693 | NFS_CAP_STATEID_NFSV41
8694 | NFS_CAP_ATOMIC_OPEN_V1
8695 | NFS_CAP_ALLOCATE
8696 | NFS_CAP_DEALLOCATE
8697 | NFS_CAP_SEEK
8698 | NFS_CAP_LAYOUTSTATS,
8699 .init_client = nfs41_init_client,
8700 .shutdown_client = nfs41_shutdown_client,
8701 .match_stateid = nfs41_match_stateid,
8702 .find_root_sec = nfs41_find_root_sec,
8703 .free_lock_state = nfs41_free_lock_state,
8704 .call_sync_ops = &nfs41_call_sync_ops,
8705 .alloc_seqid = nfs_alloc_no_seqid,
8706 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8707 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8708 .state_renewal_ops = &nfs41_state_renewal_ops,
8709 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8710};
8711#endif
8712
8713const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8714 [0] = &nfs_v4_0_minor_ops,
8715#if defined(CONFIG_NFS_V4_1)
8716 [1] = &nfs_v4_1_minor_ops,
8717#endif
8718#if defined(CONFIG_NFS_V4_2)
8719 [2] = &nfs_v4_2_minor_ops,
8720#endif
8721};
8722
8723static const struct inode_operations nfs4_dir_inode_operations = {
8724 .create = nfs_create,
8725 .lookup = nfs_lookup,
8726 .atomic_open = nfs_atomic_open,
8727 .link = nfs_link,
8728 .unlink = nfs_unlink,
8729 .symlink = nfs_symlink,
8730 .mkdir = nfs_mkdir,
8731 .rmdir = nfs_rmdir,
8732 .mknod = nfs_mknod,
8733 .rename = nfs_rename,
8734 .permission = nfs_permission,
8735 .getattr = nfs_getattr,
8736 .setattr = nfs_setattr,
8737 .getxattr = generic_getxattr,
8738 .setxattr = generic_setxattr,
8739 .listxattr = generic_listxattr,
8740 .removexattr = generic_removexattr,
8741};
8742
8743static const struct inode_operations nfs4_file_inode_operations = {
8744 .permission = nfs_permission,
8745 .getattr = nfs_getattr,
8746 .setattr = nfs_setattr,
8747 .getxattr = generic_getxattr,
8748 .setxattr = generic_setxattr,
8749 .listxattr = generic_listxattr,
8750 .removexattr = generic_removexattr,
8751};
8752
8753const struct nfs_rpc_ops nfs_v4_clientops = {
8754 .version = 4, /* protocol version */
8755 .dentry_ops = &nfs4_dentry_operations,
8756 .dir_inode_ops = &nfs4_dir_inode_operations,
8757 .file_inode_ops = &nfs4_file_inode_operations,
8758 .file_ops = &nfs4_file_operations,
8759 .getroot = nfs4_proc_get_root,
8760 .submount = nfs4_submount,
8761 .try_mount = nfs4_try_mount,
8762 .getattr = nfs4_proc_getattr,
8763 .setattr = nfs4_proc_setattr,
8764 .lookup = nfs4_proc_lookup,
8765 .access = nfs4_proc_access,
8766 .readlink = nfs4_proc_readlink,
8767 .create = nfs4_proc_create,
8768 .remove = nfs4_proc_remove,
8769 .unlink_setup = nfs4_proc_unlink_setup,
8770 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8771 .unlink_done = nfs4_proc_unlink_done,
8772 .rename_setup = nfs4_proc_rename_setup,
8773 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8774 .rename_done = nfs4_proc_rename_done,
8775 .link = nfs4_proc_link,
8776 .symlink = nfs4_proc_symlink,
8777 .mkdir = nfs4_proc_mkdir,
8778 .rmdir = nfs4_proc_remove,
8779 .readdir = nfs4_proc_readdir,
8780 .mknod = nfs4_proc_mknod,
8781 .statfs = nfs4_proc_statfs,
8782 .fsinfo = nfs4_proc_fsinfo,
8783 .pathconf = nfs4_proc_pathconf,
8784 .set_capabilities = nfs4_server_capabilities,
8785 .decode_dirent = nfs4_decode_dirent,
8786 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8787 .read_setup = nfs4_proc_read_setup,
8788 .read_done = nfs4_read_done,
8789 .write_setup = nfs4_proc_write_setup,
8790 .write_done = nfs4_write_done,
8791 .commit_setup = nfs4_proc_commit_setup,
8792 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8793 .commit_done = nfs4_commit_done,
8794 .lock = nfs4_proc_lock,
8795 .clear_acl_cache = nfs4_zap_acl_attr,
8796 .close_context = nfs4_close_context,
8797 .open_context = nfs4_atomic_open,
8798 .have_delegation = nfs4_have_delegation,
8799 .return_delegation = nfs4_inode_return_delegation,
8800 .alloc_client = nfs4_alloc_client,
8801 .init_client = nfs4_init_client,
8802 .free_client = nfs4_free_client,
8803 .create_server = nfs4_create_server,
8804 .clone_server = nfs_clone_server,
8805};
8806
8807static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8808 .prefix = XATTR_NAME_NFSV4_ACL,
8809 .list = nfs4_xattr_list_nfs4_acl,
8810 .get = nfs4_xattr_get_nfs4_acl,
8811 .set = nfs4_xattr_set_nfs4_acl,
8812};
8813
8814const struct xattr_handler *nfs4_xattr_handlers[] = {
8815 &nfs4_xattr_nfs4_acl_handler,
8816#ifdef CONFIG_NFS_V4_SECURITY_LABEL
8817 &nfs4_xattr_nfs4_label_handler,
8818#endif
8819 NULL
8820};
8821
8822/*
8823 * Local variables:
8824 * c-basic-offset: 8
8825 * End:
8826 */