NFSv4.1: nfs41_sequence_done should handle sequence flag errors
[linux-2.6-block.git] / fs / nfs / nfs4proc.c
1/*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/mm.h>
39#include <linux/delay.h>
40#include <linux/errno.h>
41#include <linux/file.h>
42#include <linux/string.h>
43#include <linux/ratelimit.h>
44#include <linux/printk.h>
45#include <linux/slab.h>
46#include <linux/sunrpc/clnt.h>
47#include <linux/nfs.h>
48#include <linux/nfs4.h>
49#include <linux/nfs_fs.h>
50#include <linux/nfs_page.h>
51#include <linux/nfs_mount.h>
52#include <linux/namei.h>
53#include <linux/mount.h>
54#include <linux/module.h>
55#include <linux/xattr.h>
56#include <linux/utsname.h>
57#include <linux/freezer.h>
58
59#include "nfs4_fs.h"
60#include "delegation.h"
61#include "internal.h"
62#include "iostat.h"
63#include "callback.h"
64#include "pnfs.h"
65#include "netns.h"
66#include "nfs4idmap.h"
67#include "nfs4session.h"
68#include "fscache.h"
69
70#include "nfs4trace.h"
71
72#define NFSDBG_FACILITY NFSDBG_PROC
73
74#define NFS4_POLL_RETRY_MIN (HZ/10)
75#define NFS4_POLL_RETRY_MAX (15*HZ)
76
77struct nfs4_opendata;
78static int _nfs4_proc_open(struct nfs4_opendata *data);
79static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *);
82static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
83static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
84static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
85static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
86 struct nfs_fattr *fattr, struct iattr *sattr,
87 struct nfs4_state *state, struct nfs4_label *ilabel,
88 struct nfs4_label *olabel);
89#ifdef CONFIG_NFS_V4_1
90static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
91 struct rpc_cred *);
92static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
93 struct rpc_cred *);
94#endif
95
96#ifdef CONFIG_NFS_V4_SECURITY_LABEL
97static inline struct nfs4_label *
98nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
99 struct iattr *sattr, struct nfs4_label *label)
100{
101 int err;
102
103 if (label == NULL)
104 return NULL;
105
106 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
107 return NULL;
108
109 err = security_dentry_init_security(dentry, sattr->ia_mode,
110 &dentry->d_name, (void **)&label->label, &label->len);
111 if (err == 0)
112 return label;
113
114 return NULL;
115}
116static inline void
117nfs4_label_release_security(struct nfs4_label *label)
118{
119 if (label)
120 security_release_secctx(label->label, label->len);
121}
122static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
123{
124 if (label)
125 return server->attr_bitmask;
126
127 return server->attr_bitmask_nl;
128}
129#else
130static inline struct nfs4_label *
131nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
132 struct iattr *sattr, struct nfs4_label *l)
133{ return NULL; }
134static inline void
135nfs4_label_release_security(struct nfs4_label *label)
136{ return; }
137static inline u32 *
138nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
139{ return server->attr_bitmask; }
140#endif
141
142/* Prevent leaks of NFSv4 errors into userland */
143static int nfs4_map_errors(int err)
144{
145 if (err >= -1000)
146 return err;
147 switch (err) {
148 case -NFS4ERR_RESOURCE:
149 case -NFS4ERR_LAYOUTTRYLATER:
150 case -NFS4ERR_RECALLCONFLICT:
151 return -EREMOTEIO;
152 case -NFS4ERR_WRONGSEC:
153 case -NFS4ERR_WRONG_CRED:
154 return -EPERM;
155 case -NFS4ERR_BADOWNER:
156 case -NFS4ERR_BADNAME:
157 return -EINVAL;
158 case -NFS4ERR_SHARE_DENIED:
159 return -EACCES;
160 case -NFS4ERR_MINOR_VERS_MISMATCH:
161 return -EPROTONOSUPPORT;
162 case -NFS4ERR_FILE_OPEN:
163 return -EBUSY;
164 default:
165 dprintk("%s could not handle NFSv4 error %d\n",
166 __func__, -err);
167 break;
168 }
169 return -EIO;
170}
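/*
 * Illustrative sketch (not part of the original file): the effect of
 * nfs4_map_errors() as seen by a caller.  Any status more negative than
 * -1000 is treated as a raw NFSv4 protocol code and translated to a
 * plain errno before it can reach userspace; ordinary errnos pass
 * through untouched.
 *
 *	status = nfs4_map_errors(-NFS4ERR_RESOURCE);	// -EREMOTEIO
 *	status = nfs4_map_errors(-NFS4ERR_BADOWNER);	// -EINVAL
 *	status = nfs4_map_errors(-EACCES);		// unchanged: -EACCES
 */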
171
172/*
173 * This is our standard bitmap for GETATTR requests.
174 */
175const u32 nfs4_fattr_bitmap[3] = {
176 FATTR4_WORD0_TYPE
177 | FATTR4_WORD0_CHANGE
178 | FATTR4_WORD0_SIZE
179 | FATTR4_WORD0_FSID
180 | FATTR4_WORD0_FILEID,
181 FATTR4_WORD1_MODE
182 | FATTR4_WORD1_NUMLINKS
183 | FATTR4_WORD1_OWNER
184 | FATTR4_WORD1_OWNER_GROUP
185 | FATTR4_WORD1_RAWDEV
186 | FATTR4_WORD1_SPACE_USED
187 | FATTR4_WORD1_TIME_ACCESS
188 | FATTR4_WORD1_TIME_METADATA
189 | FATTR4_WORD1_TIME_MODIFY
190 | FATTR4_WORD1_MOUNTED_ON_FILEID,
191#ifdef CONFIG_NFS_V4_SECURITY_LABEL
192 FATTR4_WORD2_SECURITY_LABEL
193#endif
194};
195
196static const u32 nfs4_pnfs_open_bitmap[3] = {
197 FATTR4_WORD0_TYPE
198 | FATTR4_WORD0_CHANGE
199 | FATTR4_WORD0_SIZE
200 | FATTR4_WORD0_FSID
201 | FATTR4_WORD0_FILEID,
202 FATTR4_WORD1_MODE
203 | FATTR4_WORD1_NUMLINKS
204 | FATTR4_WORD1_OWNER
205 | FATTR4_WORD1_OWNER_GROUP
206 | FATTR4_WORD1_RAWDEV
207 | FATTR4_WORD1_SPACE_USED
208 | FATTR4_WORD1_TIME_ACCESS
209 | FATTR4_WORD1_TIME_METADATA
210 | FATTR4_WORD1_TIME_MODIFY,
211 FATTR4_WORD2_MDSTHRESHOLD
212};
213
214static const u32 nfs4_open_noattr_bitmap[3] = {
215 FATTR4_WORD0_TYPE
216 | FATTR4_WORD0_CHANGE
217 | FATTR4_WORD0_FILEID,
218};
219
220const u32 nfs4_statfs_bitmap[3] = {
221 FATTR4_WORD0_FILES_AVAIL
222 | FATTR4_WORD0_FILES_FREE
223 | FATTR4_WORD0_FILES_TOTAL,
224 FATTR4_WORD1_SPACE_AVAIL
225 | FATTR4_WORD1_SPACE_FREE
226 | FATTR4_WORD1_SPACE_TOTAL
227};
228
229const u32 nfs4_pathconf_bitmap[3] = {
230 FATTR4_WORD0_MAXLINK
231 | FATTR4_WORD0_MAXNAME,
232 0
233};
234
235const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
236 | FATTR4_WORD0_MAXREAD
237 | FATTR4_WORD0_MAXWRITE
238 | FATTR4_WORD0_LEASE_TIME,
239 FATTR4_WORD1_TIME_DELTA
240 | FATTR4_WORD1_FS_LAYOUT_TYPES,
241 FATTR4_WORD2_LAYOUT_BLKSIZE
242};
243
244const u32 nfs4_fs_locations_bitmap[3] = {
245 FATTR4_WORD0_TYPE
246 | FATTR4_WORD0_CHANGE
247 | FATTR4_WORD0_SIZE
248 | FATTR4_WORD0_FSID
249 | FATTR4_WORD0_FILEID
250 | FATTR4_WORD0_FS_LOCATIONS,
251 FATTR4_WORD1_MODE
252 | FATTR4_WORD1_NUMLINKS
253 | FATTR4_WORD1_OWNER
254 | FATTR4_WORD1_OWNER_GROUP
255 | FATTR4_WORD1_RAWDEV
256 | FATTR4_WORD1_SPACE_USED
257 | FATTR4_WORD1_TIME_ACCESS
258 | FATTR4_WORD1_TIME_METADATA
259 | FATTR4_WORD1_TIME_MODIFY
260 | FATTR4_WORD1_MOUNTED_ON_FILEID,
261};
262
263static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
264 struct nfs4_readdir_arg *readdir)
265{
266 __be32 *start, *p;
267
268 if (cookie > 2) {
269 readdir->cookie = cookie;
270 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
271 return;
272 }
273
274 readdir->cookie = 0;
275 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
276 if (cookie == 2)
277 return;
278
279 /*
280 * NFSv4 servers do not return entries for '.' and '..'
281 * Therefore, we fake these entries here. We let '.'
282 * have cookie 0 and '..' have cookie 1. Note that
283 * when talking to the server, we always send cookie 0
284 * instead of 1 or 2.
285 */
286 start = p = kmap_atomic(*readdir->pages);
287
288 if (cookie == 0) {
289 *p++ = xdr_one; /* next */
290 *p++ = xdr_zero; /* cookie, first word */
291 *p++ = xdr_one; /* cookie, second word */
292 *p++ = xdr_one; /* entry len */
293 memcpy(p, ".\0\0\0", 4); /* entry */
294 p++;
295 *p++ = xdr_one; /* bitmap length */
296 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
297 *p++ = htonl(8); /* attribute buffer length */
298 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
299 }
300
301 *p++ = xdr_one; /* next */
302 *p++ = xdr_zero; /* cookie, first word */
303 *p++ = xdr_two; /* cookie, second word */
304 *p++ = xdr_two; /* entry len */
305 memcpy(p, "..\0\0", 4); /* entry */
306 p++;
307 *p++ = xdr_one; /* bitmap length */
308 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
309 *p++ = htonl(8); /* attribute buffer length */
310 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
311
312 readdir->pgbase = (char *)p - (char *)start;
313 readdir->count -= readdir->pgbase;
314 kunmap_atomic(start);
315}
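/*
 * Illustrative sketch (not part of the original file): XDR layout of the
 * fabricated '..' entry emitted above, word by word.  The '.' entry is
 * identical apart from its cookie (1) and name ("." padded to 4 bytes).
 *
 *	word 0:      1                      "value follows" marker
 *	words 1-2:   0, 2                   64-bit cookie = 2
 *	word 3:      2                      name length
 *	word 4:      ".." + 2 bytes pad     name, XDR-padded to 4 bytes
 *	word 5:      1                      attribute bitmap length
 *	word 6:      FATTR4_WORD0_FILEID    attribute bitmap
 *	word 7:      8                      attribute buffer length
 *	words 8-9:   parent fileid          64-bit fileid
 */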
316
317static long nfs4_update_delay(long *timeout)
318{
319 long ret;
320 if (!timeout)
321 return NFS4_POLL_RETRY_MAX;
322 if (*timeout <= 0)
323 *timeout = NFS4_POLL_RETRY_MIN;
324 if (*timeout > NFS4_POLL_RETRY_MAX)
325 *timeout = NFS4_POLL_RETRY_MAX;
326 ret = *timeout;
327 *timeout <<= 1;
328 return ret;
329}
330
331static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
332{
333 int res = 0;
334
335 might_sleep();
336
337 freezable_schedule_timeout_killable_unsafe(
338 nfs4_update_delay(timeout));
339 if (fatal_signal_pending(current))
340 res = -ERESTARTSYS;
341 return res;
342}
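/*
 * Illustrative sketch (assumption, not in the original): the
 * retry/backoff pattern these two helpers support.  The caller keeps
 * one 'timeout' value across retries; nfs4_update_delay() doubles it on
 * every pass, starting at NFS4_POLL_RETRY_MIN (HZ/10) and clamping at
 * NFS4_POLL_RETRY_MAX (15*HZ).  some_nfs4_operation() is a hypothetical
 * stand-in for the operation being retried.
 *
 *	long timeout = 0;
 *	int err;
 *
 *	do {
 *		err = some_nfs4_operation(server);
 *		if (err != -NFS4ERR_DELAY)
 *			break;
 *	} while (nfs4_delay(server->client, &timeout) == 0);
 */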
343
344/* This is the error handling routine for processes that are allowed
345 * to sleep.
346 */
347int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
348{
349 struct nfs_client *clp = server->nfs_client;
350 struct nfs4_state *state = exception->state;
351 struct inode *inode = exception->inode;
352 int ret = errorcode;
353
354 exception->retry = 0;
355 switch(errorcode) {
356 case 0:
357 return 0;
358 case -NFS4ERR_OPENMODE:
359 case -NFS4ERR_DELEG_REVOKED:
360 case -NFS4ERR_ADMIN_REVOKED:
361 case -NFS4ERR_BAD_STATEID:
362 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
363 nfs4_inode_return_delegation(inode);
364 exception->retry = 1;
365 return 0;
366 }
367 if (state == NULL)
368 break;
369 ret = nfs4_schedule_stateid_recovery(server, state);
370 if (ret < 0)
371 break;
372 goto wait_on_recovery;
373 case -NFS4ERR_EXPIRED:
374 if (state != NULL) {
375 ret = nfs4_schedule_stateid_recovery(server, state);
376 if (ret < 0)
377 break;
378 }
379 case -NFS4ERR_STALE_STATEID:
380 case -NFS4ERR_STALE_CLIENTID:
381 nfs4_schedule_lease_recovery(clp);
382 goto wait_on_recovery;
383 case -NFS4ERR_MOVED:
384 ret = nfs4_schedule_migration_recovery(server);
385 if (ret < 0)
386 break;
387 goto wait_on_recovery;
388 case -NFS4ERR_LEASE_MOVED:
389 nfs4_schedule_lease_moved_recovery(clp);
390 goto wait_on_recovery;
391#if defined(CONFIG_NFS_V4_1)
392 case -NFS4ERR_BADSESSION:
393 case -NFS4ERR_BADSLOT:
394 case -NFS4ERR_BAD_HIGH_SLOT:
395 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
396 case -NFS4ERR_DEADSESSION:
397 case -NFS4ERR_SEQ_FALSE_RETRY:
398 case -NFS4ERR_SEQ_MISORDERED:
399 dprintk("%s ERROR: %d Reset session\n", __func__,
400 errorcode);
401 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
402 goto wait_on_recovery;
403#endif /* defined(CONFIG_NFS_V4_1) */
404 case -NFS4ERR_FILE_OPEN:
405 if (exception->timeout > HZ) {
406 /* We have retried a decent amount, time to
407 * fail
408 */
409 ret = -EBUSY;
410 break;
411 }
412 case -NFS4ERR_GRACE:
413 case -NFS4ERR_DELAY:
414 ret = nfs4_delay(server->client, &exception->timeout);
415 if (ret != 0)
416 break;
417 case -NFS4ERR_RETRY_UNCACHED_REP:
418 case -NFS4ERR_OLD_STATEID:
419 exception->retry = 1;
420 break;
421 case -NFS4ERR_BADOWNER:
422 /* The following works around a Linux server bug! */
423 case -NFS4ERR_BADNAME:
424 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
425 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
426 exception->retry = 1;
427 printk(KERN_WARNING "NFS: v4 server %s "
428 "does not accept raw "
429 "uid/gids. "
430 "Reenabling the idmapper.\n",
431 server->nfs_client->cl_hostname);
432 }
433 }
434 /* We failed to handle the error */
435 return nfs4_map_errors(ret);
436wait_on_recovery:
437 ret = nfs4_wait_clnt_recover(clp);
438 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
439 return -EIO;
440 if (ret == 0)
441 exception->retry = 1;
442 return ret;
443}
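/*
 * Illustrative sketch (assumption, not part of the original file): the
 * retry loop that callers throughout this file build around
 * nfs4_handle_exception().  The handler sets exception.retry once it
 * has dealt with the error (scheduled recovery, slept for NFS4ERR_DELAY,
 * and so on), so the caller simply repeats the operation until retry
 * stays clear.  _nfs4_some_operation() is a hypothetical stand-in for
 * one of the _nfs4_proc_*() helpers.
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *
 *	do {
 *		err = nfs4_handle_exception(server,
 *				_nfs4_some_operation(server, ...),
 *				&exception);
 *	} while (exception.retry);
 *	return err;
 */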
444
445/*
446 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
447 * or 'false' otherwise.
448 */
449static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
450{
451 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
452
453 if (flavor == RPC_AUTH_GSS_KRB5I ||
454 flavor == RPC_AUTH_GSS_KRB5P)
455 return true;
456
457 return false;
458}
459
460static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
461{
462 spin_lock(&clp->cl_lock);
463 if (time_before(clp->cl_last_renewal,timestamp))
464 clp->cl_last_renewal = timestamp;
465 spin_unlock(&clp->cl_lock);
466}
467
468static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469{
470 do_renew_lease(server->nfs_client, timestamp);
471}
472
473struct nfs4_call_sync_data {
474 const struct nfs_server *seq_server;
475 struct nfs4_sequence_args *seq_args;
476 struct nfs4_sequence_res *seq_res;
477};
478
479void nfs4_init_sequence(struct nfs4_sequence_args *args,
480 struct nfs4_sequence_res *res, int cache_reply)
481{
482 args->sa_slot = NULL;
483 args->sa_cache_this = cache_reply;
484 args->sa_privileged = 0;
485
486 res->sr_slot = NULL;
487}
488
489static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
490{
491 args->sa_privileged = 1;
492}
493
494int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
495 struct nfs4_sequence_args *args,
496 struct nfs4_sequence_res *res,
497 struct rpc_task *task)
498{
499 struct nfs4_slot *slot;
500
501 /* slot already allocated? */
502 if (res->sr_slot != NULL)
503 goto out_start;
504
505 spin_lock(&tbl->slot_tbl_lock);
506 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
507 goto out_sleep;
508
509 slot = nfs4_alloc_slot(tbl);
510 if (IS_ERR(slot)) {
511 if (slot == ERR_PTR(-ENOMEM))
512 task->tk_timeout = HZ >> 2;
513 goto out_sleep;
514 }
515 spin_unlock(&tbl->slot_tbl_lock);
516
517 args->sa_slot = slot;
518 res->sr_slot = slot;
519
520out_start:
521 rpc_call_start(task);
522 return 0;
523
524out_sleep:
525 if (args->sa_privileged)
526 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
527 NULL, RPC_PRIORITY_PRIVILEGED);
528 else
529 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
530 spin_unlock(&tbl->slot_tbl_lock);
531 return -EAGAIN;
532}
533EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
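/*
 * Illustrative sketch (assumption): nfs40_setup_sequence() is meant to
 * be called from a .rpc_call_prepare callback.  A return of -EAGAIN
 * means the task has already been queued on the slot table waitqueue
 * and prepare will run again once a slot becomes free; a return of 0
 * means the task was started via rpc_call_start().  See
 * nfs4_open_confirm_prepare() later in this file for a real caller.
 *
 *	static void example_prepare(struct rpc_task *task, void *calldata)
 *	{
 *		struct nfs4_call_sync_data *data = calldata;
 *
 *		nfs40_setup_sequence(data->seq_server->nfs_client->cl_slot_tbl,
 *				     data->seq_args, data->seq_res, task);
 *	}
 */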
534
535static int nfs40_sequence_done(struct rpc_task *task,
536 struct nfs4_sequence_res *res)
537{
538 struct nfs4_slot *slot = res->sr_slot;
539 struct nfs4_slot_table *tbl;
540
541 if (slot == NULL)
542 goto out;
543
544 tbl = slot->table;
545 spin_lock(&tbl->slot_tbl_lock);
546 if (!nfs41_wake_and_assign_slot(tbl, slot))
547 nfs4_free_slot(tbl, slot);
548 spin_unlock(&tbl->slot_tbl_lock);
549
550 res->sr_slot = NULL;
551out:
552 return 1;
553}
554
555#if defined(CONFIG_NFS_V4_1)
556
557static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
558{
559 struct nfs4_session *session;
560 struct nfs4_slot_table *tbl;
561 struct nfs4_slot *slot = res->sr_slot;
562 bool send_new_highest_used_slotid = false;
563
564 tbl = slot->table;
565 session = tbl->session;
566
567 spin_lock(&tbl->slot_tbl_lock);
568 /* Be nice to the server: try to ensure that the last transmitted
 569 * value for highest_used_slotid <= target_highest_slotid
570 */
571 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
572 send_new_highest_used_slotid = true;
573
574 if (nfs41_wake_and_assign_slot(tbl, slot)) {
575 send_new_highest_used_slotid = false;
576 goto out_unlock;
577 }
578 nfs4_free_slot(tbl, slot);
579
580 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
581 send_new_highest_used_slotid = false;
582out_unlock:
583 spin_unlock(&tbl->slot_tbl_lock);
584 res->sr_slot = NULL;
585 if (send_new_highest_used_slotid)
586 nfs41_server_notify_highest_slotid_update(session->clp);
587}
588
589int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
590{
591 struct nfs4_session *session;
592 struct nfs4_slot *slot = res->sr_slot;
593 struct nfs_client *clp;
594 bool interrupted = false;
595 int ret = 1;
596
597 if (slot == NULL)
598 goto out_noaction;
599 /* don't increment the sequence number if the task wasn't sent */
600 if (!RPC_WAS_SENT(task))
601 goto out;
602
603 session = slot->table->session;
604
605 if (slot->interrupted) {
606 slot->interrupted = 0;
607 interrupted = true;
608 }
609
610 trace_nfs4_sequence_done(session, res);
611 /* Check the SEQUENCE operation status */
612 switch (res->sr_status) {
613 case 0:
614 /* Update the slot's sequence and clientid lease timer */
615 ++slot->seq_nr;
616 clp = session->clp;
617 do_renew_lease(clp, res->sr_timestamp);
618 /* Check sequence flags */
619 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
620 nfs41_update_target_slotid(slot->table, slot, res);
621 break;
622 case 1:
623 /*
624 * sr_status remains 1 if an RPC level error occurred.
625 * The server may or may not have processed the sequence
 626 * operation.
627 * Mark the slot as having hosted an interrupted RPC call.
628 */
629 slot->interrupted = 1;
630 goto out;
631 case -NFS4ERR_DELAY:
632 /* The server detected a resend of the RPC call and
633 * returned NFS4ERR_DELAY as per Section 2.10.6.2
634 * of RFC5661.
635 */
636 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
637 __func__,
638 slot->slot_nr,
639 slot->seq_nr);
640 goto out_retry;
641 case -NFS4ERR_BADSLOT:
642 /*
643 * The slot id we used was probably retired. Try again
644 * using a different slot id.
645 */
646 goto retry_nowait;
647 case -NFS4ERR_SEQ_MISORDERED:
648 /*
649 * Was the last operation on this sequence interrupted?
650 * If so, retry after bumping the sequence number.
651 */
652 if (interrupted) {
653 ++slot->seq_nr;
654 goto retry_nowait;
655 }
656 /*
657 * Could this slot have been previously retired?
658 * If so, then the server may be expecting seq_nr = 1!
659 */
660 if (slot->seq_nr != 1) {
661 slot->seq_nr = 1;
662 goto retry_nowait;
663 }
664 break;
665 case -NFS4ERR_SEQ_FALSE_RETRY:
666 ++slot->seq_nr;
667 goto retry_nowait;
668 default:
669 /* Just update the slot sequence no. */
670 ++slot->seq_nr;
671 }
672out:
673 /* The session may be reset by one of the error handlers. */
674 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
675 nfs41_sequence_free_slot(res);
676out_noaction:
677 return ret;
678retry_nowait:
679 if (rpc_restart_call_prepare(task)) {
680 task->tk_status = 0;
681 ret = 0;
682 }
683 goto out;
684out_retry:
685 if (!rpc_restart_call(task))
686 goto out;
687 rpc_delay(task, NFS4_POLL_RETRY_MAX);
688 return 0;
689}
690EXPORT_SYMBOL_GPL(nfs41_sequence_done);
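/*
 * Illustrative sketch (assumption): a typical .rpc_call_done callback
 * hands the result straight back to nfs41_sequence_done(), exactly as
 * nfs41_call_sync_done() does below.  On success the slot's sequence
 * number is bumped, the clientid lease is renewed, and the SEQUENCE
 * status flags (e.g. SEQ4_STATUS_CB_PATH_DOWN) are forwarded to
 * nfs41_handle_sequence_flag_errors() so the state manager can react.
 * Callers with more work to do first check the return value, as
 * nfs4_open_done() does later in this file:
 *
 *	static void example_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		struct example_data *data = calldata;	// hypothetical
 *
 *		if (!nfs41_sequence_done(task, &data->seq_res))
 *			return;	// task restarted, e.g. to pick a new slot
 *		...
 *	}
 */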
691
692int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
693{
694 if (res->sr_slot == NULL)
695 return 1;
696 if (!res->sr_slot->table->session)
697 return nfs40_sequence_done(task, res);
698 return nfs41_sequence_done(task, res);
699}
700EXPORT_SYMBOL_GPL(nfs4_sequence_done);
701
702int nfs41_setup_sequence(struct nfs4_session *session,
703 struct nfs4_sequence_args *args,
704 struct nfs4_sequence_res *res,
705 struct rpc_task *task)
706{
707 struct nfs4_slot *slot;
708 struct nfs4_slot_table *tbl;
709
710 dprintk("--> %s\n", __func__);
711 /* slot already allocated? */
712 if (res->sr_slot != NULL)
713 goto out_success;
714
715 tbl = &session->fc_slot_table;
716
717 task->tk_timeout = 0;
718
719 spin_lock(&tbl->slot_tbl_lock);
720 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
721 !args->sa_privileged) {
722 /* The state manager will wait until the slot table is empty */
723 dprintk("%s session is draining\n", __func__);
724 goto out_sleep;
725 }
726
727 slot = nfs4_alloc_slot(tbl);
728 if (IS_ERR(slot)) {
729 /* If out of memory, try again in 1/4 second */
730 if (slot == ERR_PTR(-ENOMEM))
731 task->tk_timeout = HZ >> 2;
732 dprintk("<-- %s: no free slots\n", __func__);
733 goto out_sleep;
734 }
735 spin_unlock(&tbl->slot_tbl_lock);
736
737 args->sa_slot = slot;
738
739 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
740 slot->slot_nr, slot->seq_nr);
741
742 res->sr_slot = slot;
743 res->sr_timestamp = jiffies;
744 res->sr_status_flags = 0;
745 /*
746 * sr_status is only set in decode_sequence, and so will remain
747 * set to 1 if an rpc level failure occurs.
748 */
749 res->sr_status = 1;
750 trace_nfs4_setup_sequence(session, args);
751out_success:
752 rpc_call_start(task);
753 return 0;
754out_sleep:
755 /* Privileged tasks are queued with top priority */
756 if (args->sa_privileged)
757 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
758 NULL, RPC_PRIORITY_PRIVILEGED);
759 else
760 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
761 spin_unlock(&tbl->slot_tbl_lock);
762 return -EAGAIN;
763}
764EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
765
766static int nfs4_setup_sequence(const struct nfs_server *server,
767 struct nfs4_sequence_args *args,
768 struct nfs4_sequence_res *res,
769 struct rpc_task *task)
770{
771 struct nfs4_session *session = nfs4_get_session(server);
772 int ret = 0;
773
774 if (!session)
775 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
776 args, res, task);
777
778 dprintk("--> %s clp %p session %p sr_slot %u\n",
779 __func__, session->clp, session, res->sr_slot ?
780 res->sr_slot->slot_nr : NFS4_NO_SLOT);
781
782 ret = nfs41_setup_sequence(session, args, res, task);
783
784 dprintk("<-- %s status=%d\n", __func__, ret);
785 return ret;
786}
787
788static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
789{
790 struct nfs4_call_sync_data *data = calldata;
791 struct nfs4_session *session = nfs4_get_session(data->seq_server);
792
793 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
794
795 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
796}
797
798static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
799{
800 struct nfs4_call_sync_data *data = calldata;
801
802 nfs41_sequence_done(task, data->seq_res);
803}
804
805static const struct rpc_call_ops nfs41_call_sync_ops = {
806 .rpc_call_prepare = nfs41_call_sync_prepare,
807 .rpc_call_done = nfs41_call_sync_done,
808};
809
810#else /* !CONFIG_NFS_V4_1 */
811
812static int nfs4_setup_sequence(const struct nfs_server *server,
813 struct nfs4_sequence_args *args,
814 struct nfs4_sequence_res *res,
815 struct rpc_task *task)
816{
817 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
818 args, res, task);
819}
820
821int nfs4_sequence_done(struct rpc_task *task,
822 struct nfs4_sequence_res *res)
823{
824 return nfs40_sequence_done(task, res);
825}
826EXPORT_SYMBOL_GPL(nfs4_sequence_done);
827
828#endif /* !CONFIG_NFS_V4_1 */
829
830static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
831{
832 struct nfs4_call_sync_data *data = calldata;
833 nfs4_setup_sequence(data->seq_server,
834 data->seq_args, data->seq_res, task);
835}
836
837static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
838{
839 struct nfs4_call_sync_data *data = calldata;
840 nfs4_sequence_done(task, data->seq_res);
841}
842
843static const struct rpc_call_ops nfs40_call_sync_ops = {
844 .rpc_call_prepare = nfs40_call_sync_prepare,
845 .rpc_call_done = nfs40_call_sync_done,
846};
847
848static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
849 struct nfs_server *server,
850 struct rpc_message *msg,
851 struct nfs4_sequence_args *args,
852 struct nfs4_sequence_res *res)
853{
854 int ret;
855 struct rpc_task *task;
856 struct nfs_client *clp = server->nfs_client;
857 struct nfs4_call_sync_data data = {
858 .seq_server = server,
859 .seq_args = args,
860 .seq_res = res,
861 };
862 struct rpc_task_setup task_setup = {
863 .rpc_client = clnt,
864 .rpc_message = msg,
865 .callback_ops = clp->cl_mvops->call_sync_ops,
866 .callback_data = &data
867 };
868
869 task = rpc_run_task(&task_setup);
870 if (IS_ERR(task))
871 ret = PTR_ERR(task);
872 else {
873 ret = task->tk_status;
874 rpc_put_task(task);
875 }
876 return ret;
877}
878
879int nfs4_call_sync(struct rpc_clnt *clnt,
880 struct nfs_server *server,
881 struct rpc_message *msg,
882 struct nfs4_sequence_args *args,
883 struct nfs4_sequence_res *res,
884 int cache_reply)
885{
886 nfs4_init_sequence(args, res, cache_reply);
887 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
888}
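/*
 * Illustrative sketch (assumption): how a simple synchronous NFSv4
 * procedure is typically issued through nfs4_call_sync().  The argument
 * and result structures embed nfs4_sequence_args/nfs4_sequence_res so
 * that the minor-version code can prepend a SEQUENCE operation
 * transparently.  'example_args'/'example_res' are hypothetical; real
 * callers use the per-operation structures from nfs_xdr.h.
 *
 *	struct example_args args = { ... };	// embeds .seq_args
 *	struct example_res res;			// embeds .seq_res
 *	struct rpc_message msg = {
 *		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *
 *	status = nfs4_call_sync(server->client, server, &msg,
 *				&args.seq_args, &res.seq_res, 0);
 */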
889
890static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
891{
892 struct nfs_inode *nfsi = NFS_I(dir);
893
894 spin_lock(&dir->i_lock);
895 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
896 if (!cinfo->atomic || cinfo->before != dir->i_version)
897 nfs_force_lookup_revalidate(dir);
898 dir->i_version = cinfo->after;
899 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
900 nfs_fscache_invalidate(dir);
901 spin_unlock(&dir->i_lock);
902}
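/*
 * Illustrative sketch (assumption): how the change_info4 from a
 * directory-modifying operation is interpreted above.  When the server
 * marked the change as atomic and cinfo->before matches the change
 * attribute cached in dir->i_version, the cached dentries stay valid;
 * otherwise nfs_force_lookup_revalidate() is called.  In both cases
 * dir->i_version is then advanced to cinfo->after.
 *
 *	atomic && before == i_version  ->  keep dcache, i_version = after
 *	otherwise                      ->  force lookup revalidation,
 *	                                   i_version = after
 */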
903
904struct nfs4_opendata {
905 struct kref kref;
906 struct nfs_openargs o_arg;
907 struct nfs_openres o_res;
908 struct nfs_open_confirmargs c_arg;
909 struct nfs_open_confirmres c_res;
910 struct nfs4_string owner_name;
911 struct nfs4_string group_name;
912 struct nfs_fattr f_attr;
913 struct nfs4_label *f_label;
914 struct dentry *dir;
915 struct dentry *dentry;
916 struct nfs4_state_owner *owner;
917 struct nfs4_state *state;
918 struct iattr attrs;
919 unsigned long timestamp;
920 unsigned int rpc_done : 1;
921 unsigned int file_created : 1;
922 unsigned int is_recover : 1;
923 int rpc_status;
924 int cancelled;
925};
926
927static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
928 int err, struct nfs4_exception *exception)
929{
930 if (err != -EINVAL)
931 return false;
932 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
933 return false;
934 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
935 exception->retry = 1;
936 return true;
937}
938
939static u32
940nfs4_map_atomic_open_share(struct nfs_server *server,
941 fmode_t fmode, int openflags)
942{
943 u32 res = 0;
944
945 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
946 case FMODE_READ:
947 res = NFS4_SHARE_ACCESS_READ;
948 break;
949 case FMODE_WRITE:
950 res = NFS4_SHARE_ACCESS_WRITE;
951 break;
952 case FMODE_READ|FMODE_WRITE:
953 res = NFS4_SHARE_ACCESS_BOTH;
954 }
955 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
956 goto out;
957 /* Want no delegation if we're using O_DIRECT */
958 if (openflags & O_DIRECT)
959 res |= NFS4_SHARE_WANT_NO_DELEG;
960out:
961 return res;
962}
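/*
 * Illustrative worked example (not in the original): opening a file
 * O_RDWR|O_DIRECT against a server that advertises NFS_CAP_ATOMIC_OPEN_V1
 * maps to
 *
 *	NFS4_SHARE_ACCESS_BOTH | NFS4_SHARE_WANT_NO_DELEG
 *
 * while the same open against a server without that capability yields
 * just NFS4_SHARE_ACCESS_BOTH, since the WANT bits are only sent when
 * the v4.1-style atomic open is supported.
 */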
963
964static enum open_claim_type4
965nfs4_map_atomic_open_claim(struct nfs_server *server,
966 enum open_claim_type4 claim)
967{
968 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
969 return claim;
970 switch (claim) {
971 default:
972 return claim;
973 case NFS4_OPEN_CLAIM_FH:
974 return NFS4_OPEN_CLAIM_NULL;
975 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
976 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
977 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
978 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
979 }
980}
981
982static void nfs4_init_opendata_res(struct nfs4_opendata *p)
983{
984 p->o_res.f_attr = &p->f_attr;
985 p->o_res.f_label = p->f_label;
986 p->o_res.seqid = p->o_arg.seqid;
987 p->c_res.seqid = p->c_arg.seqid;
988 p->o_res.server = p->o_arg.server;
989 p->o_res.access_request = p->o_arg.access;
990 nfs_fattr_init(&p->f_attr);
991 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
992}
993
994static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
995 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
996 const struct iattr *attrs,
997 struct nfs4_label *label,
998 enum open_claim_type4 claim,
999 gfp_t gfp_mask)
1000{
1001 struct dentry *parent = dget_parent(dentry);
1002 struct inode *dir = d_inode(parent);
1003 struct nfs_server *server = NFS_SERVER(dir);
1004 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1005 struct nfs4_opendata *p;
1006
1007 p = kzalloc(sizeof(*p), gfp_mask);
1008 if (p == NULL)
1009 goto err;
1010
1011 p->f_label = nfs4_label_alloc(server, gfp_mask);
1012 if (IS_ERR(p->f_label))
1013 goto err_free_p;
1014
1015 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1016 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1017 if (IS_ERR(p->o_arg.seqid))
1018 goto err_free_label;
1019 nfs_sb_active(dentry->d_sb);
1020 p->dentry = dget(dentry);
1021 p->dir = parent;
1022 p->owner = sp;
1023 atomic_inc(&sp->so_count);
1024 p->o_arg.open_flags = flags;
1025 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1026 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1027 fmode, flags);
1028 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1029 * will return permission denied for all bits until close */
1030 if (!(flags & O_EXCL)) {
1031 /* ask server to check for all possible rights as results
1032 * are cached */
1033 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1034 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1035 }
1036 p->o_arg.clientid = server->nfs_client->cl_clientid;
1037 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1038 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1039 p->o_arg.name = &dentry->d_name;
1040 p->o_arg.server = server;
1041 p->o_arg.bitmask = nfs4_bitmask(server, label);
1042 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1043 p->o_arg.label = label;
1044 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1045 switch (p->o_arg.claim) {
1046 case NFS4_OPEN_CLAIM_NULL:
1047 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1048 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1049 p->o_arg.fh = NFS_FH(dir);
1050 break;
1051 case NFS4_OPEN_CLAIM_PREVIOUS:
1052 case NFS4_OPEN_CLAIM_FH:
1053 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1054 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1055 p->o_arg.fh = NFS_FH(d_inode(dentry));
1056 }
1057 if (attrs != NULL && attrs->ia_valid != 0) {
1058 __u32 verf[2];
1059
1060 p->o_arg.u.attrs = &p->attrs;
1061 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1062
1063 verf[0] = jiffies;
1064 verf[1] = current->pid;
1065 memcpy(p->o_arg.u.verifier.data, verf,
1066 sizeof(p->o_arg.u.verifier.data));
1067 }
1068 p->c_arg.fh = &p->o_res.fh;
1069 p->c_arg.stateid = &p->o_res.stateid;
1070 p->c_arg.seqid = p->o_arg.seqid;
1071 nfs4_init_opendata_res(p);
1072 kref_init(&p->kref);
1073 return p;
1074
1075err_free_label:
1076 nfs4_label_free(p->f_label);
1077err_free_p:
1078 kfree(p);
1079err:
1080 dput(parent);
1081 return NULL;
1082}
1083
1084static void nfs4_opendata_free(struct kref *kref)
1085{
1086 struct nfs4_opendata *p = container_of(kref,
1087 struct nfs4_opendata, kref);
1088 struct super_block *sb = p->dentry->d_sb;
1089
1090 nfs_free_seqid(p->o_arg.seqid);
1091 if (p->state != NULL)
1092 nfs4_put_open_state(p->state);
1093 nfs4_put_state_owner(p->owner);
1094
1095 nfs4_label_free(p->f_label);
1096
1097 dput(p->dir);
1098 dput(p->dentry);
1099 nfs_sb_deactive(sb);
1100 nfs_fattr_free_names(&p->f_attr);
1101 kfree(p->f_attr.mdsthreshold);
1102 kfree(p);
1103}
1104
1105static void nfs4_opendata_put(struct nfs4_opendata *p)
1106{
1107 if (p != NULL)
1108 kref_put(&p->kref, nfs4_opendata_free);
1109}
1110
1111static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1112{
1113 int ret;
1114
1115 ret = rpc_wait_for_completion_task(task);
1116 return ret;
1117}
1118
1119static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1120{
1121 int ret = 0;
1122
1123 if (open_mode & (O_EXCL|O_TRUNC))
1124 goto out;
1125 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1126 case FMODE_READ:
1127 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1128 && state->n_rdonly != 0;
1129 break;
1130 case FMODE_WRITE:
1131 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1132 && state->n_wronly != 0;
1133 break;
1134 case FMODE_READ|FMODE_WRITE:
1135 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1136 && state->n_rdwr != 0;
1137 }
1138out:
1139 return ret;
1140}
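/*
 * Illustrative sketch (assumption): a second O_RDONLY open of a file we
 * already hold open for reading needs no OPEN on the wire.  With
 * NFS_O_RDONLY_STATE set, state->n_rdonly non-zero and neither O_EXCL
 * nor O_TRUNC requested, can_open_cached() returns non-zero and the
 * caller (see nfs4_try_open_cached() below) just bumps the open counts
 * under the state owner's so_lock:
 *
 *	spin_lock(&state->owner->so_lock);
 *	if (can_open_cached(state, FMODE_READ, 0))
 *		update_open_stateflags(state, FMODE_READ);
 *	spin_unlock(&state->owner->so_lock);
 */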
1141
1142static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
1143{
1144 if (delegation == NULL)
1145 return 0;
1146 if ((delegation->type & fmode) != fmode)
1147 return 0;
1148 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1149 return 0;
1150 nfs_mark_delegation_referenced(delegation);
1151 return 1;
1152}
1153
1154static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1155{
1156 switch (fmode) {
1157 case FMODE_WRITE:
1158 state->n_wronly++;
1159 break;
1160 case FMODE_READ:
1161 state->n_rdonly++;
1162 break;
1163 case FMODE_READ|FMODE_WRITE:
1164 state->n_rdwr++;
1165 }
1166 nfs4_state_set_mode_locked(state, state->state | fmode);
1167}
1168
1169static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1170{
1171 struct nfs_client *clp = state->owner->so_server->nfs_client;
1172 bool need_recover = false;
1173
1174 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1175 need_recover = true;
1176 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1177 need_recover = true;
1178 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1179 need_recover = true;
1180 if (need_recover)
1181 nfs4_state_mark_reclaim_nograce(clp, state);
1182}
1183
1184static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1185 nfs4_stateid *stateid)
1186{
1187 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1188 return true;
1189 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1190 nfs_test_and_clear_all_open_stateid(state);
1191 return true;
1192 }
1193 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1194 return true;
1195 return false;
1196}
1197
1198static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1199{
1200 if (state->n_wronly)
1201 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1202 if (state->n_rdonly)
1203 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1204 if (state->n_rdwr)
1205 set_bit(NFS_O_RDWR_STATE, &state->flags);
1206}
1207
1208static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1209 nfs4_stateid *stateid, fmode_t fmode)
1210{
1211 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1212 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1213 case FMODE_WRITE:
1214 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1215 break;
1216 case FMODE_READ:
1217 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1218 break;
1219 case 0:
1220 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1221 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1222 clear_bit(NFS_OPEN_STATE, &state->flags);
1223 }
1224 if (stateid == NULL)
1225 return;
1226 /* Handle races with OPEN */
1227 if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
1228 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1229 nfs_resync_open_stateid_locked(state);
1230 return;
1231 }
1232 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1233 nfs4_stateid_copy(&state->stateid, stateid);
1234 nfs4_stateid_copy(&state->open_stateid, stateid);
1235}
1236
1237static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1238{
1239 write_seqlock(&state->seqlock);
1240 nfs_clear_open_stateid_locked(state, stateid, fmode);
1241 write_sequnlock(&state->seqlock);
1242 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1243 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1244}
1245
1246static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1247{
1248 switch (fmode) {
1249 case FMODE_READ:
1250 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1251 break;
1252 case FMODE_WRITE:
1253 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1254 break;
1255 case FMODE_READ|FMODE_WRITE:
1256 set_bit(NFS_O_RDWR_STATE, &state->flags);
1257 }
1258 if (!nfs_need_update_open_stateid(state, stateid))
1259 return;
1260 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1261 nfs4_stateid_copy(&state->stateid, stateid);
1262 nfs4_stateid_copy(&state->open_stateid, stateid);
1263}
1264
1265static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1266{
1267 /*
1268 * Protect the call to nfs4_state_set_mode_locked and
1269 * serialise the stateid update
1270 */
1271 write_seqlock(&state->seqlock);
1272 if (deleg_stateid != NULL) {
1273 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1274 set_bit(NFS_DELEGATED_STATE, &state->flags);
1275 }
1276 if (open_stateid != NULL)
1277 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1278 write_sequnlock(&state->seqlock);
1279 spin_lock(&state->owner->so_lock);
1280 update_open_stateflags(state, fmode);
1281 spin_unlock(&state->owner->so_lock);
1282}
1283
1284static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1285{
1286 struct nfs_inode *nfsi = NFS_I(state->inode);
1287 struct nfs_delegation *deleg_cur;
1288 int ret = 0;
1289
1290 fmode &= (FMODE_READ|FMODE_WRITE);
1291
1292 rcu_read_lock();
1293 deleg_cur = rcu_dereference(nfsi->delegation);
1294 if (deleg_cur == NULL)
1295 goto no_delegation;
1296
1297 spin_lock(&deleg_cur->lock);
1298 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1299 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1300 (deleg_cur->type & fmode) != fmode)
1301 goto no_delegation_unlock;
1302
1303 if (delegation == NULL)
1304 delegation = &deleg_cur->stateid;
1305 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1306 goto no_delegation_unlock;
1307
1308 nfs_mark_delegation_referenced(deleg_cur);
1309 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1310 ret = 1;
1311no_delegation_unlock:
1312 spin_unlock(&deleg_cur->lock);
1313no_delegation:
1314 rcu_read_unlock();
1315
1316 if (!ret && open_stateid != NULL) {
1317 __update_open_stateid(state, open_stateid, NULL, fmode);
1318 ret = 1;
1319 }
1320 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1321 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1322
1323 return ret;
1324}
1325
1326static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1327 const nfs4_stateid *stateid)
1328{
1329 struct nfs4_state *state = lsp->ls_state;
1330 bool ret = false;
1331
1332 spin_lock(&state->state_lock);
1333 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1334 goto out_noupdate;
1335 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1336 goto out_noupdate;
1337 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1338 ret = true;
1339out_noupdate:
1340 spin_unlock(&state->state_lock);
1341 return ret;
1342}
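/*
 * Illustrative worked example (assumption): the update above only
 * succeeds when the incoming stateid refers to the same lock state
 * (matching "other" field) and carries a strictly newer seqid.  If
 * lsp->ls_stateid currently has seqid 3 and a LOCK reply returns the
 * same "other" with seqid 4, the copy is performed; a stale reply with
 * seqid 2, or a stateid belonging to a different lock state, is
 * ignored.
 */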
1343
1344static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1345{
1346 struct nfs_delegation *delegation;
1347
1348 rcu_read_lock();
1349 delegation = rcu_dereference(NFS_I(inode)->delegation);
1350 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1351 rcu_read_unlock();
1352 return;
1353 }
1354 rcu_read_unlock();
1355 nfs4_inode_return_delegation(inode);
1356}
1357
1358static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1359{
1360 struct nfs4_state *state = opendata->state;
1361 struct nfs_inode *nfsi = NFS_I(state->inode);
1362 struct nfs_delegation *delegation;
1363 int open_mode = opendata->o_arg.open_flags;
1364 fmode_t fmode = opendata->o_arg.fmode;
1365 nfs4_stateid stateid;
1366 int ret = -EAGAIN;
1367
1368 for (;;) {
1369 spin_lock(&state->owner->so_lock);
1370 if (can_open_cached(state, fmode, open_mode)) {
1371 update_open_stateflags(state, fmode);
1372 spin_unlock(&state->owner->so_lock);
1373 goto out_return_state;
1374 }
1375 spin_unlock(&state->owner->so_lock);
1376 rcu_read_lock();
1377 delegation = rcu_dereference(nfsi->delegation);
1378 if (!can_open_delegated(delegation, fmode)) {
1379 rcu_read_unlock();
1380 break;
1381 }
1382 /* Save the delegation */
1383 nfs4_stateid_copy(&stateid, &delegation->stateid);
1384 rcu_read_unlock();
1385 nfs_release_seqid(opendata->o_arg.seqid);
1386 if (!opendata->is_recover) {
1387 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1388 if (ret != 0)
1389 goto out;
1390 }
1391 ret = -EAGAIN;
1392
1393 /* Try to update the stateid using the delegation */
1394 if (update_open_stateid(state, NULL, &stateid, fmode))
1395 goto out_return_state;
1396 }
1397out:
1398 return ERR_PTR(ret);
1399out_return_state:
1400 atomic_inc(&state->count);
1401 return state;
1402}
1403
1404static void
1405nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1406{
1407 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1408 struct nfs_delegation *delegation;
1409 int delegation_flags = 0;
1410
1411 rcu_read_lock();
1412 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1413 if (delegation)
1414 delegation_flags = delegation->flags;
1415 rcu_read_unlock();
1416 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1417 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1418 "returning a delegation for "
1419 "OPEN(CLAIM_DELEGATE_CUR)\n",
1420 clp->cl_hostname);
1421 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1422 nfs_inode_set_delegation(state->inode,
1423 data->owner->so_cred,
1424 &data->o_res);
1425 else
1426 nfs_inode_reclaim_delegation(state->inode,
1427 data->owner->so_cred,
1428 &data->o_res);
1429}
1430
1431/*
1432 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1433 * and update the nfs4_state.
1434 */
1435static struct nfs4_state *
1436_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1437{
1438 struct inode *inode = data->state->inode;
1439 struct nfs4_state *state = data->state;
1440 int ret;
1441
1442 if (!data->rpc_done) {
1443 if (data->rpc_status) {
1444 ret = data->rpc_status;
1445 goto err;
1446 }
1447 /* cached opens have already been processed */
1448 goto update;
1449 }
1450
1451 ret = nfs_refresh_inode(inode, &data->f_attr);
1452 if (ret)
1453 goto err;
1454
1455 if (data->o_res.delegation_type != 0)
1456 nfs4_opendata_check_deleg(data, state);
1457update:
1458 update_open_stateid(state, &data->o_res.stateid, NULL,
1459 data->o_arg.fmode);
1460 atomic_inc(&state->count);
1461
1462 return state;
1463err:
1464 return ERR_PTR(ret);
1465
1466}
1467
1468static struct nfs4_state *
1469_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1470{
1471 struct inode *inode;
1472 struct nfs4_state *state = NULL;
1473 int ret;
1474
1475 if (!data->rpc_done) {
1476 state = nfs4_try_open_cached(data);
1477 goto out;
1478 }
1479
1480 ret = -EAGAIN;
1481 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1482 goto err;
1483 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1484 ret = PTR_ERR(inode);
1485 if (IS_ERR(inode))
1486 goto err;
1487 ret = -ENOMEM;
1488 state = nfs4_get_open_state(inode, data->owner);
1489 if (state == NULL)
1490 goto err_put_inode;
1491 if (data->o_res.delegation_type != 0)
1492 nfs4_opendata_check_deleg(data, state);
1493 update_open_stateid(state, &data->o_res.stateid, NULL,
1494 data->o_arg.fmode);
1495 iput(inode);
1496out:
1497 nfs_release_seqid(data->o_arg.seqid);
1498 return state;
1499err_put_inode:
1500 iput(inode);
1501err:
1502 return ERR_PTR(ret);
1503}
1504
1505static struct nfs4_state *
1506nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1507{
1508 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1509 return _nfs4_opendata_reclaim_to_nfs4_state(data);
1510 return _nfs4_opendata_to_nfs4_state(data);
1511}
1512
1513static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1514{
1515 struct nfs_inode *nfsi = NFS_I(state->inode);
1516 struct nfs_open_context *ctx;
1517
1518 spin_lock(&state->inode->i_lock);
1519 list_for_each_entry(ctx, &nfsi->open_files, list) {
1520 if (ctx->state != state)
1521 continue;
1522 get_nfs_open_context(ctx);
1523 spin_unlock(&state->inode->i_lock);
1524 return ctx;
1525 }
1526 spin_unlock(&state->inode->i_lock);
1527 return ERR_PTR(-ENOENT);
1528}
1529
1530static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1531 struct nfs4_state *state, enum open_claim_type4 claim)
1532{
1533 struct nfs4_opendata *opendata;
1534
1535 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1536 NULL, NULL, claim, GFP_NOFS);
1537 if (opendata == NULL)
1538 return ERR_PTR(-ENOMEM);
1539 opendata->state = state;
1540 atomic_inc(&state->count);
1541 return opendata;
1542}
1543
1544static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1545{
1546 struct nfs4_state *newstate;
1547 int ret;
1548
1549 if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
1550 opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
1551 (opendata->o_arg.u.delegation_type & fmode) != fmode)
1552 /* This mode can't have been delegated, so we must have
 1553 * a valid open_stateid to cover it - no need to reclaim.
1554 */
1555 return 0;
1556 opendata->o_arg.open_flags = 0;
1557 opendata->o_arg.fmode = fmode;
1558 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1559 NFS_SB(opendata->dentry->d_sb),
1560 fmode, 0);
1561 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1562 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1563 nfs4_init_opendata_res(opendata);
1564 ret = _nfs4_recover_proc_open(opendata);
1565 if (ret != 0)
1566 return ret;
1567 newstate = nfs4_opendata_to_nfs4_state(opendata);
1568 if (IS_ERR(newstate))
1569 return PTR_ERR(newstate);
1570 nfs4_close_state(newstate, fmode);
1571 *res = newstate;
1572 return 0;
1573}
1574
1575static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1576{
1577 struct nfs4_state *newstate;
1578 int ret;
1579
1580 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1581 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1582 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1583 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1584 /* memory barrier prior to reading state->n_* */
1585 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1586 clear_bit(NFS_OPEN_STATE, &state->flags);
1587 smp_rmb();
1588 if (state->n_rdwr != 0) {
1589 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1590 if (ret != 0)
1591 return ret;
1592 if (newstate != state)
1593 return -ESTALE;
1594 }
1595 if (state->n_wronly != 0) {
1596 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1597 if (ret != 0)
1598 return ret;
1599 if (newstate != state)
1600 return -ESTALE;
1601 }
1602 if (state->n_rdonly != 0) {
1603 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1604 if (ret != 0)
1605 return ret;
1606 if (newstate != state)
1607 return -ESTALE;
1608 }
1609 /*
1610 * We may have performed cached opens for all three recoveries.
1611 * Check if we need to update the current stateid.
1612 */
1613 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1614 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1615 write_seqlock(&state->seqlock);
1616 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1617 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1618 write_sequnlock(&state->seqlock);
1619 }
1620 return 0;
1621}
1622
1623/*
1624 * OPEN_RECLAIM:
1625 * reclaim state on the server after a reboot.
1626 */
1627static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1628{
1629 struct nfs_delegation *delegation;
1630 struct nfs4_opendata *opendata;
1631 fmode_t delegation_type = 0;
1632 int status;
1633
1634 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1635 NFS4_OPEN_CLAIM_PREVIOUS);
1636 if (IS_ERR(opendata))
1637 return PTR_ERR(opendata);
1638 rcu_read_lock();
1639 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1640 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1641 delegation_type = delegation->type;
1642 rcu_read_unlock();
1643 opendata->o_arg.u.delegation_type = delegation_type;
1644 status = nfs4_open_recover(opendata, state);
1645 nfs4_opendata_put(opendata);
1646 return status;
1647}
1648
1649static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1650{
1651 struct nfs_server *server = NFS_SERVER(state->inode);
1652 struct nfs4_exception exception = { };
1653 int err;
1654 do {
1655 err = _nfs4_do_open_reclaim(ctx, state);
1656 trace_nfs4_open_reclaim(ctx, 0, err);
1657 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1658 continue;
1659 if (err != -NFS4ERR_DELAY)
1660 break;
1661 nfs4_handle_exception(server, err, &exception);
1662 } while (exception.retry);
1663 return err;
1664}
1665
1666static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1667{
1668 struct nfs_open_context *ctx;
1669 int ret;
1670
1671 ctx = nfs4_state_find_open_context(state);
1672 if (IS_ERR(ctx))
1673 return -EAGAIN;
1674 ret = nfs4_do_open_reclaim(ctx, state);
1675 put_nfs_open_context(ctx);
1676 return ret;
1677}
1678
1679static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1680{
1681 switch (err) {
1682 default:
1683 printk(KERN_ERR "NFS: %s: unhandled error "
1684 "%d.\n", __func__, err);
1685 case 0:
1686 case -ENOENT:
1687 case -EAGAIN:
1688 case -ESTALE:
1689 break;
1690 case -NFS4ERR_BADSESSION:
1691 case -NFS4ERR_BADSLOT:
1692 case -NFS4ERR_BAD_HIGH_SLOT:
1693 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1694 case -NFS4ERR_DEADSESSION:
1695 set_bit(NFS_DELEGATED_STATE, &state->flags);
1696 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1697 return -EAGAIN;
1698 case -NFS4ERR_STALE_CLIENTID:
1699 case -NFS4ERR_STALE_STATEID:
1700 set_bit(NFS_DELEGATED_STATE, &state->flags);
1701 case -NFS4ERR_EXPIRED:
1702 /* Don't recall a delegation if it was lost */
1703 nfs4_schedule_lease_recovery(server->nfs_client);
1704 return -EAGAIN;
1705 case -NFS4ERR_MOVED:
1706 nfs4_schedule_migration_recovery(server);
1707 return -EAGAIN;
1708 case -NFS4ERR_LEASE_MOVED:
1709 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1710 return -EAGAIN;
1711 case -NFS4ERR_DELEG_REVOKED:
1712 case -NFS4ERR_ADMIN_REVOKED:
1713 case -NFS4ERR_BAD_STATEID:
1714 case -NFS4ERR_OPENMODE:
1715 nfs_inode_find_state_and_recover(state->inode,
1716 stateid);
1717 nfs4_schedule_stateid_recovery(server, state);
1718 return -EAGAIN;
1719 case -NFS4ERR_DELAY:
1720 case -NFS4ERR_GRACE:
1721 set_bit(NFS_DELEGATED_STATE, &state->flags);
1722 ssleep(1);
1723 return -EAGAIN;
1724 case -ENOMEM:
1725 case -NFS4ERR_DENIED:
1726 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1727 return 0;
1728 }
1729 return err;
1730}
1731
1732int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1733{
1734 struct nfs_server *server = NFS_SERVER(state->inode);
1735 struct nfs4_opendata *opendata;
1736 int err;
1737
1738 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1739 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1740 if (IS_ERR(opendata))
1741 return PTR_ERR(opendata);
1742 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1743 err = nfs4_open_recover(opendata, state);
1744 nfs4_opendata_put(opendata);
1745 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1746}
1747
1748static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1749{
1750 struct nfs4_opendata *data = calldata;
1751
1752 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1753 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1754}
1755
1756static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1757{
1758 struct nfs4_opendata *data = calldata;
1759
1760 nfs40_sequence_done(task, &data->c_res.seq_res);
1761
1762 data->rpc_status = task->tk_status;
1763 if (data->rpc_status == 0) {
1764 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1765 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1766 renew_lease(data->o_res.server, data->timestamp);
1767 data->rpc_done = 1;
1768 }
1769}
1770
1771static void nfs4_open_confirm_release(void *calldata)
1772{
1773 struct nfs4_opendata *data = calldata;
1774 struct nfs4_state *state = NULL;
1775
1776 /* If this request hasn't been cancelled, do nothing */
1777 if (data->cancelled == 0)
1778 goto out_free;
1779 /* In case of error, no cleanup! */
1780 if (!data->rpc_done)
1781 goto out_free;
1782 state = nfs4_opendata_to_nfs4_state(data);
1783 if (!IS_ERR(state))
1784 nfs4_close_state(state, data->o_arg.fmode);
1785out_free:
1786 nfs4_opendata_put(data);
1787}
1788
1789static const struct rpc_call_ops nfs4_open_confirm_ops = {
1790 .rpc_call_prepare = nfs4_open_confirm_prepare,
1791 .rpc_call_done = nfs4_open_confirm_done,
1792 .rpc_release = nfs4_open_confirm_release,
1793};
1794
1795/*
1796 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1797 */
1798static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1799{
1800 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
1801 struct rpc_task *task;
1802 struct rpc_message msg = {
1803 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1804 .rpc_argp = &data->c_arg,
1805 .rpc_resp = &data->c_res,
1806 .rpc_cred = data->owner->so_cred,
1807 };
1808 struct rpc_task_setup task_setup_data = {
1809 .rpc_client = server->client,
1810 .rpc_message = &msg,
1811 .callback_ops = &nfs4_open_confirm_ops,
1812 .callback_data = data,
1813 .workqueue = nfsiod_workqueue,
1814 .flags = RPC_TASK_ASYNC,
1815 };
1816 int status;
1817
1818 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
1819 kref_get(&data->kref);
1820 data->rpc_done = 0;
1821 data->rpc_status = 0;
1822 data->timestamp = jiffies;
1823 task = rpc_run_task(&task_setup_data);
1824 if (IS_ERR(task))
1825 return PTR_ERR(task);
1826 status = nfs4_wait_for_completion_rpc_task(task);
1827 if (status != 0) {
1828 data->cancelled = 1;
1829 smp_wmb();
1830 } else
1831 status = data->rpc_status;
1832 rpc_put_task(task);
1833 return status;
1834}
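/*
 * Illustrative sketch (assumption): the run/wait/cancel pattern used
 * above (and again in nfs4_run_open_task() below).  The RPC is started
 * asynchronously, then the caller waits for completion; if the wait is
 * interrupted, data->cancelled is set so that the .rpc_release callback
 * knows it must finish the cleanup (closing any state the server may
 * have established) on the caller's behalf.
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	status = nfs4_wait_for_completion_rpc_task(task);
 *	if (status != 0)
 *		data->cancelled = 1;	// rpc_release will clean up
 *	else
 *		status = data->rpc_status;
 *	rpc_put_task(task);
 */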
1835
1836static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1837{
1838 struct nfs4_opendata *data = calldata;
1839 struct nfs4_state_owner *sp = data->owner;
1840 struct nfs_client *clp = sp->so_server->nfs_client;
1841
1842 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1843 goto out_wait;
1844 /*
1845 * Check if we still need to send an OPEN call, or if we can use
1846 * a delegation instead.
1847 */
1848 if (data->state != NULL) {
1849 struct nfs_delegation *delegation;
1850
1851 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1852 goto out_no_action;
1853 rcu_read_lock();
1854 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1855 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1856 data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
1857 can_open_delegated(delegation, data->o_arg.fmode))
1858 goto unlock_no_action;
1859 rcu_read_unlock();
1860 }
1861 /* Update client id. */
1862 data->o_arg.clientid = clp->cl_clientid;
1863 switch (data->o_arg.claim) {
1864 case NFS4_OPEN_CLAIM_PREVIOUS:
1865 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1866 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1867 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
1868 case NFS4_OPEN_CLAIM_FH:
1869 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1870 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1871 }
1872 data->timestamp = jiffies;
1873 if (nfs4_setup_sequence(data->o_arg.server,
1874 &data->o_arg.seq_args,
1875 &data->o_res.seq_res,
1876 task) != 0)
1877 nfs_release_seqid(data->o_arg.seqid);
1878
1879 /* Set the create mode (note dependency on the session type) */
1880 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
1881 if (data->o_arg.open_flags & O_EXCL) {
1882 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
1883 if (nfs4_has_persistent_session(clp))
1884 data->o_arg.createmode = NFS4_CREATE_GUARDED;
1885 else if (clp->cl_mvops->minor_version > 0)
1886 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
1887 }
1888 return;
1889unlock_no_action:
1890 rcu_read_unlock();
1891out_no_action:
1892 task->tk_action = NULL;
1893out_wait:
1894 nfs4_sequence_done(task, &data->o_res.seq_res);
1895}
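/*
 * Createmode selection above, by way of example: an O_EXCL open on a
 * plain NFSv4.0 mount uses NFS4_CREATE_EXCLUSIVE; on NFSv4.1+ it uses
 * NFS4_CREATE_EXCLUSIVE4_1, which also lets attributes accompany the
 * verifier; with a persistent session NFS4_CREATE_GUARDED is enough,
 * presumably because the reply cache already guarantees exactly-once
 * semantics across a server restart.
 */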
1896
1897static void nfs4_open_done(struct rpc_task *task, void *calldata)
1898{
1899 struct nfs4_opendata *data = calldata;
1900
1901 data->rpc_status = task->tk_status;
1902
1903 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1904 return;
1905
1906 if (task->tk_status == 0) {
1907 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
1908 switch (data->o_res.f_attr->mode & S_IFMT) {
1909 case S_IFREG:
1910 break;
1911 case S_IFLNK:
1912 data->rpc_status = -ELOOP;
1913 break;
1914 case S_IFDIR:
1915 data->rpc_status = -EISDIR;
1916 break;
1917 default:
1918 data->rpc_status = -ENOTDIR;
1919 }
1920 }
1921 renew_lease(data->o_res.server, data->timestamp);
1922 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1923 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1924 }
1925 data->rpc_done = 1;
1926}
1927
1928static void nfs4_open_release(void *calldata)
1929{
1930 struct nfs4_opendata *data = calldata;
1931 struct nfs4_state *state = NULL;
1932
1933 /* If this request hasn't been cancelled, do nothing */
1934 if (data->cancelled == 0)
1935 goto out_free;
1936 /* In case of error, no cleanup! */
1937 if (data->rpc_status != 0 || !data->rpc_done)
1938 goto out_free;
1939 /* In case we need an open_confirm, no cleanup! */
1940 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1941 goto out_free;
1942 state = nfs4_opendata_to_nfs4_state(data);
1943 if (!IS_ERR(state))
1944 nfs4_close_state(state, data->o_arg.fmode);
1945out_free:
1946 nfs4_opendata_put(data);
1947}
1948
1949static const struct rpc_call_ops nfs4_open_ops = {
1950 .rpc_call_prepare = nfs4_open_prepare,
1951 .rpc_call_done = nfs4_open_done,
1952 .rpc_release = nfs4_open_release,
1953};
1954
1955static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1956{
1957 struct inode *dir = d_inode(data->dir);
1958 struct nfs_server *server = NFS_SERVER(dir);
1959 struct nfs_openargs *o_arg = &data->o_arg;
1960 struct nfs_openres *o_res = &data->o_res;
1961 struct rpc_task *task;
1962 struct rpc_message msg = {
1963 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1964 .rpc_argp = o_arg,
1965 .rpc_resp = o_res,
1966 .rpc_cred = data->owner->so_cred,
1967 };
1968 struct rpc_task_setup task_setup_data = {
1969 .rpc_client = server->client,
1970 .rpc_message = &msg,
1971 .callback_ops = &nfs4_open_ops,
1972 .callback_data = data,
1973 .workqueue = nfsiod_workqueue,
1974 .flags = RPC_TASK_ASYNC,
1975 };
1976 int status;
1977
1978 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1979 kref_get(&data->kref);
1980 data->rpc_done = 0;
1981 data->rpc_status = 0;
1982 data->cancelled = 0;
1983 data->is_recover = 0;
1984 if (isrecover) {
1985 nfs4_set_sequence_privileged(&o_arg->seq_args);
1986 data->is_recover = 1;
1987 }
1988 task = rpc_run_task(&task_setup_data);
1989 if (IS_ERR(task))
1990 return PTR_ERR(task);
1991 status = nfs4_wait_for_completion_rpc_task(task);
1992 if (status != 0) {
1993 data->cancelled = 1;
1994 smp_wmb();
1995 } else
1996 status = data->rpc_status;
1997 rpc_put_task(task);
1998
1999 return status;
2000}
2001
2002static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2003{
2004 struct inode *dir = d_inode(data->dir);
2005 struct nfs_openres *o_res = &data->o_res;
2006 int status;
2007
2008 status = nfs4_run_open_task(data, 1);
2009 if (status != 0 || !data->rpc_done)
2010 return status;
2011
2012 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2013
2014 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2015 status = _nfs4_proc_open_confirm(data);
2016 if (status != 0)
2017 return status;
2018 }
2019
2020 return status;
2021}
2022
2023/*
2024 * Additional permission checks in order to distinguish between an
2025 * open for read, and an open for execute. This works around the
2026 * fact that NFSv4 OPEN treats read and execute permissions as being
2027 * the same.
2028 * Note that in the non-execute case, we want to turn off permission
2029 * checking if we just created a new file (POSIX open() semantics).
2030 */
2031static int nfs4_opendata_access(struct rpc_cred *cred,
2032 struct nfs4_opendata *opendata,
2033 struct nfs4_state *state, fmode_t fmode,
2034 int openflags)
2035{
2036 struct nfs_access_entry cache;
2037 u32 mask;
2038
2039 /* access call failed or for some reason the server doesn't
2040 * support any access modes -- defer access call until later */
2041 if (opendata->o_res.access_supported == 0)
2042 return 0;
2043
2044 mask = 0;
2045 /*
2046 * Use openflags to check for exec, because fmode won't
2047 * always have FMODE_EXEC set when the file is opened for exec.
2048 */
2049 if (openflags & __FMODE_EXEC) {
2050 /* ONLY check for exec rights */
2051 mask = MAY_EXEC;
2052 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2053 mask = MAY_READ;
2054
2055 cache.cred = cred;
2056 cache.jiffies = jiffies;
2057 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2058 nfs_access_add_cache(state->inode, &cache);
2059
2060 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2061 return 0;
2062
2063 /* even though OPEN succeeded, access is denied. Close the file */
2064 nfs4_close_state(state, fmode);
2065 return -EACCES;
2066}
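/*
 * Worked example: an open for execute (__FMODE_EXEC in openflags) asks
 * only for MAY_EXEC.  If the ACCESS result piggy-backed on the OPEN
 * granted read but not execute, then
 *	mask & ~cache.mask & (MAY_READ | MAY_EXEC) == MAY_EXEC
 * which is non-zero, so the state that OPEN just set up is closed again
 * and -EACCES is returned to the caller.
 */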
2067
2068/*
2069 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2070 */
2071static int _nfs4_proc_open(struct nfs4_opendata *data)
2072{
2073 struct inode *dir = d_inode(data->dir);
2074 struct nfs_server *server = NFS_SERVER(dir);
2075 struct nfs_openargs *o_arg = &data->o_arg;
2076 struct nfs_openres *o_res = &data->o_res;
2077 int status;
2078
2079 status = nfs4_run_open_task(data, 0);
2080 if (!data->rpc_done)
2081 return status;
2082 if (status != 0) {
2083 if (status == -NFS4ERR_BADNAME &&
2084 !(o_arg->open_flags & O_CREAT))
2085 return -ENOENT;
2086 return status;
2087 }
2088
2089 nfs_fattr_map_and_free_names(server, &data->f_attr);
2090
2091 if (o_arg->open_flags & O_CREAT) {
2092 update_changeattr(dir, &o_res->cinfo);
2093 if (o_arg->open_flags & O_EXCL)
2094 data->file_created = 1;
2095 else if (o_res->cinfo.before != o_res->cinfo.after)
2096 data->file_created = 1;
2097 }
2098 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2099 server->caps &= ~NFS_CAP_POSIX_LOCK;
2100 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2101 status = _nfs4_proc_open_confirm(data);
2102 if (status != 0)
2103 return status;
2104 }
2105 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2106 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2107 return 0;
2108}
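/*
 * Note on the file_created heuristic above: with O_CREAT|O_EXCL the file
 * must have been created by this OPEN, so file_created is set
 * unconditionally.  With plain O_CREAT the only hint is the directory's
 * change attribute: if cinfo.before != cinfo.after, the directory was
 * modified by the OPEN and the entry is assumed to be newly created.
 * nfs4_opendata_access() uses the flag to skip the MAY_READ check on a
 * file we just created, matching POSIX open() semantics.
 */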
2109
2110static int nfs4_recover_expired_lease(struct nfs_server *server)
2111{
2112 return nfs4_client_recover_expired_lease(server->nfs_client);
2113}
2114
2115/*
2116 * OPEN_EXPIRED:
2117 * reclaim state on the server after a network partition.
2118 * Assumes caller holds the appropriate lock
2119 */
2120static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2121{
2122 struct nfs4_opendata *opendata;
2123 int ret;
2124
2125 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2126 NFS4_OPEN_CLAIM_FH);
2127 if (IS_ERR(opendata))
2128 return PTR_ERR(opendata);
2129 ret = nfs4_open_recover(opendata, state);
2130 if (ret == -ESTALE)
2131 d_drop(ctx->dentry);
2132 nfs4_opendata_put(opendata);
2133 return ret;
2134}
2135
2136static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2137{
2138 struct nfs_server *server = NFS_SERVER(state->inode);
2139 struct nfs4_exception exception = { };
2140 int err;
2141
2142 do {
2143 err = _nfs4_open_expired(ctx, state);
2144 trace_nfs4_open_expired(ctx, 0, err);
2145 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2146 continue;
2147 switch (err) {
2148 default:
2149 goto out;
2150 case -NFS4ERR_GRACE:
2151 case -NFS4ERR_DELAY:
2152 nfs4_handle_exception(server, err, &exception);
2153 err = 0;
2154 }
2155 } while (exception.retry);
2156out:
2157 return err;
2158}
2159
2160static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2161{
2162 struct nfs_open_context *ctx;
2163 int ret;
2164
2165 ctx = nfs4_state_find_open_context(state);
2166 if (IS_ERR(ctx))
2167 return -EAGAIN;
2168 ret = nfs4_do_open_expired(ctx, state);
2169 put_nfs_open_context(ctx);
2170 return ret;
2171}
2172
2173static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2174{
2175 nfs_remove_bad_delegation(state->inode);
2176 write_seqlock(&state->seqlock);
2177 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2178 write_sequnlock(&state->seqlock);
2179 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2180}
2181
2182static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2183{
2184 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2185 nfs_finish_clear_delegation_stateid(state);
2186}
2187
2188static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2189{
2190 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2191 nfs40_clear_delegation_stateid(state);
2192 return nfs4_open_expired(sp, state);
2193}
2194
2195#if defined(CONFIG_NFS_V4_1)
2196static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2197{
2198 struct nfs_server *server = NFS_SERVER(state->inode);
2199 nfs4_stateid stateid;
2200 struct nfs_delegation *delegation;
2201 struct rpc_cred *cred;
2202 int status;
2203
2204 /* Get the delegation credential for use by test/free_stateid */
2205 rcu_read_lock();
2206 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2207 if (delegation == NULL) {
2208 rcu_read_unlock();
2209 return;
2210 }
2211
2212 nfs4_stateid_copy(&stateid, &delegation->stateid);
2213 cred = get_rpccred(delegation->cred);
2214 rcu_read_unlock();
2215 status = nfs41_test_stateid(server, &stateid, cred);
2216 trace_nfs4_test_delegation_stateid(state, NULL, status);
2217
2218 if (status != NFS_OK) {
2219 /* Free the stateid unless the server explicitly
2220 * informs us the stateid is unrecognized. */
2221 if (status != -NFS4ERR_BAD_STATEID)
2222 nfs41_free_stateid(server, &stateid, cred);
2223 nfs_finish_clear_delegation_stateid(state);
2224 }
2225
2226 put_rpccred(cred);
2227}
2228
2229/**
2230 * nfs41_check_open_stateid - possibly free an open stateid
2231 *
2232 * @state: NFSv4 state for an inode
2233 *
2234 * Returns NFS_OK if recovery for this stateid is now finished.
2235 * Otherwise a negative NFS4ERR value is returned.
2236 */
2237static int nfs41_check_open_stateid(struct nfs4_state *state)
2238{
2239 struct nfs_server *server = NFS_SERVER(state->inode);
2240 nfs4_stateid *stateid = &state->open_stateid;
2241 struct rpc_cred *cred = state->owner->so_cred;
2242 int status;
2243
2244 /* If a state reset has been done, test_stateid is unneeded */
2245 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2246 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2247 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2248 return -NFS4ERR_BAD_STATEID;
2249
2250 status = nfs41_test_stateid(server, stateid, cred);
2251 trace_nfs4_test_open_stateid(state, NULL, status);
2252 if (status != NFS_OK) {
2253 /* Free the stateid unless the server explicitly
2254 * informs us the stateid is unrecognized. */
2255 if (status != -NFS4ERR_BAD_STATEID)
2256 nfs41_free_stateid(server, stateid, cred);
2257
2258 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2259 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2260 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2261 clear_bit(NFS_OPEN_STATE, &state->flags);
2262 }
2263 return status;
2264}
2265
2266static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2267{
2268 int status;
2269
2270 nfs41_check_delegation_stateid(state);
2271 status = nfs41_check_open_stateid(state);
2272 if (status != NFS_OK)
2273 status = nfs4_open_expired(sp, state);
2274 return status;
2275}
2276#endif
2277
2278/*
2279 * On an EXCLUSIVE create, the server should send back a bitmask with the FATTR4_*
2280 * fields corresponding to the attributes that were used to store the verifier.
2281 * Make sure we clobber those fields in the later setattr call.
2282 */
2283static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
2284{
2285 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2286 !(sattr->ia_valid & ATTR_ATIME_SET))
2287 sattr->ia_valid |= ATTR_ATIME;
2288
2289 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2290 !(sattr->ia_valid & ATTR_MTIME_SET))
2291 sattr->ia_valid |= ATTR_MTIME;
2292}
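/*
 * Example: if attrset[1] reports that the server stored the exclusive
 * create verifier in the access and modify times, and the caller did not
 * supply explicit times (no ATTR_ATIME_SET/ATTR_MTIME_SET), then
 * ATTR_ATIME and ATTR_MTIME are turned on here so that the SETATTR
 * issued from _nfs4_do_open() resets both timestamps to the current
 * time, clobbering the verifier as required.
 */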
2293
2294static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2295 fmode_t fmode,
2296 int flags,
2297 struct nfs_open_context *ctx)
2298{
2299 struct nfs4_state_owner *sp = opendata->owner;
2300 struct nfs_server *server = sp->so_server;
2301 struct dentry *dentry;
2302 struct nfs4_state *state;
2303 unsigned int seq;
2304 int ret;
2305
2306 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2307
2308 ret = _nfs4_proc_open(opendata);
2309 if (ret != 0)
2310 goto out;
2311
2312 state = nfs4_opendata_to_nfs4_state(opendata);
2313 ret = PTR_ERR(state);
2314 if (IS_ERR(state))
2315 goto out;
2316 if (server->caps & NFS_CAP_POSIX_LOCK)
2317 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2318
2319 dentry = opendata->dentry;
2320 if (d_really_is_negative(dentry)) {
2321 /* FIXME: Is this d_drop() ever needed? */
2322 d_drop(dentry);
2323 dentry = d_add_unique(dentry, igrab(state->inode));
2324 if (dentry == NULL) {
2325 dentry = opendata->dentry;
2326 } else if (dentry != ctx->dentry) {
2327 dput(ctx->dentry);
2328 ctx->dentry = dget(dentry);
2329 }
2330 nfs_set_verifier(dentry,
2331 nfs_save_change_attribute(d_inode(opendata->dir)));
2332 }
2333
2334 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2335 if (ret != 0)
2336 goto out;
2337
2338 ctx->state = state;
2339 if (d_inode(dentry) == state->inode) {
2340 nfs_inode_attach_open_context(ctx);
2341 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2342 nfs4_schedule_stateid_recovery(server, state);
2343 }
2344out:
2345 return ret;
2346}
2347
2348/*
2349 * Returns a referenced nfs4_state
2350 */
2351static int _nfs4_do_open(struct inode *dir,
2352 struct nfs_open_context *ctx,
2353 int flags,
2354 struct iattr *sattr,
2355 struct nfs4_label *label,
2356 int *opened)
2357{
2358 struct nfs4_state_owner *sp;
2359 struct nfs4_state *state = NULL;
2360 struct nfs_server *server = NFS_SERVER(dir);
2361 struct nfs4_opendata *opendata;
2362 struct dentry *dentry = ctx->dentry;
2363 struct rpc_cred *cred = ctx->cred;
2364 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2365 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2366 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2367 struct nfs4_label *olabel = NULL;
2368 int status;
2369
2370 /* Protect against reboot recovery conflicts */
2371 status = -ENOMEM;
2372 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2373 if (sp == NULL) {
2374 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2375 goto out_err;
2376 }
2377 status = nfs4_recover_expired_lease(server);
2378 if (status != 0)
2379 goto err_put_state_owner;
2380 if (d_really_is_positive(dentry))
2381 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2382 status = -ENOMEM;
2383 if (d_really_is_positive(dentry))
2384 claim = NFS4_OPEN_CLAIM_FH;
2385 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2386 label, claim, GFP_KERNEL);
2387 if (opendata == NULL)
2388 goto err_put_state_owner;
2389
2390 if (label) {
2391 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2392 if (IS_ERR(olabel)) {
2393 status = PTR_ERR(olabel);
2394 goto err_opendata_put;
2395 }
2396 }
2397
2398 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2399 if (!opendata->f_attr.mdsthreshold) {
2400 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2401 if (!opendata->f_attr.mdsthreshold)
2402 goto err_free_label;
2403 }
2404 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2405 }
2406 if (d_really_is_positive(dentry))
2407 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2408
2409 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2410 if (status != 0)
2411 goto err_free_label;
2412 state = ctx->state;
2413
2414 if ((opendata->o_arg.open_flags & O_EXCL) &&
2415 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2416 nfs4_exclusive_attrset(opendata, sattr);
2417
2418 nfs_fattr_init(opendata->o_res.f_attr);
2419 status = nfs4_do_setattr(state->inode, cred,
2420 opendata->o_res.f_attr, sattr,
2421 state, label, olabel);
2422 if (status == 0) {
2423 nfs_setattr_update_inode(state->inode, sattr,
2424 opendata->o_res.f_attr);
2425 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2426 }
2427 }
2428 if (opendata->file_created)
2429 *opened |= FILE_CREATED;
2430
2431 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2432 *ctx_th = opendata->f_attr.mdsthreshold;
2433 opendata->f_attr.mdsthreshold = NULL;
2434 }
2435
2436 nfs4_label_free(olabel);
2437
2438 nfs4_opendata_put(opendata);
2439 nfs4_put_state_owner(sp);
2440 return 0;
2441err_free_label:
2442 nfs4_label_free(olabel);
2443err_opendata_put:
2444 nfs4_opendata_put(opendata);
2445err_put_state_owner:
2446 nfs4_put_state_owner(sp);
2447out_err:
2448 return status;
2449}
2450
2451
2452static struct nfs4_state *nfs4_do_open(struct inode *dir,
2453 struct nfs_open_context *ctx,
2454 int flags,
2455 struct iattr *sattr,
2456 struct nfs4_label *label,
2457 int *opened)
2458{
2459 struct nfs_server *server = NFS_SERVER(dir);
2460 struct nfs4_exception exception = { };
2461 struct nfs4_state *res;
2462 int status;
2463
2464 do {
2465 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2466 res = ctx->state;
2467 trace_nfs4_open_file(ctx, flags, status);
2468 if (status == 0)
2469 break;
2470 /* NOTE: BAD_SEQID means the server and client disagree about the
2471 * book-keeping w.r.t. state-changing operations
2472 * (OPEN/CLOSE/LOCK/LOCKU...)
2473 * It is actually a sign of a bug on the client or on the server.
2474 *
2475 * If we receive a BAD_SEQID error in the particular case of
2476 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2477 * have unhashed the old state_owner for us, and that we can
2478 * therefore safely retry using a new one. We should still warn
2479 * the user though...
2480 */
2481 if (status == -NFS4ERR_BAD_SEQID) {
2482 pr_warn_ratelimited("NFS: v4 server %s "
2483 "returned a bad sequence-id error!\n",
2484 NFS_SERVER(dir)->nfs_client->cl_hostname);
2485 exception.retry = 1;
2486 continue;
2487 }
2488 /*
2489 * BAD_STATEID on OPEN means that the server cancelled our
2490 * state before it received the OPEN_CONFIRM.
2491 * Recover by retrying the request as per the discussion
2492 * on Page 181 of RFC3530.
2493 */
2494 if (status == -NFS4ERR_BAD_STATEID) {
2495 exception.retry = 1;
2496 continue;
2497 }
2498 if (status == -EAGAIN) {
2499 /* We must have found a delegation */
2500 exception.retry = 1;
2501 continue;
2502 }
2503 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2504 continue;
2505 res = ERR_PTR(nfs4_handle_exception(server,
2506 status, &exception));
2507 } while (exception.retry);
2508 return res;
2509}
2510
2511static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2512 struct nfs_fattr *fattr, struct iattr *sattr,
2513 struct nfs4_state *state, struct nfs4_label *ilabel,
2514 struct nfs4_label *olabel)
2515{
2516 struct nfs_server *server = NFS_SERVER(inode);
2517 struct nfs_setattrargs arg = {
2518 .fh = NFS_FH(inode),
2519 .iap = sattr,
2520 .server = server,
2521 .bitmask = server->attr_bitmask,
2522 .label = ilabel,
2523 };
2524 struct nfs_setattrres res = {
2525 .fattr = fattr,
2526 .label = olabel,
2527 .server = server,
2528 };
2529 struct rpc_message msg = {
2530 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2531 .rpc_argp = &arg,
2532 .rpc_resp = &res,
2533 .rpc_cred = cred,
2534 };
2535 unsigned long timestamp = jiffies;
2536 fmode_t fmode;
2537 bool truncate;
2538 int status;
2539
2540 arg.bitmask = nfs4_bitmask(server, ilabel);
2541 if (ilabel)
2542 arg.bitmask = nfs4_bitmask(server, olabel);
2543
2544 nfs_fattr_init(fattr);
2545
2546 /* Servers should only apply open mode checks for file size changes */
2547 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
2548 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2549
2550 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
2551 /* Use that stateid */
2552 } else if (truncate && state != NULL) {
2553 struct nfs_lockowner lockowner = {
2554 .l_owner = current->files,
2555 .l_pid = current->tgid,
2556 };
2557 if (!nfs4_valid_open_stateid(state))
2558 return -EBADF;
2559 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2560 &lockowner) == -EIO)
2561 return -EBADF;
2562 } else
2563 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2564
2565 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2566 if (status == 0 && state != NULL)
2567 renew_lease(server, timestamp);
2568 return status;
2569}
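/*
 * Stateid selection above, in order of preference: a delegation stateid
 * covering the required open mode; otherwise, when the size is being
 * changed, a lock or open stateid picked by nfs4_select_rw_stateid() for
 * the current lock owner; otherwise the all-zero anonymous stateid.  For
 * instance, a plain chmod() with no open file and no delegation goes out
 * with the zero stateid, which is fine because servers should only apply
 * open mode checks to size changes (see the comment above).
 */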
2570
2571static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2572 struct nfs_fattr *fattr, struct iattr *sattr,
2573 struct nfs4_state *state, struct nfs4_label *ilabel,
2574 struct nfs4_label *olabel)
2575{
2576 struct nfs_server *server = NFS_SERVER(inode);
2577 struct nfs4_exception exception = {
2578 .state = state,
2579 .inode = inode,
2580 };
2581 int err;
2582 do {
2583 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
2584 trace_nfs4_setattr(inode, err);
2585 switch (err) {
2586 case -NFS4ERR_OPENMODE:
2587 if (!(sattr->ia_valid & ATTR_SIZE)) {
2588 pr_warn_once("NFSv4: server %s is incorrectly "
2589 "applying open mode checks to "
2590 "a SETATTR that is not "
2591 "changing file size.\n",
2592 server->nfs_client->cl_hostname);
2593 }
2594 if (state && !(state->state & FMODE_WRITE)) {
2595 err = -EBADF;
2596 if (sattr->ia_valid & ATTR_OPEN)
2597 err = -EACCES;
2598 goto out;
2599 }
2600 }
2601 err = nfs4_handle_exception(server, err, &exception);
2602 } while (exception.retry);
2603out:
2604 return err;
2605}
2606
2607struct nfs4_closedata {
2608 struct inode *inode;
2609 struct nfs4_state *state;
2610 struct nfs_closeargs arg;
2611 struct nfs_closeres res;
2612 struct nfs_fattr fattr;
2613 unsigned long timestamp;
2614 bool roc;
2615 u32 roc_barrier;
2616};
2617
2618static void nfs4_free_closedata(void *data)
2619{
2620 struct nfs4_closedata *calldata = data;
2621 struct nfs4_state_owner *sp = calldata->state->owner;
2622 struct super_block *sb = calldata->state->inode->i_sb;
2623
2624 if (calldata->roc)
2625 pnfs_roc_release(calldata->state->inode);
2626 nfs4_put_open_state(calldata->state);
2627 nfs_free_seqid(calldata->arg.seqid);
2628 nfs4_put_state_owner(sp);
2629 nfs_sb_deactive(sb);
2630 kfree(calldata);
2631}
2632
2633static void nfs4_close_done(struct rpc_task *task, void *data)
2634{
2635 struct nfs4_closedata *calldata = data;
2636 struct nfs4_state *state = calldata->state;
2637 struct nfs_server *server = NFS_SERVER(calldata->inode);
2638 nfs4_stateid *res_stateid = NULL;
2639
2640 dprintk("%s: begin!\n", __func__);
2641 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2642 return;
2643 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2644 /* hmm. we are done with the inode, and in the process of freeing
2645 * the state_owner. we keep this around to process errors
2646 */
2647 switch (task->tk_status) {
2648 case 0:
2649 res_stateid = &calldata->res.stateid;
2650 if (calldata->arg.fmode == 0 && calldata->roc)
2651 pnfs_roc_set_barrier(state->inode,
2652 calldata->roc_barrier);
2653 renew_lease(server, calldata->timestamp);
2654 break;
2655 case -NFS4ERR_ADMIN_REVOKED:
2656 case -NFS4ERR_STALE_STATEID:
2657 case -NFS4ERR_OLD_STATEID:
2658 case -NFS4ERR_BAD_STATEID:
2659 case -NFS4ERR_EXPIRED:
2660 if (!nfs4_stateid_match(&calldata->arg.stateid,
2661 &state->open_stateid)) {
2662 rpc_restart_call_prepare(task);
2663 goto out_release;
2664 }
2665 if (calldata->arg.fmode == 0)
2666 break;
2667 default:
2668 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2669 rpc_restart_call_prepare(task);
2670 goto out_release;
2671 }
2672 }
2673 nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
2674out_release:
2675 nfs_release_seqid(calldata->arg.seqid);
2676 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2677 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2678}
2679
2680static void nfs4_close_prepare(struct rpc_task *task, void *data)
2681{
2682 struct nfs4_closedata *calldata = data;
2683 struct nfs4_state *state = calldata->state;
2684 struct inode *inode = calldata->inode;
2685 bool is_rdonly, is_wronly, is_rdwr;
2686 int call_close = 0;
2687
2688 dprintk("%s: begin!\n", __func__);
2689 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2690 goto out_wait;
2691
2692 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2693 spin_lock(&state->owner->so_lock);
2694 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2695 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2696 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2697 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2698 /* Calculate the change in open mode */
2699 calldata->arg.fmode = 0;
2700 if (state->n_rdwr == 0) {
2701 if (state->n_rdonly == 0)
2702 call_close |= is_rdonly;
2703 else if (is_rdonly)
2704 calldata->arg.fmode |= FMODE_READ;
2705 if (state->n_wronly == 0)
2706 call_close |= is_wronly;
2707 else if (is_wronly)
2708 calldata->arg.fmode |= FMODE_WRITE;
2709 } else if (is_rdwr)
2710 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2711
2712 if (calldata->arg.fmode == 0)
2713 call_close |= is_rdwr;
2714
2715 if (!nfs4_valid_open_stateid(state))
2716 call_close = 0;
2717 spin_unlock(&state->owner->so_lock);
2718
2719 if (!call_close) {
2720 /* Note: exit _without_ calling nfs4_close_done */
2721 goto out_no_action;
2722 }
2723
2724 if (calldata->arg.fmode == 0) {
2725 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2726 if (calldata->roc &&
2727 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) {
2728 nfs_release_seqid(calldata->arg.seqid);
2729 goto out_wait;
2730 }
2731 }
2732 calldata->arg.share_access =
2733 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2734 calldata->arg.fmode, 0);
2735
2736 nfs_fattr_init(calldata->res.fattr);
2737 calldata->timestamp = jiffies;
2738 if (nfs4_setup_sequence(NFS_SERVER(inode),
2739 &calldata->arg.seq_args,
2740 &calldata->res.seq_res,
2741 task) != 0)
2742 nfs_release_seqid(calldata->arg.seqid);
2743 dprintk("%s: done!\n", __func__);
2744 return;
2745out_no_action:
2746 task->tk_action = NULL;
2747out_wait:
2748 nfs4_sequence_done(task, &calldata->res.seq_res);
2749}
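/*
 * Worked example of the downgrade calculation: suppose the last O_RDWR
 * descriptor was just closed but read-only opens remain
 * (n_rdwr == 0, n_rdonly > 0, n_wronly == 0), and the open stateid
 * currently covers both read and write.  Then arg.fmode ends up as
 * FMODE_READ and call_close is set, so the task stays an OPEN_DOWNGRADE
 * that drops the write share.  Only when arg.fmode works out to zero is
 * the procedure switched to a full CLOSE.
 */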
2750
2751static const struct rpc_call_ops nfs4_close_ops = {
2752 .rpc_call_prepare = nfs4_close_prepare,
2753 .rpc_call_done = nfs4_close_done,
2754 .rpc_release = nfs4_free_closedata,
2755};
2756
2757static bool nfs4_roc(struct inode *inode)
2758{
2759 if (!nfs_have_layout(inode))
2760 return false;
2761 return pnfs_roc(inode);
2762}
2763
2764/*
2765 * It is possible for data to be read/written from a mem-mapped file
2766 * after the sys_close call (which hits the vfs layer as a flush).
2767 * This means that we can't safely call nfsv4 close on a file until
2768 * the inode is cleared. This in turn means that we are not good
2769 * NFSv4 citizens - we do not indicate to the server to update the file's
2770 * share state even when we are done with one of the three share
2771 * stateids in the inode.
2772 *
2773 * NOTE: Caller must be holding the sp->so_owner semaphore!
2774 */
2775int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
2776{
2777 struct nfs_server *server = NFS_SERVER(state->inode);
2778 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
2779 struct nfs4_closedata *calldata;
2780 struct nfs4_state_owner *sp = state->owner;
2781 struct rpc_task *task;
2782 struct rpc_message msg = {
2783 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2784 .rpc_cred = state->owner->so_cred,
2785 };
2786 struct rpc_task_setup task_setup_data = {
2787 .rpc_client = server->client,
2788 .rpc_message = &msg,
2789 .callback_ops = &nfs4_close_ops,
2790 .workqueue = nfsiod_workqueue,
2791 .flags = RPC_TASK_ASYNC,
2792 };
2793 int status = -ENOMEM;
2794
2795 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
2796 &task_setup_data.rpc_client, &msg);
2797
2798 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2799 if (calldata == NULL)
2800 goto out;
2801 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2802 calldata->inode = state->inode;
2803 calldata->state = state;
2804 calldata->arg.fh = NFS_FH(state->inode);
2805 /* Serialization for the sequence id */
2806 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
2807 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
2808 if (IS_ERR(calldata->arg.seqid))
2809 goto out_free_calldata;
2810 calldata->arg.fmode = 0;
2811 calldata->arg.bitmask = server->cache_consistency_bitmask;
2812 calldata->res.fattr = &calldata->fattr;
2813 calldata->res.seqid = calldata->arg.seqid;
2814 calldata->res.server = server;
2815 calldata->roc = nfs4_roc(state->inode);
2816 nfs_sb_active(calldata->inode->i_sb);
2817
2818 msg.rpc_argp = &calldata->arg;
2819 msg.rpc_resp = &calldata->res;
2820 task_setup_data.callback_data = calldata;
2821 task = rpc_run_task(&task_setup_data);
2822 if (IS_ERR(task))
2823 return PTR_ERR(task);
2824 status = 0;
2825 if (wait)
2826 status = rpc_wait_for_completion_task(task);
2827 rpc_put_task(task);
2828 return status;
2829out_free_calldata:
2830 kfree(calldata);
2831out:
2832 nfs4_put_open_state(state);
2833 nfs4_put_state_owner(sp);
2834 return status;
2835}
2836
2837static struct inode *
2838nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
2839 int open_flags, struct iattr *attr, int *opened)
2840{
2841 struct nfs4_state *state;
2842 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
2843
2844 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
2845
2846 /* Protect against concurrent sillydeletes */
2847 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
2848
2849 nfs4_label_release_security(label);
2850
2851 if (IS_ERR(state))
2852 return ERR_CAST(state);
2853 return state->inode;
2854}
2855
2856static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2857{
2858 if (ctx->state == NULL)
2859 return;
2860 if (is_sync)
2861 nfs4_close_sync(ctx->state, ctx->mode);
2862 else
2863 nfs4_close_state(ctx->state, ctx->mode);
2864}
2865
2866#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
2867#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
2868#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
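/*
 * The arithmetic above relies on each FATTR4_WORDn_* constant being a
 * single bit: 2 * (1UL << n) - 1UL == (1UL << (n + 1)) - 1UL, i.e. a
 * mask of bit n and every lower-order bit.  For example, if
 * FATTR4_WORD1_MOUNTED_ON_FILEID is (1UL << 23), the NFSv4.0 word1 mask
 * works out to 0x00ffffff: every word1 attribute up to and including
 * mounted_on_fileid, the last attribute NFSv4.0 defines.
 */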
2869
2870static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2871{
2872 struct nfs4_server_caps_arg args = {
2873 .fhandle = fhandle,
2874 };
2875 struct nfs4_server_caps_res res = {};
2876 struct rpc_message msg = {
2877 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2878 .rpc_argp = &args,
2879 .rpc_resp = &res,
2880 };
2881 int status;
2882
2883 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2884 if (status == 0) {
2885 /* Sanity check the server answers */
2886 switch (server->nfs_client->cl_minorversion) {
2887 case 0:
2888 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
2889 res.attr_bitmask[2] = 0;
2890 break;
2891 case 1:
2892 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
2893 break;
2894 case 2:
2895 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
2896 }
2897 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2898 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2899 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2900 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2901 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2902 NFS_CAP_CTIME|NFS_CAP_MTIME|
2903 NFS_CAP_SECURITY_LABEL);
2904 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
2905 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
2906 server->caps |= NFS_CAP_ACLS;
2907 if (res.has_links != 0)
2908 server->caps |= NFS_CAP_HARDLINKS;
2909 if (res.has_symlinks != 0)
2910 server->caps |= NFS_CAP_SYMLINKS;
2911 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2912 server->caps |= NFS_CAP_FILEID;
2913 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2914 server->caps |= NFS_CAP_MODE;
2915 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2916 server->caps |= NFS_CAP_NLINK;
2917 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2918 server->caps |= NFS_CAP_OWNER;
2919 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2920 server->caps |= NFS_CAP_OWNER_GROUP;
2921 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2922 server->caps |= NFS_CAP_ATIME;
2923 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2924 server->caps |= NFS_CAP_CTIME;
2925 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2926 server->caps |= NFS_CAP_MTIME;
2927#ifdef CONFIG_NFS_V4_SECURITY_LABEL
2928 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
2929 server->caps |= NFS_CAP_SECURITY_LABEL;
2930#endif
2931 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
2932 sizeof(server->attr_bitmask));
2933 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
2934
2935 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2936 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2937 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2938 server->cache_consistency_bitmask[2] = 0;
2939 server->acl_bitmask = res.acl_bitmask;
2940 server->fh_expire_type = res.fh_expire_type;
2941 }
2942
2943 return status;
2944}
2945
2946int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2947{
2948 struct nfs4_exception exception = { };
2949 int err;
2950 do {
2951 err = nfs4_handle_exception(server,
2952 _nfs4_server_capabilities(server, fhandle),
2953 &exception);
2954 } while (exception.retry);
2955 return err;
2956}
2957
2958static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2959 struct nfs_fsinfo *info)
2960{
2961 u32 bitmask[3];
2962 struct nfs4_lookup_root_arg args = {
2963 .bitmask = bitmask,
2964 };
2965 struct nfs4_lookup_res res = {
2966 .server = server,
2967 .fattr = info->fattr,
2968 .fh = fhandle,
2969 };
2970 struct rpc_message msg = {
2971 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2972 .rpc_argp = &args,
2973 .rpc_resp = &res,
2974 };
2975
2976 bitmask[0] = nfs4_fattr_bitmap[0];
2977 bitmask[1] = nfs4_fattr_bitmap[1];
2978 /*
2979 * Process the label in the upcoming getfattr
2980 */
2981 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
2982
2983 nfs_fattr_init(info->fattr);
2984 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2985}
2986
2987static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2988 struct nfs_fsinfo *info)
2989{
2990 struct nfs4_exception exception = { };
2991 int err;
2992 do {
2993 err = _nfs4_lookup_root(server, fhandle, info);
2994 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
2995 switch (err) {
2996 case 0:
2997 case -NFS4ERR_WRONGSEC:
2998 goto out;
2999 default:
3000 err = nfs4_handle_exception(server, err, &exception);
3001 }
3002 } while (exception.retry);
3003out:
3004 return err;
3005}
3006
3007static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3008 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3009{
3010 struct rpc_auth_create_args auth_args = {
3011 .pseudoflavor = flavor,
3012 };
3013 struct rpc_auth *auth;
3014 int ret;
3015
3016 auth = rpcauth_create(&auth_args, server->client);
3017 if (IS_ERR(auth)) {
3018 ret = -EACCES;
3019 goto out;
3020 }
3021 ret = nfs4_lookup_root(server, fhandle, info);
3022out:
3023 return ret;
3024}
3025
3026/*
3027 * Retry pseudoroot lookup with various security flavors. We do this when:
3028 *
3029 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3030 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3031 *
3032 * Returns zero on success, or a negative NFS4ERR value, or a
3033 * negative errno value.
3034 */
3035static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3036 struct nfs_fsinfo *info)
3037{
3038 /* Per 3530bis 15.33.5 */
3039 static const rpc_authflavor_t flav_array[] = {
3040 RPC_AUTH_GSS_KRB5P,
3041 RPC_AUTH_GSS_KRB5I,
3042 RPC_AUTH_GSS_KRB5,
3043 RPC_AUTH_UNIX, /* courtesy */
3044 RPC_AUTH_NULL,
3045 };
3046 int status = -EPERM;
3047 size_t i;
3048
3049 if (server->auth_info.flavor_len > 0) {
3050 /* try each flavor specified by user */
3051 for (i = 0; i < server->auth_info.flavor_len; i++) {
3052 status = nfs4_lookup_root_sec(server, fhandle, info,
3053 server->auth_info.flavors[i]);
3054 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3055 continue;
3056 break;
3057 }
3058 } else {
3059 /* no flavors specified by user, try default list */
3060 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3061 status = nfs4_lookup_root_sec(server, fhandle, info,
3062 flav_array[i]);
3063 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3064 continue;
3065 break;
3066 }
3067 }
3068
3069 /*
3070 * -EACCES could mean that the user doesn't have correct permissions
3071 * to access the mount. It could also mean that we tried to mount
3072 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3073 * existing mount programs don't handle -EACCES very well so it should
3074 * be mapped to -EPERM instead.
3075 */
3076 if (status == -EACCES)
3077 status = -EPERM;
3078 return status;
3079}
3080
3081static int nfs4_do_find_root_sec(struct nfs_server *server,
3082 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
3083{
3084 int mv = server->nfs_client->cl_minorversion;
3085 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
3086}
3087
3088/**
3089 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3090 * @server: initialized nfs_server handle
3091 * @fhandle: we fill in the pseudo-fs root file handle
3092 * @info: we fill in an FSINFO struct
3093 * @auth_probe: probe the auth flavours
3094 *
3095 * Returns zero on success, or a negative errno.
3096 */
3097int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3098 struct nfs_fsinfo *info,
3099 bool auth_probe)
3100{
3101 int status = 0;
3102
3103 if (!auth_probe)
3104 status = nfs4_lookup_root(server, fhandle, info);
3105
3106 if (auth_probe || status == -NFS4ERR_WRONGSEC)
3107 status = nfs4_do_find_root_sec(server, fhandle, info);
3108
3109 if (status == 0)
3110 status = nfs4_server_capabilities(server, fhandle);
3111 if (status == 0)
3112 status = nfs4_do_fsinfo(server, fhandle, info);
3113
3114 return nfs4_map_errors(status);
3115}
3116
3117static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3118 struct nfs_fsinfo *info)
3119{
3120 int error;
3121 struct nfs_fattr *fattr = info->fattr;
3122 struct nfs4_label *label = NULL;
3123
3124 error = nfs4_server_capabilities(server, mntfh);
3125 if (error < 0) {
3126 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3127 return error;
3128 }
3129
3130 label = nfs4_label_alloc(server, GFP_KERNEL);
3131 if (IS_ERR(label))
3132 return PTR_ERR(label);
3133
3134 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3135 if (error < 0) {
3136 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3137 goto err_free_label;
3138 }
3139
3140 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3141 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3142 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3143
3144err_free_label:
3145 nfs4_label_free(label);
3146
3147 return error;
3148}
3149
3150/*
3151 * Get locations and (maybe) other attributes of a referral.
3152 * Note that we'll actually follow the referral later when
3153 * we detect fsid mismatch in inode revalidation
3154 */
3155static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3156 const struct qstr *name, struct nfs_fattr *fattr,
3157 struct nfs_fh *fhandle)
3158{
3159 int status = -ENOMEM;
3160 struct page *page = NULL;
3161 struct nfs4_fs_locations *locations = NULL;
3162
3163 page = alloc_page(GFP_KERNEL);
3164 if (page == NULL)
3165 goto out;
3166 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3167 if (locations == NULL)
3168 goto out;
3169
3170 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3171 if (status != 0)
3172 goto out;
3173
3174 /*
3175 * If the fsid didn't change, this is a migration event, not a
3176 * referral. Cause us to drop into the exception handler, which
3177 * will kick off migration recovery.
3178 */
3179 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3180 dprintk("%s: server did not return a different fsid for"
3181 " a referral at %s\n", __func__, name->name);
3182 status = -NFS4ERR_MOVED;
3183 goto out;
3184 }
3185 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3186 nfs_fixup_referral_attributes(&locations->fattr);
3187
3188 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3189 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3190 memset(fhandle, 0, sizeof(struct nfs_fh));
3191out:
3192 if (page)
3193 __free_page(page);
3194 kfree(locations);
3195 return status;
3196}
3197
3198static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3199 struct nfs_fattr *fattr, struct nfs4_label *label)
3200{
3201 struct nfs4_getattr_arg args = {
3202 .fh = fhandle,
3203 .bitmask = server->attr_bitmask,
3204 };
3205 struct nfs4_getattr_res res = {
3206 .fattr = fattr,
3207 .label = label,
3208 .server = server,
3209 };
3210 struct rpc_message msg = {
3211 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3212 .rpc_argp = &args,
3213 .rpc_resp = &res,
3214 };
3215
3216 args.bitmask = nfs4_bitmask(server, label);
3217
3218 nfs_fattr_init(fattr);
3219 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3220}
3221
3222static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3223 struct nfs_fattr *fattr, struct nfs4_label *label)
3224{
3225 struct nfs4_exception exception = { };
3226 int err;
3227 do {
3228 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3229 trace_nfs4_getattr(server, fhandle, fattr, err);
3230 err = nfs4_handle_exception(server, err,
3231 &exception);
3232 } while (exception.retry);
3233 return err;
3234}
3235
3236/*
3237 * The file is not closed if it is opened due to a request to change
3238 * the size of the file. The open call will not be needed once the
3239 * VFS layer lookup-intents are implemented.
3240 *
3241 * Close is called when the inode is destroyed.
3242 * If we haven't opened the file for O_WRONLY, we
3243 * need to do so in the size_change case to obtain a stateid.
3244 *
3245 * Got race?
3246 * Because OPEN is always done by name in nfsv4, it is
3247 * possible that we opened a different file by the same
3248 * name. We can recognize this race condition, but we
3249 * can't do anything about it besides returning an error.
3250 *
3251 * This will be fixed with VFS changes (lookup-intent).
3252 */
3253static int
3254nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3255 struct iattr *sattr)
3256{
3257 struct inode *inode = d_inode(dentry);
3258 struct rpc_cred *cred = NULL;
3259 struct nfs4_state *state = NULL;
3260 struct nfs4_label *label = NULL;
3261 int status;
3262
3263 if (pnfs_ld_layoutret_on_setattr(inode) &&
3264 sattr->ia_valid & ATTR_SIZE &&
3265 sattr->ia_size < i_size_read(inode))
3266 pnfs_commit_and_return_layout(inode);
3267
3268 nfs_fattr_init(fattr);
3269
3270 /* Deal with open(O_TRUNC) */
3271 if (sattr->ia_valid & ATTR_OPEN)
3272 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3273
3274 /* Optimization: if the end result is no change, don't RPC */
3275 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3276 return 0;
3277
3278 /* Search for an existing open(O_WRITE) file */
3279 if (sattr->ia_valid & ATTR_FILE) {
3280 struct nfs_open_context *ctx;
3281
3282 ctx = nfs_file_open_context(sattr->ia_file);
3283 if (ctx) {
3284 cred = ctx->cred;
3285 state = ctx->state;
3286 }
3287 }
3288
3289 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3290 if (IS_ERR(label))
3291 return PTR_ERR(label);
3292
3293 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3294 if (status == 0) {
3295 nfs_setattr_update_inode(inode, sattr, fattr);
3296 nfs_setsecurity(inode, fattr, label);
3297 }
3298 nfs4_label_free(label);
3299 return status;
3300}
3301
3302static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3303 const struct qstr *name, struct nfs_fh *fhandle,
3304 struct nfs_fattr *fattr, struct nfs4_label *label)
3305{
3306 struct nfs_server *server = NFS_SERVER(dir);
3307 int status;
3308 struct nfs4_lookup_arg args = {
3309 .bitmask = server->attr_bitmask,
3310 .dir_fh = NFS_FH(dir),
3311 .name = name,
3312 };
3313 struct nfs4_lookup_res res = {
3314 .server = server,
3315 .fattr = fattr,
3316 .label = label,
3317 .fh = fhandle,
3318 };
3319 struct rpc_message msg = {
3320 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3321 .rpc_argp = &args,
3322 .rpc_resp = &res,
3323 };
3324
3325 args.bitmask = nfs4_bitmask(server, label);
3326
3327 nfs_fattr_init(fattr);
3328
3329 dprintk("NFS call lookup %s\n", name->name);
3330 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3331 dprintk("NFS reply lookup: %d\n", status);
3332 return status;
3333}
3334
3335static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3336{
3337 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3338 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3339 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3340 fattr->nlink = 2;
3341}
3342
3343static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3344 struct qstr *name, struct nfs_fh *fhandle,
3345 struct nfs_fattr *fattr, struct nfs4_label *label)
3346{
3347 struct nfs4_exception exception = { };
3348 struct rpc_clnt *client = *clnt;
3349 int err;
3350 do {
3351 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3352 trace_nfs4_lookup(dir, name, err);
3353 switch (err) {
3354 case -NFS4ERR_BADNAME:
3355 err = -ENOENT;
3356 goto out;
3357 case -NFS4ERR_MOVED:
3358 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3359 if (err == -NFS4ERR_MOVED)
3360 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3361 goto out;
3362 case -NFS4ERR_WRONGSEC:
3363 err = -EPERM;
3364 if (client != *clnt)
3365 goto out;
3366 client = nfs4_negotiate_security(client, dir, name);
3367 if (IS_ERR(client))
3368 return PTR_ERR(client);
3369
3370 exception.retry = 1;
3371 break;
3372 default:
3373 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3374 }
3375 } while (exception.retry);
3376
3377out:
3378 if (err == 0)
3379 *clnt = client;
3380 else if (client != *clnt)
3381 rpc_shutdown_client(client);
3382
3383 return err;
3384}
3385
3386static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
3387 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3388 struct nfs4_label *label)
3389{
3390 int status;
3391 struct rpc_clnt *client = NFS_CLIENT(dir);
3392
3393 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3394 if (client != NFS_CLIENT(dir)) {
3395 rpc_shutdown_client(client);
3396 nfs_fixup_secinfo_attributes(fattr);
3397 }
3398 return status;
3399}
3400
3401struct rpc_clnt *
3402nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
3403 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3404{
3405 struct rpc_clnt *client = NFS_CLIENT(dir);
3406 int status;
3407
3408 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3409 if (status < 0)
3410 return ERR_PTR(status);
3411 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3412}
3413
3414static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3415{
3416 struct nfs_server *server = NFS_SERVER(inode);
3417 struct nfs4_accessargs args = {
3418 .fh = NFS_FH(inode),
3419 .bitmask = server->cache_consistency_bitmask,
3420 };
3421 struct nfs4_accessres res = {
3422 .server = server,
3423 };
3424 struct rpc_message msg = {
3425 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3426 .rpc_argp = &args,
3427 .rpc_resp = &res,
3428 .rpc_cred = entry->cred,
3429 };
3430 int mode = entry->mask;
3431 int status = 0;
3432
3433 /*
3434 * Determine which access bits we want to ask for...
3435 */
3436 if (mode & MAY_READ)
3437 args.access |= NFS4_ACCESS_READ;
3438 if (S_ISDIR(inode->i_mode)) {
3439 if (mode & MAY_WRITE)
3440 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3441 if (mode & MAY_EXEC)
3442 args.access |= NFS4_ACCESS_LOOKUP;
3443 } else {
3444 if (mode & MAY_WRITE)
3445 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3446 if (mode & MAY_EXEC)
3447 args.access |= NFS4_ACCESS_EXECUTE;
3448 }
3449
3450 res.fattr = nfs_alloc_fattr();
3451 if (res.fattr == NULL)
3452 return -ENOMEM;
3453
3454 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3455 if (!status) {
3456 nfs_access_set_mask(entry, res.access);
3457 nfs_refresh_inode(inode, res.fattr);
3458 }
3459 nfs_free_fattr(res.fattr);
3460 return status;
3461}
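/*
 * Example of the bit mapping above: an access(W_OK) check on a directory
 * asks the server for NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND |
 * NFS4_ACCESS_DELETE, while the same check on a regular file omits
 * DELETE, since deletion rights live on the parent directory.  Likewise
 * MAY_EXEC maps to NFS4_ACCESS_LOOKUP for a directory but to
 * NFS4_ACCESS_EXECUTE for a regular file.
 */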
3462
3463static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3464{
3465 struct nfs4_exception exception = { };
3466 int err;
3467 do {
3468 err = _nfs4_proc_access(inode, entry);
3469 trace_nfs4_access(inode, err);
3470 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3471 &exception);
3472 } while (exception.retry);
3473 return err;
3474}
3475
3476/*
3477 * TODO: For the time being, we don't try to get any attributes
3478 * along with any of the zero-copy operations READ, READDIR,
3479 * READLINK, WRITE.
3480 *
3481 * In the case of the first three, we want to put the GETATTR
3482 * after the read-type operation -- this is because it is hard
3483 * to predict the length of a GETATTR response in v4, and thus
3484 * align the READ data correctly. This means that the GETATTR
3485 * may end up partially falling into the page cache, and we should
3486 * shift it into the 'tail' of the xdr_buf before processing.
3487 * To do this efficiently, we need to know the total length
3488 * of data received, which doesn't seem to be available outside
3489 * of the RPC layer.
3490 *
3491 * In the case of WRITE, we also want to put the GETATTR after
3492 * the operation -- in this case because we want to make sure
3493 * we get the post-operation mtime and size.
3494 *
3495 * Both of these changes to the XDR layer would in fact be quite
3496 * minor, but I decided to leave them for a subsequent patch.
3497 */
3498static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3499 unsigned int pgbase, unsigned int pglen)
3500{
3501 struct nfs4_readlink args = {
3502 .fh = NFS_FH(inode),
3503 .pgbase = pgbase,
3504 .pglen = pglen,
3505 .pages = &page,
3506 };
3507 struct nfs4_readlink_res res;
3508 struct rpc_message msg = {
3509 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3510 .rpc_argp = &args,
3511 .rpc_resp = &res,
3512 };
3513
3514 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3515}
3516
3517static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3518 unsigned int pgbase, unsigned int pglen)
3519{
3520 struct nfs4_exception exception = { };
3521 int err;
3522 do {
3523 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3524 trace_nfs4_readlink(inode, err);
3525 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3526 &exception);
3527 } while (exception.retry);
3528 return err;
3529}
3530
3531/*
3532 * This is just for mknod. open(O_CREAT) will always do ->open_context().
3533 */
3534static int
3535nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3536 int flags)
3537{
3538 struct nfs4_label l, *ilabel = NULL;
3539 struct nfs_open_context *ctx;
3540 struct nfs4_state *state;
3541 int opened = 0;
3542 int status = 0;
3543
3544 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3545 if (IS_ERR(ctx))
3546 return PTR_ERR(ctx);
3547
3548 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3549
3550 sattr->ia_mode &= ~current_umask();
3551 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened);
3552 if (IS_ERR(state)) {
3553 status = PTR_ERR(state);
3554 goto out;
3555 }
3556out:
3557 nfs4_label_release_security(ilabel);
3558 put_nfs_open_context(ctx);
3559 return status;
3560}
3561
3562static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
3563{
3564 struct nfs_server *server = NFS_SERVER(dir);
3565 struct nfs_removeargs args = {
3566 .fh = NFS_FH(dir),
3567 .name = *name,
3568 };
3569 struct nfs_removeres res = {
3570 .server = server,
3571 };
3572 struct rpc_message msg = {
3573 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3574 .rpc_argp = &args,
3575 .rpc_resp = &res,
3576 };
3577 int status;
3578
3579 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3580 if (status == 0)
3581 update_changeattr(dir, &res.cinfo);
3582 return status;
3583}
3584
3585static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
3586{
3587 struct nfs4_exception exception = { };
3588 int err;
3589 do {
3590 err = _nfs4_proc_remove(dir, name);
3591 trace_nfs4_remove(dir, name, err);
3592 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3593 &exception);
3594 } while (exception.retry);
3595 return err;
3596}
3597
3598static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3599{
3600 struct nfs_server *server = NFS_SERVER(dir);
3601 struct nfs_removeargs *args = msg->rpc_argp;
3602 struct nfs_removeres *res = msg->rpc_resp;
3603
3604 res->server = server;
3605 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3606 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3607
3608 nfs_fattr_init(res->dir_attr);
3609}
3610
3611static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3612{
3613 nfs4_setup_sequence(NFS_SERVER(data->dir),
3614 &data->args.seq_args,
3615 &data->res.seq_res,
3616 task);
3617}
3618
3619static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3620{
3621 struct nfs_unlinkdata *data = task->tk_calldata;
3622 struct nfs_removeres *res = &data->res;
3623
3624 if (!nfs4_sequence_done(task, &res->seq_res))
3625 return 0;
3626 if (nfs4_async_handle_error(task, res->server, NULL,
3627 &data->timeout) == -EAGAIN)
3628 return 0;
3629 update_changeattr(dir, &res->cinfo);
3630 return 1;
3631}
3632
3633static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3634{
3635 struct nfs_server *server = NFS_SERVER(dir);
3636 struct nfs_renameargs *arg = msg->rpc_argp;
3637 struct nfs_renameres *res = msg->rpc_resp;
3638
3639 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3640 res->server = server;
3641 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3642}
3643
3644static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3645{
3646 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3647 &data->args.seq_args,
3648 &data->res.seq_res,
3649 task);
3650}
3651
3652static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3653 struct inode *new_dir)
3654{
3655 struct nfs_renamedata *data = task->tk_calldata;
3656 struct nfs_renameres *res = &data->res;
3657
3658 if (!nfs4_sequence_done(task, &res->seq_res))
3659 return 0;
3660 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3661 return 0;
3662
3663 update_changeattr(old_dir, &res->old_cinfo);
3664 update_changeattr(new_dir, &res->new_cinfo);
3665 return 1;
3666}
3667
3668static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3669{
3670 struct nfs_server *server = NFS_SERVER(inode);
3671 struct nfs4_link_arg arg = {
3672 .fh = NFS_FH(inode),
3673 .dir_fh = NFS_FH(dir),
3674 .name = name,
3675 .bitmask = server->attr_bitmask,
3676 };
3677 struct nfs4_link_res res = {
3678 .server = server,
3679 .label = NULL,
3680 };
3681 struct rpc_message msg = {
3682 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3683 .rpc_argp = &arg,
3684 .rpc_resp = &res,
3685 };
3686 int status = -ENOMEM;
3687
3688 res.fattr = nfs_alloc_fattr();
3689 if (res.fattr == NULL)
3690 goto out;
3691
3692 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3693 if (IS_ERR(res.label)) {
3694 status = PTR_ERR(res.label);
3695 goto out;
3696 }
3697 arg.bitmask = nfs4_bitmask(server, res.label);
3698
3699 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3700 if (!status) {
3701 update_changeattr(dir, &res.cinfo);
3702 status = nfs_post_op_update_inode(inode, res.fattr);
3703 if (!status)
3704 nfs_setsecurity(inode, res.fattr, res.label);
3705 }
3706
3707
3708 nfs4_label_free(res.label);
3709
3710out:
3711 nfs_free_fattr(res.fattr);
3712 return status;
3713}
3714
3715static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3716{
3717 struct nfs4_exception exception = { };
3718 int err;
3719 do {
3720 err = nfs4_handle_exception(NFS_SERVER(inode),
3721 _nfs4_proc_link(inode, dir, name),
3722 &exception);
3723 } while (exception.retry);
3724 return err;
3725}
3726
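/*
 * Common helper state for the non-regular-file CREATE operations
 * (symlink, mkdir, mknod). nfs4_alloc_createdata() packages the RPC
 * message, arguments and reply buffers; nfs4_do_create() issues the
 * synchronous call, refreshes the parent directory's change attribute
 * and instantiates the new dentry from the returned filehandle and
 * attributes.
 */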
3727struct nfs4_createdata {
3728 struct rpc_message msg;
3729 struct nfs4_create_arg arg;
3730 struct nfs4_create_res res;
3731 struct nfs_fh fh;
3732 struct nfs_fattr fattr;
3733 struct nfs4_label *label;
3734};
3735
3736static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3737 struct qstr *name, struct iattr *sattr, u32 ftype)
3738{
3739 struct nfs4_createdata *data;
3740
3741 data = kzalloc(sizeof(*data), GFP_KERNEL);
3742 if (data != NULL) {
3743 struct nfs_server *server = NFS_SERVER(dir);
3744
3745 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3746 if (IS_ERR(data->label))
3747 goto out_free;
3748
3749 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3750 data->msg.rpc_argp = &data->arg;
3751 data->msg.rpc_resp = &data->res;
3752 data->arg.dir_fh = NFS_FH(dir);
3753 data->arg.server = server;
3754 data->arg.name = name;
3755 data->arg.attrs = sattr;
3756 data->arg.ftype = ftype;
3757 data->arg.bitmask = nfs4_bitmask(server, data->label);
3758 data->res.server = server;
3759 data->res.fh = &data->fh;
3760 data->res.fattr = &data->fattr;
3761 data->res.label = data->label;
3762 nfs_fattr_init(data->res.fattr);
3763 }
3764 return data;
3765out_free:
3766 kfree(data);
3767 return NULL;
3768}
3769
3770static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3771{
3772 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3773 &data->arg.seq_args, &data->res.seq_res, 1);
3774 if (status == 0) {
3775 update_changeattr(dir, &data->res.dir_cinfo);
3776 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
3777 }
3778 return status;
3779}
3780
3781static void nfs4_free_createdata(struct nfs4_createdata *data)
3782{
3783 nfs4_label_free(data->label);
3784 kfree(data);
3785}
3786
3787static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3788 struct page *page, unsigned int len, struct iattr *sattr,
3789 struct nfs4_label *label)
3790{
3791 struct nfs4_createdata *data;
3792 int status = -ENAMETOOLONG;
3793
3794 if (len > NFS4_MAXPATHLEN)
3795 goto out;
3796
3797 status = -ENOMEM;
3798 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3799 if (data == NULL)
3800 goto out;
3801
3802 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3803 data->arg.u.symlink.pages = &page;
3804 data->arg.u.symlink.len = len;
3805 data->arg.label = label;
3806
3807 status = nfs4_do_create(dir, dentry, data);
3808
3809 nfs4_free_createdata(data);
3810out:
3811 return status;
3812}
3813
3814static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3815 struct page *page, unsigned int len, struct iattr *sattr)
3816{
3817 struct nfs4_exception exception = { };
3818 struct nfs4_label l, *label = NULL;
3819 int err;
3820
3821 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3822
3823 do {
3824 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
3825 trace_nfs4_symlink(dir, &dentry->d_name, err);
3826 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3827 &exception);
3828 } while (exception.retry);
3829
3830 nfs4_label_release_security(label);
3831 return err;
3832}
3833
3834static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3835 struct iattr *sattr, struct nfs4_label *label)
3836{
3837 struct nfs4_createdata *data;
3838 int status = -ENOMEM;
3839
3840 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3841 if (data == NULL)
3842 goto out;
3843
3844 data->arg.label = label;
3845 status = nfs4_do_create(dir, dentry, data);
3846
3847 nfs4_free_createdata(data);
3848out:
3849 return status;
3850}
3851
3852static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3853 struct iattr *sattr)
3854{
3855 struct nfs4_exception exception = { };
3856 struct nfs4_label l, *label = NULL;
3857 int err;
3858
3859 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3860
3861 sattr->ia_mode &= ~current_umask();
3862 do {
3863 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
3864 trace_nfs4_mkdir(dir, &dentry->d_name, err);
3865 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3866 &exception);
3867 } while (exception.retry);
3868 nfs4_label_release_security(label);
3869
3870 return err;
3871}
3872
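/*
 * READDIR: the directory cookie and the server-supplied cookie verifier
 * (cached in NFS_I(dir)->cookieverf) let subsequent calls resume the
 * listing where the previous one left off. The verifier is refreshed
 * from each successful reply.
 */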
3873static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3874 u64 cookie, struct page **pages, unsigned int count, int plus)
3875{
3876 struct inode *dir = d_inode(dentry);
3877 struct nfs4_readdir_arg args = {
3878 .fh = NFS_FH(dir),
3879 .pages = pages,
3880 .pgbase = 0,
3881 .count = count,
3882 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
3883 .plus = plus,
3884 };
3885 struct nfs4_readdir_res res;
3886 struct rpc_message msg = {
3887 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3888 .rpc_argp = &args,
3889 .rpc_resp = &res,
3890 .rpc_cred = cred,
3891 };
3892 int status;
3893
3894 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
3895 dentry,
3896 (unsigned long long)cookie);
3897 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3898 res.pgbase = args.pgbase;
3899 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3900 if (status >= 0) {
3901 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3902 status += args.pgbase;
3903 }
3904
3905 nfs_invalidate_atime(dir);
3906
3907 dprintk("%s: returns %d\n", __func__, status);
3908 return status;
3909}
3910
3911static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3912 u64 cookie, struct page **pages, unsigned int count, int plus)
3913{
3914 struct nfs4_exception exception = { };
3915 int err;
3916 do {
3917 err = _nfs4_proc_readdir(dentry, cred, cookie,
3918 pages, count, plus);
3919 trace_nfs4_readdir(d_inode(dentry), err);
3920 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
3921 &exception);
3922 } while (exception.retry);
3923 return err;
3924}
3925
3926static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3927 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
3928{
3929 struct nfs4_createdata *data;
3930 int mode = sattr->ia_mode;
3931 int status = -ENOMEM;
3932
3933 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3934 if (data == NULL)
3935 goto out;
3936
3937 if (S_ISFIFO(mode))
3938 data->arg.ftype = NF4FIFO;
3939 else if (S_ISBLK(mode)) {
3940 data->arg.ftype = NF4BLK;
3941 data->arg.u.device.specdata1 = MAJOR(rdev);
3942 data->arg.u.device.specdata2 = MINOR(rdev);
3943 }
3944 else if (S_ISCHR(mode)) {
3945 data->arg.ftype = NF4CHR;
3946 data->arg.u.device.specdata1 = MAJOR(rdev);
3947 data->arg.u.device.specdata2 = MINOR(rdev);
3948 } else if (!S_ISSOCK(mode)) {
3949 status = -EINVAL;
3950 goto out_free;
3951 }
3952
3953 data->arg.label = label;
3954 status = nfs4_do_create(dir, dentry, data);
3955out_free:
3956 nfs4_free_createdata(data);
3957out:
3958 return status;
3959}
3960
3961static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3962 struct iattr *sattr, dev_t rdev)
3963{
3964 struct nfs4_exception exception = { };
3965 struct nfs4_label l, *label = NULL;
3966 int err;
3967
3968 label = nfs4_label_init_security(dir, dentry, sattr, &l);
3969
3970 sattr->ia_mode &= ~current_umask();
3971 do {
3972 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
3973 trace_nfs4_mknod(dir, &dentry->d_name, err);
3974 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3975 &exception);
3976 } while (exception.retry);
3977
3978 nfs4_label_release_security(label);
3979
3980 return err;
3981}
3982
3983static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3984 struct nfs_fsstat *fsstat)
3985{
3986 struct nfs4_statfs_arg args = {
3987 .fh = fhandle,
3988 .bitmask = server->attr_bitmask,
3989 };
3990 struct nfs4_statfs_res res = {
3991 .fsstat = fsstat,
3992 };
3993 struct rpc_message msg = {
3994 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3995 .rpc_argp = &args,
3996 .rpc_resp = &res,
3997 };
3998
3999 nfs_fattr_init(fsstat->fattr);
4000 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4001}
4002
4003static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4004{
4005 struct nfs4_exception exception = { };
4006 int err;
4007 do {
4008 err = nfs4_handle_exception(server,
4009 _nfs4_proc_statfs(server, fhandle, fsstat),
4010 &exception);
4011 } while (exception.retry);
4012 return err;
4013}
4014
4015static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4016 struct nfs_fsinfo *fsinfo)
4017{
4018 struct nfs4_fsinfo_arg args = {
4019 .fh = fhandle,
4020 .bitmask = server->attr_bitmask,
4021 };
4022 struct nfs4_fsinfo_res res = {
4023 .fsinfo = fsinfo,
4024 };
4025 struct rpc_message msg = {
4026 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4027 .rpc_argp = &args,
4028 .rpc_resp = &res,
4029 };
4030
4031 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4032}
4033
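/*
 * On success, nfs4_do_fsinfo() also refreshes the client's lease time and
 * last-renewal timestamp under cl_lock, since FSINFO fetches the
 * lease_time attribute from the server.
 */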
4034static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4035{
4036 struct nfs4_exception exception = { };
4037 unsigned long now = jiffies;
4038 int err;
4039
4040 do {
4041 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4042 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4043 if (err == 0) {
4044 struct nfs_client *clp = server->nfs_client;
4045
4046 spin_lock(&clp->cl_lock);
4047 clp->cl_lease_time = fsinfo->lease_time * HZ;
4048 clp->cl_last_renewal = now;
4049 spin_unlock(&clp->cl_lock);
4050 break;
4051 }
4052 err = nfs4_handle_exception(server, err, &exception);
4053 } while (exception.retry);
4054 return err;
4055}
4056
4057static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4058{
4059 int error;
4060
4061 nfs_fattr_init(fsinfo->fattr);
4062 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4063 if (error == 0) {
4064 /* block layout checks this! */
4065 server->pnfs_blksize = fsinfo->blksize;
4066 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4067 }
4068
4069 return error;
4070}
4071
4072static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4073 struct nfs_pathconf *pathconf)
4074{
4075 struct nfs4_pathconf_arg args = {
4076 .fh = fhandle,
4077 .bitmask = server->attr_bitmask,
4078 };
4079 struct nfs4_pathconf_res res = {
4080 .pathconf = pathconf,
4081 };
4082 struct rpc_message msg = {
4083 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4084 .rpc_argp = &args,
4085 .rpc_resp = &res,
4086 };
4087
4088 /* None of the pathconf attributes are mandatory to implement */
4089 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4090 memset(pathconf, 0, sizeof(*pathconf));
4091 return 0;
4092 }
4093
4094 nfs_fattr_init(pathconf->fattr);
4095 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4096}
4097
4098static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4099 struct nfs_pathconf *pathconf)
4100{
4101 struct nfs4_exception exception = { };
4102 int err;
4103
4104 do {
4105 err = nfs4_handle_exception(server,
4106 _nfs4_proc_pathconf(server, fhandle, pathconf),
4107 &exception);
4108 } while (exception.retry);
4109 return err;
4110}
4111
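/*
 * Select the stateid to use for a READ or WRITE. nfs4_select_rw_stateid()
 * picks a lock stateid when the I/O was issued under a lock context, and
 * otherwise falls back to a delegation or open stateid held by the open
 * context.
 */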
4112int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4113 const struct nfs_open_context *ctx,
4114 const struct nfs_lock_context *l_ctx,
4115 fmode_t fmode)
4116{
4117 const struct nfs_lockowner *lockowner = NULL;
4118
4119 if (l_ctx != NULL)
4120 lockowner = &l_ctx->lockowner;
4121 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4122}
4123EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4124
4125static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4126 const struct nfs_open_context *ctx,
4127 const struct nfs_lock_context *l_ctx,
4128 fmode_t fmode)
4129{
4130 nfs4_stateid current_stateid;
4131
4132 /* If the current stateid represents a lost lock, then exit */
4133 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4134 return true;
4135 return nfs4_stateid_match(stateid, &current_stateid);
4136}
4137
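/*
 * Errors that indicate the stateid used for a READ or WRITE is no longer
 * valid. When one of these is seen and the stateid in the arguments no
 * longer matches the current open/lock stateid, the request is simply
 * restarted so that it picks up the new stateid (see
 * nfs4_read_stateid_changed() and nfs4_write_stateid_changed()).
 */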
4138static bool nfs4_error_stateid_expired(int err)
4139{
4140 switch (err) {
4141 case -NFS4ERR_DELEG_REVOKED:
4142 case -NFS4ERR_ADMIN_REVOKED:
4143 case -NFS4ERR_BAD_STATEID:
4144 case -NFS4ERR_STALE_STATEID:
4145 case -NFS4ERR_OLD_STATEID:
4146 case -NFS4ERR_OPENMODE:
4147 case -NFS4ERR_EXPIRED:
4148 return true;
4149 }
4150 return false;
4151}
4152
4153void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4154{
4155 nfs_invalidate_atime(hdr->inode);
4156}
4157
4158static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4159{
4160 struct nfs_server *server = NFS_SERVER(hdr->inode);
4161
4162 trace_nfs4_read(hdr, task->tk_status);
4163 if (nfs4_async_handle_error(task, server,
4164 hdr->args.context->state,
4165 NULL) == -EAGAIN) {
4166 rpc_restart_call_prepare(task);
4167 return -EAGAIN;
4168 }
4169
4170 __nfs4_read_done_cb(hdr);
4171 if (task->tk_status > 0)
4172 renew_lease(server, hdr->timestamp);
4173 return 0;
4174}
4175
4176static bool nfs4_read_stateid_changed(struct rpc_task *task,
4177 struct nfs_pgio_args *args)
4178{
4179
4180 if (!nfs4_error_stateid_expired(task->tk_status) ||
4181 nfs4_stateid_is_current(&args->stateid,
4182 args->context,
4183 args->lock_context,
4184 FMODE_READ))
4185 return false;
4186 rpc_restart_call_prepare(task);
4187 return true;
4188}
4189
4190static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4191{
4192
4193 dprintk("--> %s\n", __func__);
4194
4195 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4196 return -EAGAIN;
4197 if (nfs4_read_stateid_changed(task, &hdr->args))
4198 return -EAGAIN;
4199 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4200 nfs4_read_done_cb(task, hdr);
4201}
4202
4203static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4204 struct rpc_message *msg)
4205{
4206 hdr->timestamp = jiffies;
4207 hdr->pgio_done_cb = nfs4_read_done_cb;
4208 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4209 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4210}
4211
4212static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4213 struct nfs_pgio_header *hdr)
4214{
4215 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4216 &hdr->args.seq_args,
4217 &hdr->res.seq_res,
4218 task))
4219 return 0;
4220 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4221 hdr->args.lock_context,
4222 hdr->rw_ops->rw_mode) == -EIO)
4223 return -EIO;
4224 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4225 return -EIO;
4226 return 0;
4227}
4228
4229static int nfs4_write_done_cb(struct rpc_task *task,
4230 struct nfs_pgio_header *hdr)
4231{
4232 struct inode *inode = hdr->inode;
4233
4234 trace_nfs4_write(hdr, task->tk_status);
4235 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4236 hdr->args.context->state,
4237 NULL) == -EAGAIN) {
4238 rpc_restart_call_prepare(task);
4239 return -EAGAIN;
4240 }
4241 if (task->tk_status >= 0) {
4242 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4243 nfs_writeback_update_inode(hdr);
4244 }
4245 return 0;
4246}
4247
4248static bool nfs4_write_stateid_changed(struct rpc_task *task,
4249 struct nfs_pgio_args *args)
4250{
4251
4252 if (!nfs4_error_stateid_expired(task->tk_status) ||
4253 nfs4_stateid_is_current(&args->stateid,
4254 args->context,
4255 args->lock_context,
4256 FMODE_WRITE))
4257 return false;
4258 rpc_restart_call_prepare(task);
4259 return true;
4260}
4261
4262static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4263{
4264 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4265 return -EAGAIN;
4266 if (nfs4_write_stateid_changed(task, &hdr->args))
4267 return -EAGAIN;
4268 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4269 nfs4_write_done_cb(task, hdr);
4270}
4271
4272static
4273bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4274{
4275 /* Don't request attributes for pNFS or O_DIRECT writes */
4276 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4277 return false;
4278 /* Otherwise, request attributes if and only if we don't hold
4279 * a delegation
4280 */
4281 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4282}
4283
4284static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4285 struct rpc_message *msg)
4286{
4287 struct nfs_server *server = NFS_SERVER(hdr->inode);
4288
4289 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4290 hdr->args.bitmask = NULL;
4291 hdr->res.fattr = NULL;
4292 } else
4293 hdr->args.bitmask = server->cache_consistency_bitmask;
4294
4295 if (!hdr->pgio_done_cb)
4296 hdr->pgio_done_cb = nfs4_write_done_cb;
4297 hdr->res.server = server;
4298 hdr->timestamp = jiffies;
4299
4300 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4301 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4302}
4303
4304static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4305{
4306 nfs4_setup_sequence(NFS_SERVER(data->inode),
4307 &data->args.seq_args,
4308 &data->res.seq_res,
4309 task);
4310}
4311
4312static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4313{
4314 struct inode *inode = data->inode;
4315
4316 trace_nfs4_commit(data, task->tk_status);
4317 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4318 NULL, NULL) == -EAGAIN) {
4319 rpc_restart_call_prepare(task);
4320 return -EAGAIN;
4321 }
4322 return 0;
4323}
4324
4325static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4326{
4327 if (!nfs4_sequence_done(task, &data->res.seq_res))
4328 return -EAGAIN;
4329 return data->commit_done_cb(task, data);
4330}
4331
4332static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4333{
4334 struct nfs_server *server = NFS_SERVER(data->inode);
4335
4336 if (data->commit_done_cb == NULL)
4337 data->commit_done_cb = nfs4_commit_done_cb;
4338 data->res.server = server;
4339 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4340 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4341}
4342
4343struct nfs4_renewdata {
4344 struct nfs_client *client;
4345 unsigned long timestamp;
4346};
4347
4348/*
4349 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4350 * standalone procedure for queueing an asynchronous RENEW.
4351 */
4352static void nfs4_renew_release(void *calldata)
4353{
4354 struct nfs4_renewdata *data = calldata;
4355 struct nfs_client *clp = data->client;
4356
4357 if (atomic_read(&clp->cl_count) > 1)
4358 nfs4_schedule_state_renewal(clp);
4359 nfs_put_client(clp);
4360 kfree(data);
4361}
4362
4363static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4364{
4365 struct nfs4_renewdata *data = calldata;
4366 struct nfs_client *clp = data->client;
4367 unsigned long timestamp = data->timestamp;
4368
4369 trace_nfs4_renew_async(clp, task->tk_status);
4370 switch (task->tk_status) {
4371 case 0:
4372 break;
4373 case -NFS4ERR_LEASE_MOVED:
4374 nfs4_schedule_lease_moved_recovery(clp);
4375 break;
4376 default:
4377 /* Unless we're shutting down, schedule state recovery! */
4378 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4379 return;
4380 if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
4381 nfs4_schedule_lease_recovery(clp);
4382 return;
4383 }
4384 nfs4_schedule_path_down_recovery(clp);
4385 }
4386 do_renew_lease(clp, timestamp);
4387}
4388
4389static const struct rpc_call_ops nfs4_renew_ops = {
4390 .rpc_call_done = nfs4_renew_done,
4391 .rpc_release = nfs4_renew_release,
4392};
4393
4394static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4395{
4396 struct rpc_message msg = {
4397 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4398 .rpc_argp = clp,
4399 .rpc_cred = cred,
4400 };
4401 struct nfs4_renewdata *data;
4402
4403 if (renew_flags == 0)
4404 return 0;
4405 if (!atomic_inc_not_zero(&clp->cl_count))
4406 return -EIO;
4407 data = kmalloc(sizeof(*data), GFP_NOFS);
4408 if (data == NULL)
4409 return -ENOMEM;
4410 data->client = clp;
4411 data->timestamp = jiffies;
4412 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4413 &nfs4_renew_ops, data);
4414}
4415
4416static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4417{
4418 struct rpc_message msg = {
4419 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4420 .rpc_argp = clp,
4421 .rpc_cred = cred,
4422 };
4423 unsigned long now = jiffies;
4424 int status;
4425
4426 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4427 if (status < 0)
4428 return status;
4429 do_renew_lease(clp, now);
4430 return 0;
4431}
4432
4433static inline int nfs4_server_supports_acls(struct nfs_server *server)
4434{
4435 return server->caps & NFS_CAP_ACLS;
4436}
4437
4438/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4439 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4440 * the stack.
4441 */
4442#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
4443
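/*
 * Copy a flat caller-supplied buffer (e.g. an ACL blob) into individually
 * allocated pages so it can be sent as the page data of an RPC call.
 * Returns the number of pages filled, or -ENOMEM after freeing any pages
 * that were already allocated.
 */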
4444static int buf_to_pages_noslab(const void *buf, size_t buflen,
4445 struct page **pages, unsigned int *pgbase)
4446{
4447 struct page *newpage, **spages;
4448 int rc = 0;
4449 size_t len;
4450 spages = pages;
4451
4452 do {
4453 len = min_t(size_t, PAGE_SIZE, buflen);
4454 newpage = alloc_page(GFP_KERNEL);
4455
4456 if (newpage == NULL)
4457 goto unwind;
4458 memcpy(page_address(newpage), buf, len);
4459 buf += len;
4460 buflen -= len;
4461 *pages++ = newpage;
4462 rc++;
4463 } while (buflen != 0);
4464
4465 return rc;
4466
4467unwind:
4468 for(; rc > 0; rc--)
4469 __free_page(spages[rc-1]);
4470 return -ENOMEM;
4471}
4472
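/*
 * Cached ACL data, hung off the nfs_inode and protected by inode->i_lock.
 * If @cached is zero only the length is known (the ACL itself was too
 * large to cache); otherwise @data holds @len bytes of ACL data.
 */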
4473struct nfs4_cached_acl {
4474 int cached;
4475 size_t len;
4476 char data[0];
4477};
4478
4479static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4480{
4481 struct nfs_inode *nfsi = NFS_I(inode);
4482
4483 spin_lock(&inode->i_lock);
4484 kfree(nfsi->nfs4_acl);
4485 nfsi->nfs4_acl = acl;
4486 spin_unlock(&inode->i_lock);
4487}
4488
4489static void nfs4_zap_acl_attr(struct inode *inode)
4490{
4491 nfs4_set_cached_acl(inode, NULL);
4492}
4493
4494static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4495{
4496 struct nfs_inode *nfsi = NFS_I(inode);
4497 struct nfs4_cached_acl *acl;
4498 int ret = -ENOENT;
4499
4500 spin_lock(&inode->i_lock);
4501 acl = nfsi->nfs4_acl;
4502 if (acl == NULL)
4503 goto out;
4504 if (buf == NULL) /* user is just asking for length */
4505 goto out_len;
4506 if (acl->cached == 0)
4507 goto out;
4508 ret = -ERANGE; /* see getxattr(2) man page */
4509 if (acl->len > buflen)
4510 goto out;
4511 memcpy(buf, acl->data, acl->len);
4512out_len:
4513 ret = acl->len;
4514out:
4515 spin_unlock(&inode->i_lock);
4516 return ret;
4517}
4518
4519static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4520{
4521 struct nfs4_cached_acl *acl;
4522 size_t buflen = sizeof(*acl) + acl_len;
4523
4524 if (buflen <= PAGE_SIZE) {
4525 acl = kmalloc(buflen, GFP_KERNEL);
4526 if (acl == NULL)
4527 goto out;
4528 acl->cached = 1;
4529 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4530 } else {
4531 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4532 if (acl == NULL)
4533 goto out;
4534 acl->cached = 0;
4535 }
4536 acl->len = acl_len;
4537out:
4538 nfs4_set_cached_acl(inode, acl);
4539}
4540
4541/*
4542 * The getxattr API returns the required buffer length when called with a
4543 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4544 * the required buf. On a NULL buf, we send a page of data to the server
4545 * guessing that the ACL request can be serviced by a page. If so, we cache
4546 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
4547 * the cache. If not so, we throw away the page, and cache the required
4548 * length. The next getxattr call will then produce another round trip to
4549 * the server, this time with the input buf of the required size.
4550 */
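/*
 * For illustration only (hypothetical userspace usage, not part of this
 * file): an ACL tool typically probes the length first and then fetches
 * the data, e.g.
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 * The second call is the one that can be satisfied from the cache filled
 * in below.
 */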
4551static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4552{
4553 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4554 struct nfs_getaclargs args = {
4555 .fh = NFS_FH(inode),
4556 .acl_pages = pages,
4557 .acl_len = buflen,
4558 };
4559 struct nfs_getaclres res = {
4560 .acl_len = buflen,
4561 };
4562 struct rpc_message msg = {
4563 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4564 .rpc_argp = &args,
4565 .rpc_resp = &res,
4566 };
4567 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4568 int ret = -ENOMEM, i;
4569
4570 /* As long as we're doing a round trip to the server anyway,
4571 * let's be prepared for a page of acl data. */
4572 if (npages == 0)
4573 npages = 1;
4574 if (npages > ARRAY_SIZE(pages))
4575 return -ERANGE;
4576
4577 for (i = 0; i < npages; i++) {
4578 pages[i] = alloc_page(GFP_KERNEL);
4579 if (!pages[i])
4580 goto out_free;
4581 }
4582
4583 /* for decoding across pages */
4584 res.acl_scratch = alloc_page(GFP_KERNEL);
4585 if (!res.acl_scratch)
4586 goto out_free;
4587
4588 args.acl_len = npages * PAGE_SIZE;
4589 args.acl_pgbase = 0;
4590
4591 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4592 __func__, buf, buflen, npages, args.acl_len);
4593 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4594 &msg, &args.seq_args, &res.seq_res, 0);
4595 if (ret)
4596 goto out_free;
4597
4598 /* Handle the case where the passed-in buffer is too short */
4599 if (res.acl_flags & NFS4_ACL_TRUNC) {
4600 /* Did the user only issue a request for the acl length? */
4601 if (buf == NULL)
4602 goto out_ok;
4603 ret = -ERANGE;
4604 goto out_free;
4605 }
4606 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4607 if (buf) {
4608 if (res.acl_len > buflen) {
4609 ret = -ERANGE;
4610 goto out_free;
4611 }
4612 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4613 }
4614out_ok:
4615 ret = res.acl_len;
4616out_free:
4617 for (i = 0; i < npages; i++)
4618 if (pages[i])
4619 __free_page(pages[i]);
4620 if (res.acl_scratch)
4621 __free_page(res.acl_scratch);
4622 return ret;
4623}
4624
4625static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4626{
4627 struct nfs4_exception exception = { };
4628 ssize_t ret;
4629 do {
4630 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4631 trace_nfs4_get_acl(inode, ret);
4632 if (ret >= 0)
4633 break;
4634 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4635 } while (exception.retry);
4636 return ret;
4637}
4638
4639static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4640{
4641 struct nfs_server *server = NFS_SERVER(inode);
4642 int ret;
4643
4644 if (!nfs4_server_supports_acls(server))
4645 return -EOPNOTSUPP;
4646 ret = nfs_revalidate_inode(server, inode);
4647 if (ret < 0)
4648 return ret;
4649 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4650 nfs_zap_acl_cache(inode);
4651 ret = nfs4_read_cached_acl(inode, buf, buflen);
4652 if (ret != -ENOENT)
4653 /* -ENOENT is returned if there is no ACL or if there is an ACL
4654 * but no cached acl data, just the acl length */
4655 return ret;
4656 return nfs4_get_acl_uncached(inode, buf, buflen);
4657}
4658
4659static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4660{
4661 struct nfs_server *server = NFS_SERVER(inode);
4662 struct page *pages[NFS4ACL_MAXPAGES];
4663 struct nfs_setaclargs arg = {
4664 .fh = NFS_FH(inode),
4665 .acl_pages = pages,
4666 .acl_len = buflen,
4667 };
4668 struct nfs_setaclres res;
4669 struct rpc_message msg = {
4670 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4671 .rpc_argp = &arg,
4672 .rpc_resp = &res,
4673 };
4674 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4675 int ret, i;
4676
4677 if (!nfs4_server_supports_acls(server))
4678 return -EOPNOTSUPP;
4679 if (npages > ARRAY_SIZE(pages))
4680 return -ERANGE;
4681 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
4682 if (i < 0)
4683 return i;
4684 nfs4_inode_return_delegation(inode);
4685 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4686
4687 /*
4688 * Free each page after tx, so the only ref left is
4689 * held by the network stack
4690 */
4691 for (; i > 0; i--)
4692 put_page(pages[i-1]);
4693
4694 /*
4695 * Acl update can result in inode attribute update.
4696 * so mark the attribute cache invalid.
4697 */
4698 spin_lock(&inode->i_lock);
4699 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4700 spin_unlock(&inode->i_lock);
4701 nfs_access_zap_cache(inode);
4702 nfs_zap_acl_cache(inode);
4703 return ret;
4704}
4705
4706static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4707{
4708 struct nfs4_exception exception = { };
4709 int err;
4710 do {
4711 err = __nfs4_proc_set_acl(inode, buf, buflen);
4712 trace_nfs4_set_acl(inode, err);
4713 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4714 &exception);
4715 } while (exception.retry);
4716 return err;
4717}
4718
4719#ifdef CONFIG_NFS_V4_SECURITY_LABEL
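/*
 * Security label (LSM, e.g. SELinux) support: the label is fetched with a
 * GETATTR restricted to FATTR4_WORD2_SECURITY_LABEL, and stored with a
 * SETATTR carrying the new label using the zero stateid.
 */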
4720static int _nfs4_get_security_label(struct inode *inode, void *buf,
4721 size_t buflen)
4722{
4723 struct nfs_server *server = NFS_SERVER(inode);
4724 struct nfs_fattr fattr;
4725 struct nfs4_label label = {0, 0, buflen, buf};
4726
4727 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4728 struct nfs4_getattr_arg arg = {
4729 .fh = NFS_FH(inode),
4730 .bitmask = bitmask,
4731 };
4732 struct nfs4_getattr_res res = {
4733 .fattr = &fattr,
4734 .label = &label,
4735 .server = server,
4736 };
4737 struct rpc_message msg = {
4738 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4739 .rpc_argp = &arg,
4740 .rpc_resp = &res,
4741 };
4742 int ret;
4743
4744 nfs_fattr_init(&fattr);
4745
4746 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4747 if (ret)
4748 return ret;
4749 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4750 return -ENOENT;
4751 if (buflen < label.len)
4752 return -ERANGE;
4753 return 0;
4754}
4755
4756static int nfs4_get_security_label(struct inode *inode, void *buf,
4757 size_t buflen)
4758{
4759 struct nfs4_exception exception = { };
4760 int err;
4761
4762 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4763 return -EOPNOTSUPP;
4764
4765 do {
4766 err = _nfs4_get_security_label(inode, buf, buflen);
4767 trace_nfs4_get_security_label(inode, err);
4768 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4769 &exception);
4770 } while (exception.retry);
4771 return err;
4772}
4773
4774static int _nfs4_do_set_security_label(struct inode *inode,
4775 struct nfs4_label *ilabel,
4776 struct nfs_fattr *fattr,
4777 struct nfs4_label *olabel)
4778{
4779
4780 struct iattr sattr = {0};
4781 struct nfs_server *server = NFS_SERVER(inode);
4782 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4783 struct nfs_setattrargs arg = {
4784 .fh = NFS_FH(inode),
4785 .iap = &sattr,
4786 .server = server,
4787 .bitmask = bitmask,
4788 .label = ilabel,
4789 };
4790 struct nfs_setattrres res = {
4791 .fattr = fattr,
4792 .label = olabel,
4793 .server = server,
4794 };
4795 struct rpc_message msg = {
4796 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
4797 .rpc_argp = &arg,
4798 .rpc_resp = &res,
4799 };
4800 int status;
4801
4802 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
4803
4804 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4805 if (status)
4806 dprintk("%s failed: %d\n", __func__, status);
4807
4808 return status;
4809}
4810
4811static int nfs4_do_set_security_label(struct inode *inode,
4812 struct nfs4_label *ilabel,
4813 struct nfs_fattr *fattr,
4814 struct nfs4_label *olabel)
4815{
4816 struct nfs4_exception exception = { };
4817 int err;
4818
4819 do {
4820 err = _nfs4_do_set_security_label(inode, ilabel,
4821 fattr, olabel);
4822 trace_nfs4_set_security_label(inode, err);
4823 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4824 &exception);
4825 } while (exception.retry);
4826 return err;
4827}
4828
4829static int
4830nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
4831{
4832 struct nfs4_label ilabel, *olabel = NULL;
4833 struct nfs_fattr fattr;
4834 struct rpc_cred *cred;
4835 struct inode *inode = d_inode(dentry);
4836 int status;
4837
4838 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
4839 return -EOPNOTSUPP;
4840
4841 nfs_fattr_init(&fattr);
4842
4843 ilabel.pi = 0;
4844 ilabel.lfs = 0;
4845 ilabel.label = (char *)buf;
4846 ilabel.len = buflen;
4847
4848 cred = rpc_lookup_cred();
4849 if (IS_ERR(cred))
4850 return PTR_ERR(cred);
4851
4852 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4853 if (IS_ERR(olabel)) {
4854 status = PTR_ERR(olabel);
4855 goto out;
4856 }
4857
4858 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
4859 if (status == 0)
4860 nfs_setsecurity(inode, &fattr, olabel);
4861
4862 nfs4_label_free(olabel);
4863out:
4864 put_rpccred(cred);
4865 return status;
4866}
4867#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
4868
4869
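/*
 * Central error handler for asynchronous RPC calls. Returns 0 if the
 * caller should complete the request (tk_status is then mapped to a
 * normal errno), or -EAGAIN if the caller should restart the RPC once the
 * scheduled recovery (stateid, lease, session or migration) or the
 * requested delay has run.
 */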
4870static int
4871nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4872 struct nfs4_state *state, long *timeout)
4873{
4874 struct nfs_client *clp = server->nfs_client;
4875
4876 if (task->tk_status >= 0)
4877 return 0;
4878 switch(task->tk_status) {
4879 case -NFS4ERR_DELEG_REVOKED:
4880 case -NFS4ERR_ADMIN_REVOKED:
4881 case -NFS4ERR_BAD_STATEID:
4882 case -NFS4ERR_OPENMODE:
4883 if (state == NULL)
4884 break;
4885 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4886 goto recovery_failed;
4887 goto wait_on_recovery;
4888 case -NFS4ERR_EXPIRED:
4889 if (state != NULL) {
4890 if (nfs4_schedule_stateid_recovery(server, state) < 0)
4891 goto recovery_failed;
4892 }
4893 case -NFS4ERR_STALE_STATEID:
4894 case -NFS4ERR_STALE_CLIENTID:
4895 nfs4_schedule_lease_recovery(clp);
4896 goto wait_on_recovery;
4897 case -NFS4ERR_MOVED:
4898 if (nfs4_schedule_migration_recovery(server) < 0)
4899 goto recovery_failed;
4900 goto wait_on_recovery;
4901 case -NFS4ERR_LEASE_MOVED:
4902 nfs4_schedule_lease_moved_recovery(clp);
4903 goto wait_on_recovery;
4904#if defined(CONFIG_NFS_V4_1)
4905 case -NFS4ERR_BADSESSION:
4906 case -NFS4ERR_BADSLOT:
4907 case -NFS4ERR_BAD_HIGH_SLOT:
4908 case -NFS4ERR_DEADSESSION:
4909 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4910 case -NFS4ERR_SEQ_FALSE_RETRY:
4911 case -NFS4ERR_SEQ_MISORDERED:
4912 dprintk("%s ERROR %d, Reset session\n", __func__,
4913 task->tk_status);
4914 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
4915 goto wait_on_recovery;
4916#endif /* CONFIG_NFS_V4_1 */
4917 case -NFS4ERR_DELAY:
4918 nfs_inc_server_stats(server, NFSIOS_DELAY);
4919 rpc_delay(task, nfs4_update_delay(timeout));
4920 goto restart_call;
4921 case -NFS4ERR_GRACE:
4922 rpc_delay(task, NFS4_POLL_RETRY_MAX);
4923 case -NFS4ERR_RETRY_UNCACHED_REP:
4924 case -NFS4ERR_OLD_STATEID:
4925 goto restart_call;
4926 }
4927 task->tk_status = nfs4_map_errors(task->tk_status);
4928 return 0;
4929recovery_failed:
4930 task->tk_status = -EIO;
4931 return 0;
4932wait_on_recovery:
4933 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
4934 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
4935 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
4936 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
4937 goto recovery_failed;
4938restart_call:
4939 task->tk_status = 0;
4940 return -EAGAIN;
4941}
4942
4943static void nfs4_init_boot_verifier(const struct nfs_client *clp,
4944 nfs4_verifier *bootverf)
4945{
4946 __be32 verf[2];
4947
4948 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
4949 /* An impossible timestamp guarantees this value
4950 * will never match a generated boot time. */
4951 verf[0] = 0;
4952 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
4953 } else {
4954 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4955 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
4956 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
4957 }
4958 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4959}
4960
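/*
 * Construction of the nfs_client_id4 owner string sent in SETCLIENTID /
 * EXCHANGE_ID. The "nonuniform" form embeds the client and server
 * addresses; the "uniform" form is based on the node name (optionally
 * combined with the nfs4_client_id_uniquifier module parameter) so that
 * it remains stable across address changes, which matters for migration.
 */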
4961static int
4962nfs4_init_nonuniform_client_string(struct nfs_client *clp)
4963{
4964 int result;
4965 size_t len;
4966 char *str;
4967 bool retried = false;
4968
4969 if (clp->cl_owner_id != NULL)
4970 return 0;
4971retry:
4972 rcu_read_lock();
4973 len = 10 + strlen(clp->cl_ipaddr) + 1 +
4974 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
4975 1 +
4976 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
4977 1;
4978 rcu_read_unlock();
4979
4980 if (len > NFS4_OPAQUE_LIMIT + 1)
4981 return -EINVAL;
4982
4983 /*
4984 * Since this string is allocated at mount time, and held until the
4985 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
4986 * about a memory-reclaim deadlock.
4987 */
4988 str = kmalloc(len, GFP_KERNEL);
4989 if (!str)
4990 return -ENOMEM;
4991
4992 rcu_read_lock();
4993 result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
4994 clp->cl_ipaddr,
4995 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
4996 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
4997 rcu_read_unlock();
4998
4999 /* Did something change? */
5000 if (result >= len) {
5001 kfree(str);
5002 if (retried)
5003 return -EINVAL;
5004 retried = true;
5005 goto retry;
5006 }
5007 clp->cl_owner_id = str;
5008 return 0;
5009}
5010
5011static int
5012nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5013{
5014 int result;
5015 size_t len;
5016 char *str;
5017
5018 len = 10 + 10 + 1 + 10 + 1 +
5019 strlen(nfs4_client_id_uniquifier) + 1 +
5020 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5021
5022 if (len > NFS4_OPAQUE_LIMIT + 1)
5023 return -EINVAL;
5024
5025 /*
5026 * Since this string is allocated at mount time, and held until the
5027 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5028 * about a memory-reclaim deadlock.
5029 */
5030 str = kmalloc(len, GFP_KERNEL);
5031 if (!str)
5032 return -ENOMEM;
5033
5034 result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5035 clp->rpc_ops->version, clp->cl_minorversion,
5036 nfs4_client_id_uniquifier,
5037 clp->cl_rpcclient->cl_nodename);
5038 if (result >= len) {
5039 kfree(str);
5040 return -EINVAL;
5041 }
5042 clp->cl_owner_id = str;
5043 return 0;
5044}
5045
5046static int
5047nfs4_init_uniform_client_string(struct nfs_client *clp)
5048{
5049 int result;
5050 size_t len;
5051 char *str;
5052
5053 if (clp->cl_owner_id != NULL)
5054 return 0;
5055
5056 if (nfs4_client_id_uniquifier[0] != '\0')
5057 return nfs4_init_uniquifier_client_string(clp);
5058
5059 len = 10 + 10 + 1 + 10 + 1 +
5060 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5061
5062 if (len > NFS4_OPAQUE_LIMIT + 1)
5063 return -EINVAL;
5064
5065 /*
5066 * Since this string is allocated at mount time, and held until the
5067 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5068 * about a memory-reclaim deadlock.
5069 */
5070 str = kmalloc(len, GFP_KERNEL);
5071 if (!str)
5072 return -ENOMEM;
5073
5074 result = scnprintf(str, len, "Linux NFSv%u.%u %s",
5075 clp->rpc_ops->version, clp->cl_minorversion,
5076 clp->cl_rpcclient->cl_nodename);
5077 if (result >= len) {
5078 kfree(str);
5079 return -EINVAL;
5080 }
5081 clp->cl_owner_id = str;
5082 return 0;
5083}
5084
5085/*
5086 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5087 * services. Advertise one based on the address family of the
5088 * clientaddr.
5089 */
5090static unsigned int
5091nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5092{
5093 if (strchr(clp->cl_ipaddr, ':') != NULL)
5094 return scnprintf(buf, len, "tcp6");
5095 else
5096 return scnprintf(buf, len, "tcp");
5097}
5098
5099static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5100{
5101 struct nfs4_setclientid *sc = calldata;
5102
5103 if (task->tk_status == 0)
5104 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5105}
5106
5107static const struct rpc_call_ops nfs4_setclientid_ops = {
5108 .rpc_call_done = nfs4_setclientid_done,
5109};
5110
5111/**
5112 * nfs4_proc_setclientid - Negotiate client ID
5113 * @clp: state data structure
5114 * @program: RPC program for NFSv4 callback service
5115 * @port: IP port number for NFS4 callback service
5116 * @cred: RPC credential to use for this call
5117 * @res: where to place the result
5118 *
5119 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5120 */
5121int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5122 unsigned short port, struct rpc_cred *cred,
5123 struct nfs4_setclientid_res *res)
5124{
5125 nfs4_verifier sc_verifier;
5126 struct nfs4_setclientid setclientid = {
5127 .sc_verifier = &sc_verifier,
5128 .sc_prog = program,
5129 .sc_clnt = clp,
5130 };
5131 struct rpc_message msg = {
5132 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5133 .rpc_argp = &setclientid,
5134 .rpc_resp = res,
5135 .rpc_cred = cred,
5136 };
5137 struct rpc_task *task;
5138 struct rpc_task_setup task_setup_data = {
5139 .rpc_client = clp->cl_rpcclient,
5140 .rpc_message = &msg,
5141 .callback_ops = &nfs4_setclientid_ops,
5142 .callback_data = &setclientid,
5143 .flags = RPC_TASK_TIMEOUT,
5144 };
5145 int status;
5146
5147 /* nfs_client_id4 */
5148 nfs4_init_boot_verifier(clp, &sc_verifier);
5149
5150 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5151 status = nfs4_init_uniform_client_string(clp);
5152 else
5153 status = nfs4_init_nonuniform_client_string(clp);
5154
5155 if (status)
5156 goto out;
5157
5158 /* cb_client4 */
5159 setclientid.sc_netid_len =
5160 nfs4_init_callback_netid(clp,
5161 setclientid.sc_netid,
5162 sizeof(setclientid.sc_netid));
5163 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5164 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5165 clp->cl_ipaddr, port >> 8, port & 255);
5166
5167 dprintk("NFS call setclientid auth=%s, '%s'\n",
5168 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5169 clp->cl_owner_id);
5170 task = rpc_run_task(&task_setup_data);
5171 if (IS_ERR(task)) {
5172 status = PTR_ERR(task);
5173 goto out;
5174 }
5175 status = task->tk_status;
5176 if (setclientid.sc_cred) {
5177 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5178 put_rpccred(setclientid.sc_cred);
5179 }
5180 rpc_put_task(task);
5181out:
5182 trace_nfs4_setclientid(clp, status);
5183 dprintk("NFS reply setclientid: %d\n", status);
5184 return status;
5185}
5186
5187/**
5188 * nfs4_proc_setclientid_confirm - Confirm client ID
5189 * @clp: state data structure
5190 * @res: result of a previous SETCLIENTID
5191 * @cred: RPC credential to use for this call
5192 *
5193 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5194 */
5195int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5196 struct nfs4_setclientid_res *arg,
5197 struct rpc_cred *cred)
5198{
5199 struct rpc_message msg = {
5200 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5201 .rpc_argp = arg,
5202 .rpc_cred = cred,
5203 };
5204 int status;
5205
5206 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5207 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5208 clp->cl_clientid);
5209 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5210 trace_nfs4_setclientid_confirm(clp, status);
5211 dprintk("NFS reply setclientid_confirm: %d\n", status);
5212 return status;
5213}
5214
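/*
 * DELEGRETURN is issued asynchronously. The completion handler treats
 * stale/revoked stateid errors as success (the delegation is gone either
 * way), renews the lease on success, and cooperates with pNFS
 * return-on-close (roc) via the roc barrier.
 */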
5215struct nfs4_delegreturndata {
5216 struct nfs4_delegreturnargs args;
5217 struct nfs4_delegreturnres res;
5218 struct nfs_fh fh;
5219 nfs4_stateid stateid;
5220 unsigned long timestamp;
5221 struct nfs_fattr fattr;
5222 int rpc_status;
5223 struct inode *inode;
5224 bool roc;
5225 u32 roc_barrier;
5226};
5227
5228static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5229{
5230 struct nfs4_delegreturndata *data = calldata;
5231
5232 if (!nfs4_sequence_done(task, &data->res.seq_res))
5233 return;
5234
5235 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5236 switch (task->tk_status) {
5237 case 0:
5238 renew_lease(data->res.server, data->timestamp);
5239 case -NFS4ERR_ADMIN_REVOKED:
5240 case -NFS4ERR_DELEG_REVOKED:
5241 case -NFS4ERR_BAD_STATEID:
5242 case -NFS4ERR_OLD_STATEID:
5243 case -NFS4ERR_STALE_STATEID:
5244 case -NFS4ERR_EXPIRED:
5245 task->tk_status = 0;
5246 if (data->roc)
5247 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5248 break;
5249 default:
5250 if (nfs4_async_handle_error(task, data->res.server,
5251 NULL, NULL) == -EAGAIN) {
5252 rpc_restart_call_prepare(task);
5253 return;
5254 }
5255 }
5256 data->rpc_status = task->tk_status;
5257}
5258
5259static void nfs4_delegreturn_release(void *calldata)
5260{
5261 struct nfs4_delegreturndata *data = calldata;
5262 struct inode *inode = data->inode;
5263
5264 if (inode) {
5265 if (data->roc)
5266 pnfs_roc_release(inode);
5267 nfs_iput_and_deactive(inode);
5268 }
5269 kfree(calldata);
5270}
5271
5272static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5273{
5274 struct nfs4_delegreturndata *d_data;
5275
5276 d_data = (struct nfs4_delegreturndata *)data;
5277
5278 if (d_data->roc &&
5279 pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task))
5280 return;
5281
5282 nfs4_setup_sequence(d_data->res.server,
5283 &d_data->args.seq_args,
5284 &d_data->res.seq_res,
5285 task);
5286}
5287
5288static const struct rpc_call_ops nfs4_delegreturn_ops = {
5289 .rpc_call_prepare = nfs4_delegreturn_prepare,
5290 .rpc_call_done = nfs4_delegreturn_done,
5291 .rpc_release = nfs4_delegreturn_release,
5292};
5293
5294static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5295{
5296 struct nfs4_delegreturndata *data;
5297 struct nfs_server *server = NFS_SERVER(inode);
5298 struct rpc_task *task;
5299 struct rpc_message msg = {
5300 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5301 .rpc_cred = cred,
5302 };
5303 struct rpc_task_setup task_setup_data = {
5304 .rpc_client = server->client,
5305 .rpc_message = &msg,
5306 .callback_ops = &nfs4_delegreturn_ops,
5307 .flags = RPC_TASK_ASYNC,
5308 };
5309 int status = 0;
5310
5311 data = kzalloc(sizeof(*data), GFP_NOFS);
5312 if (data == NULL)
5313 return -ENOMEM;
5314 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5315 data->args.fhandle = &data->fh;
5316 data->args.stateid = &data->stateid;
5317 data->args.bitmask = server->cache_consistency_bitmask;
5318 nfs_copy_fh(&data->fh, NFS_FH(inode));
5319 nfs4_stateid_copy(&data->stateid, stateid);
5320 data->res.fattr = &data->fattr;
5321 data->res.server = server;
5322 nfs_fattr_init(data->res.fattr);
5323 data->timestamp = jiffies;
5324 data->rpc_status = 0;
5325 data->inode = nfs_igrab_and_active(inode);
5326 if (data->inode)
5327 data->roc = nfs4_roc(inode);
5328
5329 task_setup_data.callback_data = data;
5330 msg.rpc_argp = &data->args;
5331 msg.rpc_resp = &data->res;
5332 task = rpc_run_task(&task_setup_data);
5333 if (IS_ERR(task))
5334 return PTR_ERR(task);
5335 if (!issync)
5336 goto out;
5337 status = nfs4_wait_for_completion_rpc_task(task);
5338 if (status != 0)
5339 goto out;
5340 status = data->rpc_status;
5341 if (status == 0)
5342 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5343 else
5344 nfs_refresh_inode(inode, &data->fattr);
5345out:
5346 rpc_put_task(task);
5347 return status;
5348}
5349
5350int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5351{
5352 struct nfs_server *server = NFS_SERVER(inode);
5353 struct nfs4_exception exception = { };
5354 int err;
5355 do {
5356 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5357 trace_nfs4_delegreturn(inode, err);
5358 switch (err) {
5359 case -NFS4ERR_STALE_STATEID:
5360 case -NFS4ERR_EXPIRED:
5361 case 0:
5362 return 0;
5363 }
5364 err = nfs4_handle_exception(server, err, &exception);
5365 } while (exception.retry);
5366 return err;
5367}
5368
5369#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5370#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5371
5372/*
5373 * sleep, with exponential backoff, and retry the LOCK operation.
5374 */
5375static unsigned long
5376nfs4_set_lock_task_retry(unsigned long timeout)
5377{
5378 freezable_schedule_timeout_killable_unsafe(timeout);
5379 timeout <<= 1;
5380 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5381 return NFS4_LOCK_MAXTIMEOUT;
5382 return timeout;
5383}
5384
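/*
 * LOCKT: test whether a conflicting lock exists. NFS4ERR_DENIED means the
 * server filled in the conflicting lock description, so it is reported to
 * the caller as a normal F_GETLK result rather than as an error.
 */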
5385static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5386{
5387 struct inode *inode = state->inode;
5388 struct nfs_server *server = NFS_SERVER(inode);
5389 struct nfs_client *clp = server->nfs_client;
5390 struct nfs_lockt_args arg = {
5391 .fh = NFS_FH(inode),
5392 .fl = request,
5393 };
5394 struct nfs_lockt_res res = {
5395 .denied = request,
5396 };
5397 struct rpc_message msg = {
5398 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5399 .rpc_argp = &arg,
5400 .rpc_resp = &res,
5401 .rpc_cred = state->owner->so_cred,
5402 };
5403 struct nfs4_lock_state *lsp;
5404 int status;
5405
5406 arg.lock_owner.clientid = clp->cl_clientid;
5407 status = nfs4_set_lock_state(state, request);
5408 if (status != 0)
5409 goto out;
5410 lsp = request->fl_u.nfs4_fl.owner;
5411 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5412 arg.lock_owner.s_dev = server->s_dev;
5413 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5414 switch (status) {
5415 case 0:
5416 request->fl_type = F_UNLCK;
5417 break;
5418 case -NFS4ERR_DENIED:
5419 status = 0;
5420 }
5421 request->fl_ops->fl_release_private(request);
5422 request->fl_ops = NULL;
5423out:
5424 return status;
5425}
5426
5427static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5428{
5429 struct nfs4_exception exception = { };
5430 int err;
5431
5432 do {
5433 err = _nfs4_proc_getlk(state, cmd, request);
5434 trace_nfs4_get_lock(request, state, cmd, err);
5435 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5436 &exception);
5437 } while (exception.retry);
5438 return err;
5439}
5440
5441static int do_vfs_lock(struct file *file, struct file_lock *fl)
5442{
5443 int res = 0;
5444 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
5445 case FL_POSIX:
5446 res = posix_lock_file_wait(file, fl);
5447 break;
5448 case FL_FLOCK:
5449 res = flock_lock_file_wait(file, fl);
5450 break;
5451 default:
5452 BUG();
5453 }
5454 return res;
5455}
5456
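/*
 * LOCKU (unlock) is issued asynchronously; the local VFS lock is dropped
 * in nfs4_proc_unlck() before the RPC is started. nfs4_locku_done()
 * renews the lease, updates the lock stateid from the reply, and restarts
 * the call if the stateid it sent has already been superseded.
 */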
5457struct nfs4_unlockdata {
5458 struct nfs_locku_args arg;
5459 struct nfs_locku_res res;
5460 struct nfs4_lock_state *lsp;
5461 struct nfs_open_context *ctx;
5462 struct file_lock fl;
5463 const struct nfs_server *server;
5464 unsigned long timestamp;
5465};
5466
5467static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5468 struct nfs_open_context *ctx,
5469 struct nfs4_lock_state *lsp,
5470 struct nfs_seqid *seqid)
5471{
5472 struct nfs4_unlockdata *p;
5473 struct inode *inode = lsp->ls_state->inode;
5474
5475 p = kzalloc(sizeof(*p), GFP_NOFS);
5476 if (p == NULL)
5477 return NULL;
5478 p->arg.fh = NFS_FH(inode);
5479 p->arg.fl = &p->fl;
5480 p->arg.seqid = seqid;
5481 p->res.seqid = seqid;
5482 p->lsp = lsp;
5483 atomic_inc(&lsp->ls_count);
5484 /* Ensure we don't close file until we're done freeing locks! */
5485 p->ctx = get_nfs_open_context(ctx);
5486 get_file(fl->fl_file);
5487 memcpy(&p->fl, fl, sizeof(p->fl));
5488 p->server = NFS_SERVER(inode);
5489 return p;
5490}
5491
5492static void nfs4_locku_release_calldata(void *data)
5493{
5494 struct nfs4_unlockdata *calldata = data;
5495 nfs_free_seqid(calldata->arg.seqid);
5496 nfs4_put_lock_state(calldata->lsp);
5497 put_nfs_open_context(calldata->ctx);
5498 fput(calldata->fl.fl_file);
5499 kfree(calldata);
5500}
5501
5502static void nfs4_locku_done(struct rpc_task *task, void *data)
5503{
5504 struct nfs4_unlockdata *calldata = data;
5505
5506 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5507 return;
5508 switch (task->tk_status) {
5509 case 0:
5510 renew_lease(calldata->server, calldata->timestamp);
5511 do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
5512 if (nfs4_update_lock_stateid(calldata->lsp,
5513 &calldata->res.stateid))
5514 break;
5515 case -NFS4ERR_BAD_STATEID:
5516 case -NFS4ERR_OLD_STATEID:
5517 case -NFS4ERR_STALE_STATEID:
5518 case -NFS4ERR_EXPIRED:
5519 if (!nfs4_stateid_match(&calldata->arg.stateid,
5520 &calldata->lsp->ls_stateid))
5521 rpc_restart_call_prepare(task);
5522 break;
5523 default:
5524 if (nfs4_async_handle_error(task, calldata->server,
5525 NULL, NULL) == -EAGAIN)
5526 rpc_restart_call_prepare(task);
5527 }
5528 nfs_release_seqid(calldata->arg.seqid);
5529}
5530
5531static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5532{
5533 struct nfs4_unlockdata *calldata = data;
5534
5535 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5536 goto out_wait;
5537 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5538 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5539 /* Note: exit _without_ running nfs4_locku_done */
5540 goto out_no_action;
5541 }
5542 calldata->timestamp = jiffies;
5543 if (nfs4_setup_sequence(calldata->server,
5544 &calldata->arg.seq_args,
5545 &calldata->res.seq_res,
5546 task) != 0)
5547 nfs_release_seqid(calldata->arg.seqid);
5548 return;
5549out_no_action:
5550 task->tk_action = NULL;
5551out_wait:
5552 nfs4_sequence_done(task, &calldata->res.seq_res);
5553}
5554
5555static const struct rpc_call_ops nfs4_locku_ops = {
5556 .rpc_call_prepare = nfs4_locku_prepare,
5557 .rpc_call_done = nfs4_locku_done,
5558 .rpc_release = nfs4_locku_release_calldata,
5559};
5560
5561static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5562 struct nfs_open_context *ctx,
5563 struct nfs4_lock_state *lsp,
5564 struct nfs_seqid *seqid)
5565{
5566 struct nfs4_unlockdata *data;
5567 struct rpc_message msg = {
5568 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5569 .rpc_cred = ctx->cred,
5570 };
5571 struct rpc_task_setup task_setup_data = {
5572 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5573 .rpc_message = &msg,
5574 .callback_ops = &nfs4_locku_ops,
5575 .workqueue = nfsiod_workqueue,
5576 .flags = RPC_TASK_ASYNC,
5577 };
5578
5579 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5580 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5581
5582 /* Ensure this is an unlock - when canceling a lock, the
5583 * canceled lock is passed in, and it won't be an unlock.
5584 */
5585 fl->fl_type = F_UNLCK;
5586
5587 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5588 if (data == NULL) {
5589 nfs_free_seqid(seqid);
5590 return ERR_PTR(-ENOMEM);
5591 }
5592
5593 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5594 msg.rpc_argp = &data->arg;
5595 msg.rpc_resp = &data->res;
5596 task_setup_data.callback_data = data;
5597 return rpc_run_task(&task_setup_data);
5598}
5599
5600static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5601{
5602 struct inode *inode = state->inode;
5603 struct nfs4_state_owner *sp = state->owner;
5604 struct nfs_inode *nfsi = NFS_I(inode);
5605 struct nfs_seqid *seqid;
5606 struct nfs4_lock_state *lsp;
5607 struct rpc_task *task;
5608 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5609 int status = 0;
5610 unsigned char fl_flags = request->fl_flags;
5611
5612 status = nfs4_set_lock_state(state, request);
5613 /* Unlock _before_ we do the RPC call */
5614 request->fl_flags |= FL_EXISTS;
5615 /* Exclude nfs_delegation_claim_locks() */
5616 mutex_lock(&sp->so_delegreturn_mutex);
5617 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5618 down_read(&nfsi->rwsem);
5619 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
5620 up_read(&nfsi->rwsem);
5621 mutex_unlock(&sp->so_delegreturn_mutex);
5622 goto out;
5623 }
5624 up_read(&nfsi->rwsem);
5625 mutex_unlock(&sp->so_delegreturn_mutex);
5626 if (status != 0)
5627 goto out;
5628 /* Is this a delegated lock? */
5629 lsp = request->fl_u.nfs4_fl.owner;
5630 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5631 goto out;
5632 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5633 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5634 status = -ENOMEM;
5635 if (IS_ERR(seqid))
5636 goto out;
5637 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5638 status = PTR_ERR(task);
5639 if (IS_ERR(task))
5640 goto out;
5641 status = nfs4_wait_for_completion_rpc_task(task);
5642 rpc_put_task(task);
5643out:
5644 request->fl_flags = fl_flags;
5645 trace_nfs4_unlock(request, state, F_SETLK, status);
5646 return status;
5647}
5648
5649struct nfs4_lockdata {
5650 struct nfs_lock_args arg;
5651 struct nfs_lock_res res;
5652 struct nfs4_lock_state *lsp;
5653 struct nfs_open_context *ctx;
5654 struct file_lock fl;
5655 unsigned long timestamp;
5656 int rpc_status;
5657 int cancelled;
5658 struct nfs_server *server;
5659};
5660
5661static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5662 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5663 gfp_t gfp_mask)
5664{
5665 struct nfs4_lockdata *p;
5666 struct inode *inode = lsp->ls_state->inode;
5667 struct nfs_server *server = NFS_SERVER(inode);
5668 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5669
5670 p = kzalloc(sizeof(*p), gfp_mask);
5671 if (p == NULL)
5672 return NULL;
5673
5674 p->arg.fh = NFS_FH(inode);
5675 p->arg.fl = &p->fl;
5676 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5677 if (IS_ERR(p->arg.open_seqid))
5678 goto out_free;
5679 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5680 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5681 if (IS_ERR(p->arg.lock_seqid))
5682 goto out_free_seqid;
5683 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5684 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5685 p->arg.lock_owner.s_dev = server->s_dev;
5686 p->res.lock_seqid = p->arg.lock_seqid;
5687 p->lsp = lsp;
5688 p->server = server;
5689 atomic_inc(&lsp->ls_count);
5690 p->ctx = get_nfs_open_context(ctx);
5691 get_file(fl->fl_file);
5692 memcpy(&p->fl, fl, sizeof(p->fl));
5693 return p;
5694out_free_seqid:
5695 nfs_free_seqid(p->arg.open_seqid);
5696out_free:
5697 kfree(p);
5698 return NULL;
5699}
5700
5701static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5702{
5703 struct nfs4_lockdata *data = calldata;
5704 struct nfs4_state *state = data->lsp->ls_state;
5705
5706 dprintk("%s: begin!\n", __func__);
5707 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5708 goto out_wait;
5709 /* Do we need to do an open_to_lock_owner? */
5710 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5711 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5712 goto out_release_lock_seqid;
5713 }
5714 nfs4_stateid_copy(&data->arg.open_stateid,
5715 &state->open_stateid);
5716 data->arg.new_lock_owner = 1;
5717 data->res.open_seqid = data->arg.open_seqid;
5718 } else {
5719 data->arg.new_lock_owner = 0;
5720 nfs4_stateid_copy(&data->arg.lock_stateid,
5721 &data->lsp->ls_stateid);
5722 }
5723 if (!nfs4_valid_open_stateid(state)) {
5724 data->rpc_status = -EBADF;
5725 task->tk_action = NULL;
5726 goto out_release_open_seqid;
5727 }
5728 data->timestamp = jiffies;
5729 if (nfs4_setup_sequence(data->server,
5730 &data->arg.seq_args,
5731 &data->res.seq_res,
5732 task) == 0)
5733 return;
5734out_release_open_seqid:
5735 nfs_release_seqid(data->arg.open_seqid);
5736out_release_lock_seqid:
5737 nfs_release_seqid(data->arg.lock_seqid);
5738out_wait:
5739 nfs4_sequence_done(task, &data->res.seq_res);
5740 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5741}
5742
5743static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5744{
5745 struct nfs4_lockdata *data = calldata;
5746 struct nfs4_lock_state *lsp = data->lsp;
5747
5748 dprintk("%s: begin!\n", __func__);
5749
5750 if (!nfs4_sequence_done(task, &data->res.seq_res))
5751 return;
5752
5753 data->rpc_status = task->tk_status;
5754 switch (task->tk_status) {
5755 case 0:
5756 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5757 data->timestamp);
5758 if (data->arg.new_lock) {
5759 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5760 if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
5761 rpc_restart_call_prepare(task);
5762 break;
5763 }
5764 }
5765 if (data->arg.new_lock_owner != 0) {
5766 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5767 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5768 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5769 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5770 rpc_restart_call_prepare(task);
5771 break;
5772 case -NFS4ERR_BAD_STATEID:
5773 case -NFS4ERR_OLD_STATEID:
5774 case -NFS4ERR_STALE_STATEID:
5775 case -NFS4ERR_EXPIRED:
5776 if (data->arg.new_lock_owner != 0) {
5777 if (!nfs4_stateid_match(&data->arg.open_stateid,
5778 &lsp->ls_state->open_stateid))
5779 rpc_restart_call_prepare(task);
5780 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5781 &lsp->ls_stateid))
5782 rpc_restart_call_prepare(task);
5783 }
5784 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5785}
5786
5787static void nfs4_lock_release(void *calldata)
5788{
5789 struct nfs4_lockdata *data = calldata;
5790
5791 dprintk("%s: begin!\n", __func__);
5792 nfs_free_seqid(data->arg.open_seqid);
5793 if (data->cancelled != 0) {
5794 struct rpc_task *task;
5795 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5796 data->arg.lock_seqid);
5797 if (!IS_ERR(task))
5798 rpc_put_task_async(task);
5799 dprintk("%s: cancelling lock!\n", __func__);
5800 } else
5801 nfs_free_seqid(data->arg.lock_seqid);
5802 nfs4_put_lock_state(data->lsp);
5803 put_nfs_open_context(data->ctx);
5804 fput(data->fl.fl_file);
5805 kfree(data);
5806 dprintk("%s: done!\n", __func__);
5807}
5808
5809static const struct rpc_call_ops nfs4_lock_ops = {
5810 .rpc_call_prepare = nfs4_lock_prepare,
5811 .rpc_call_done = nfs4_lock_done,
5812 .rpc_release = nfs4_lock_release,
5813};
5814
5815static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5816{
5817 switch (error) {
5818 case -NFS4ERR_ADMIN_REVOKED:
5819 case -NFS4ERR_BAD_STATEID:
5820 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5821 if (new_lock_owner != 0 ||
5822 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5823 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5824 break;
5825 case -NFS4ERR_STALE_STATEID:
5826 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
 		/* Fall through */
5827 	case -NFS4ERR_EXPIRED:
5828 		nfs4_schedule_lease_recovery(server->nfs_client);
5829 	}
5830}
5831
5832static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5833{
5834 struct nfs4_lockdata *data;
5835 struct rpc_task *task;
5836 struct rpc_message msg = {
5837 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5838 .rpc_cred = state->owner->so_cred,
5839 };
5840 struct rpc_task_setup task_setup_data = {
5841 .rpc_client = NFS_CLIENT(state->inode),
5842 .rpc_message = &msg,
5843 .callback_ops = &nfs4_lock_ops,
5844 .workqueue = nfsiod_workqueue,
5845 .flags = RPC_TASK_ASYNC,
5846 };
5847 int ret;
5848
5849 dprintk("%s: begin!\n", __func__);
5850 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
5851 fl->fl_u.nfs4_fl.owner,
5852 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
5853 if (data == NULL)
5854 return -ENOMEM;
5855 if (IS_SETLKW(cmd))
5856 data->arg.block = 1;
5857 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5858 msg.rpc_argp = &data->arg;
5859 msg.rpc_resp = &data->res;
5860 task_setup_data.callback_data = data;
5861 if (recovery_type > NFS_LOCK_NEW) {
5862 if (recovery_type == NFS_LOCK_RECLAIM)
5863 data->arg.reclaim = NFS_LOCK_RECLAIM;
5864 nfs4_set_sequence_privileged(&data->arg.seq_args);
5865 } else
5866 data->arg.new_lock = 1;
5867 task = rpc_run_task(&task_setup_data);
5868 if (IS_ERR(task))
5869 return PTR_ERR(task);
5870 ret = nfs4_wait_for_completion_rpc_task(task);
5871 if (ret == 0) {
5872 ret = data->rpc_status;
5873 if (ret)
5874 nfs4_handle_setlk_error(data->server, data->lsp,
5875 data->arg.new_lock_owner, ret);
5876 } else
5877 data->cancelled = 1;
5878 rpc_put_task(task);
5879 dprintk("%s: done, ret = %d!\n", __func__, ret);
5880 return ret;
5881}
5882
5883static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
5884{
5885 struct nfs_server *server = NFS_SERVER(state->inode);
5886 struct nfs4_exception exception = {
5887 .inode = state->inode,
5888 };
5889 int err;
5890
5891 do {
5892 /* Cache the lock if possible... */
5893 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5894 return 0;
5895 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
5896 trace_nfs4_lock_reclaim(request, state, F_SETLK, err);
5897 if (err != -NFS4ERR_DELAY)
5898 break;
5899 nfs4_handle_exception(server, err, &exception);
5900 } while (exception.retry);
5901 return err;
5902}
5903
5904static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
5905{
5906 struct nfs_server *server = NFS_SERVER(state->inode);
5907 struct nfs4_exception exception = {
5908 .inode = state->inode,
5909 };
5910 int err;
5911
5912 err = nfs4_set_lock_state(state, request);
5913 if (err != 0)
5914 return err;
5915 if (!recover_lost_locks) {
5916 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
5917 return 0;
5918 }
5919 do {
5920 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
5921 return 0;
5922 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
5923 trace_nfs4_lock_expired(request, state, F_SETLK, err);
5924 switch (err) {
5925 default:
5926 goto out;
5927 case -NFS4ERR_GRACE:
5928 case -NFS4ERR_DELAY:
5929 nfs4_handle_exception(server, err, &exception);
5930 err = 0;
5931 }
5932 } while (exception.retry);
5933out:
5934 return err;
5935}
5936
5937#if defined(CONFIG_NFS_V4_1)
5938/**
5939 * nfs41_check_expired_locks - possibly free a lock stateid
5940 *
5941 * @state: NFSv4 state for an inode
5942 *
5943 * Returns NFS_OK if recovery for this stateid is now finished.
5944 * Otherwise a negative NFS4ERR value is returned.
5945 */
5946static int nfs41_check_expired_locks(struct nfs4_state *state)
5947{
5948 int status, ret = -NFS4ERR_BAD_STATEID;
5949 struct nfs4_lock_state *lsp;
5950 struct nfs_server *server = NFS_SERVER(state->inode);
5951
5952 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
5953 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
5954 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
5955
5956 status = nfs41_test_stateid(server,
5957 &lsp->ls_stateid,
5958 cred);
5959 trace_nfs4_test_lock_stateid(state, lsp, status);
5960 if (status != NFS_OK) {
5961 /* Free the stateid unless the server
5962 * informs us the stateid is unrecognized. */
5963 if (status != -NFS4ERR_BAD_STATEID)
5964 nfs41_free_stateid(server,
5965 &lsp->ls_stateid,
5966 cred);
5967 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5968 ret = status;
5969 }
5970 }
5971 	}
5972
5973 return ret;
5974}
5975
5976static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
5977{
5978 int status = NFS_OK;
5979
5980 if (test_bit(LK_STATE_IN_USE, &state->flags))
5981 status = nfs41_check_expired_locks(state);
5982 if (status != NFS_OK)
5983 status = nfs4_lock_expired(state, request);
5984 return status;
5985}
5986#endif
5987
5988static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5989{
5990 struct nfs_inode *nfsi = NFS_I(state->inode);
5991 unsigned char fl_flags = request->fl_flags;
5992 int status = -ENOLCK;
5993
5994 if ((fl_flags & FL_POSIX) &&
5995 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
5996 goto out;
5997 /* Is this a delegated open? */
5998 status = nfs4_set_lock_state(state, request);
5999 if (status != 0)
6000 goto out;
6001 request->fl_flags |= FL_ACCESS;
6002 status = do_vfs_lock(request->fl_file, request);
6003 if (status < 0)
6004 goto out;
6005 down_read(&nfsi->rwsem);
6006 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6007 /* Yes: cache locks! */
6008 /* ...but avoid races with delegation recall... */
6009 request->fl_flags = fl_flags & ~FL_SLEEP;
6010 status = do_vfs_lock(request->fl_file, request);
6011 up_read(&nfsi->rwsem);
6012 goto out;
6013 }
6014 up_read(&nfsi->rwsem);
6015 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6016out:
6017 request->fl_flags = fl_flags;
6018 return status;
6019}
6020
6021static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6022{
6023 struct nfs4_exception exception = {
6024 .state = state,
6025 .inode = state->inode,
6026 };
6027 int err;
6028
6029 do {
6030 err = _nfs4_proc_setlk(state, cmd, request);
6031 trace_nfs4_set_lock(request, state, cmd, err);
6032 if (err == -NFS4ERR_DENIED)
6033 err = -EAGAIN;
6034 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6035 err, &exception);
6036 } while (exception.retry);
6037 return err;
6038}
6039
6040static int
6041nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6042{
6043 struct nfs_open_context *ctx;
6044 struct nfs4_state *state;
6045 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6046 int status;
6047
6048 /* verify open state */
6049 ctx = nfs_file_open_context(filp);
6050 state = ctx->state;
6051
6052 if (request->fl_start < 0 || request->fl_end < 0)
6053 return -EINVAL;
6054
6055 if (IS_GETLK(cmd)) {
6056 if (state != NULL)
6057 return nfs4_proc_getlk(state, F_GETLK, request);
6058 return 0;
6059 }
6060
6061 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6062 return -EINVAL;
6063
6064 if (request->fl_type == F_UNLCK) {
6065 if (state != NULL)
6066 return nfs4_proc_unlck(state, cmd, request);
6067 return 0;
6068 }
6069
6070 if (state == NULL)
6071 return -ENOLCK;
6072 /*
6073 * Don't rely on the VFS having checked the file open mode,
6074 * since it won't do this for flock() locks.
6075 */
6076 switch (request->fl_type) {
6077 case F_RDLCK:
6078 if (!(filp->f_mode & FMODE_READ))
6079 return -EBADF;
6080 break;
6081 case F_WRLCK:
6082 if (!(filp->f_mode & FMODE_WRITE))
6083 return -EBADF;
6084 }
6085
6086 do {
6087 status = nfs4_proc_setlk(state, cmd, request);
6088 if ((status != -EAGAIN) || IS_SETLK(cmd))
6089 break;
6090 timeout = nfs4_set_lock_task_retry(timeout);
6091 status = -ERESTARTSYS;
6092 if (signalled())
6093 break;
6094 } while(status < 0);
6095 return status;
6096}
6097
6098int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6099{
6100 struct nfs_server *server = NFS_SERVER(state->inode);
6101 int err;
6102
6103 err = nfs4_set_lock_state(state, fl);
6104 if (err != 0)
6105 return err;
6106 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6107 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6108}
6109
6110struct nfs_release_lockowner_data {
6111 struct nfs4_lock_state *lsp;
6112 struct nfs_server *server;
6113 struct nfs_release_lockowner_args args;
6114 struct nfs_release_lockowner_res res;
6115 unsigned long timestamp;
6116};
6117
6118static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6119{
6120 struct nfs_release_lockowner_data *data = calldata;
6121 struct nfs_server *server = data->server;
6122 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6123 &data->args.seq_args, &data->res.seq_res, task);
6124 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6125 data->timestamp = jiffies;
6126}
6127
6128static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6129{
6130 struct nfs_release_lockowner_data *data = calldata;
6131 struct nfs_server *server = data->server;
6132
6133 nfs40_sequence_done(task, &data->res.seq_res);
6134
6135 switch (task->tk_status) {
6136 case 0:
6137 renew_lease(server, data->timestamp);
6138 break;
6139 case -NFS4ERR_STALE_CLIENTID:
6140 case -NFS4ERR_EXPIRED:
6141 nfs4_schedule_lease_recovery(server->nfs_client);
6142 break;
6143 case -NFS4ERR_LEASE_MOVED:
6144 case -NFS4ERR_DELAY:
6145 if (nfs4_async_handle_error(task, server,
6146 NULL, NULL) == -EAGAIN)
6147 rpc_restart_call_prepare(task);
6148 }
6149}
6150
6151static void nfs4_release_lockowner_release(void *calldata)
6152{
6153 struct nfs_release_lockowner_data *data = calldata;
6154 nfs4_free_lock_state(data->server, data->lsp);
6155 kfree(calldata);
6156}
6157
6158static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6159 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6160 .rpc_call_done = nfs4_release_lockowner_done,
6161 .rpc_release = nfs4_release_lockowner_release,
6162};
6163
6164static void
6165nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6166{
6167 struct nfs_release_lockowner_data *data;
6168 struct rpc_message msg = {
6169 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6170 };
6171
6172 if (server->nfs_client->cl_mvops->minor_version != 0)
6173 return;
6174
6175 data = kmalloc(sizeof(*data), GFP_NOFS);
6176 if (!data)
6177 return;
6178 data->lsp = lsp;
6179 data->server = server;
6180 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6181 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6182 data->args.lock_owner.s_dev = server->s_dev;
6183
6184 msg.rpc_argp = &data->args;
6185 msg.rpc_resp = &data->res;
6186 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6187 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6188}
6189
6190#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6191
6192static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
6193 const void *buf, size_t buflen,
6194 int flags, int type)
6195{
6196 if (strcmp(key, "") != 0)
6197 return -EINVAL;
6198
6199 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
6200}
6201
6202static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
6203 void *buf, size_t buflen, int type)
6204{
6205 if (strcmp(key, "") != 0)
6206 return -EINVAL;
6207
6208 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
6209}
6210
6211static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
6212 size_t list_len, const char *name,
6213 size_t name_len, int type)
6214{
6215 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
6216
6217 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))))
6218 return 0;
6219
6220 if (list && len <= list_len)
6221 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
6222 return len;
6223}
6224
6225#ifdef CONFIG_NFS_V4_SECURITY_LABEL
6226static inline int nfs4_server_supports_labels(struct nfs_server *server)
6227{
6228 return server->caps & NFS_CAP_SECURITY_LABEL;
6229}
6230
6231static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key,
6232 const void *buf, size_t buflen,
6233 int flags, int type)
6234{
6235 if (security_ismaclabel(key))
6236 return nfs4_set_security_label(dentry, buf, buflen);
6237
6238 return -EOPNOTSUPP;
6239}
6240
6241static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key,
6242 void *buf, size_t buflen, int type)
6243{
6244 if (security_ismaclabel(key))
6245 return nfs4_get_security_label(d_inode(dentry), buf, buflen);
6246 return -EOPNOTSUPP;
6247}
6248
6249static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list,
6250 size_t list_len, const char *name,
6251 size_t name_len, int type)
6252{
6253 size_t len = 0;
6254
6255 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
6256 len = security_inode_listsecurity(d_inode(dentry), NULL, 0);
6257 if (list && len <= list_len)
6258 security_inode_listsecurity(d_inode(dentry), list, len);
6259 }
6260 return len;
6261}
6262
6263static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6264 .prefix = XATTR_SECURITY_PREFIX,
6265 .list = nfs4_xattr_list_nfs4_label,
6266 .get = nfs4_xattr_get_nfs4_label,
6267 .set = nfs4_xattr_set_nfs4_label,
6268};
6269#endif
6270
6271
6272/*
6273 * nfs_fhget will use either the mounted_on_fileid or the fileid
6274 */
6275static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6276{
6277 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6278 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6279 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6280 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6281 return;
6282
6283 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6284 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6285 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6286 fattr->nlink = 2;
6287}
6288
6289static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6290 const struct qstr *name,
6291 struct nfs4_fs_locations *fs_locations,
6292 struct page *page)
6293{
6294 struct nfs_server *server = NFS_SERVER(dir);
6295 u32 bitmask[3] = {
6296 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6297 };
6298 struct nfs4_fs_locations_arg args = {
6299 .dir_fh = NFS_FH(dir),
6300 .name = name,
6301 .page = page,
6302 .bitmask = bitmask,
6303 };
6304 struct nfs4_fs_locations_res res = {
6305 .fs_locations = fs_locations,
6306 };
6307 struct rpc_message msg = {
6308 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6309 .rpc_argp = &args,
6310 .rpc_resp = &res,
6311 };
6312 int status;
6313
6314 dprintk("%s: start\n", __func__);
6315
6316 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6317 * is not supported */
6318 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6319 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6320 else
6321 bitmask[0] |= FATTR4_WORD0_FILEID;
6322
6323 nfs_fattr_init(&fs_locations->fattr);
6324 fs_locations->server = server;
6325 fs_locations->nlocations = 0;
6326 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6327 dprintk("%s: returned status = %d\n", __func__, status);
6328 return status;
6329}
6330
6331int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6332 const struct qstr *name,
6333 struct nfs4_fs_locations *fs_locations,
6334 struct page *page)
6335{
6336 struct nfs4_exception exception = { };
6337 int err;
6338 do {
6339 err = _nfs4_proc_fs_locations(client, dir, name,
6340 fs_locations, page);
6341 trace_nfs4_get_fs_locations(dir, name, err);
6342 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6343 &exception);
6344 } while (exception.retry);
6345 return err;
6346}
6347
6348/*
6349 * This operation also signals the server that this client is
6350 * performing migration recovery. The server can stop returning
6351 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6352 * appended to this compound to identify the client ID which is
6353 * performing recovery.
6354 */
6355static int _nfs40_proc_get_locations(struct inode *inode,
6356 struct nfs4_fs_locations *locations,
6357 struct page *page, struct rpc_cred *cred)
6358{
6359 struct nfs_server *server = NFS_SERVER(inode);
6360 struct rpc_clnt *clnt = server->client;
6361 u32 bitmask[2] = {
6362 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6363 };
6364 struct nfs4_fs_locations_arg args = {
6365 .clientid = server->nfs_client->cl_clientid,
6366 .fh = NFS_FH(inode),
6367 .page = page,
6368 .bitmask = bitmask,
6369 .migration = 1, /* skip LOOKUP */
6370 .renew = 1, /* append RENEW */
6371 };
6372 struct nfs4_fs_locations_res res = {
6373 .fs_locations = locations,
6374 .migration = 1,
6375 .renew = 1,
6376 };
6377 struct rpc_message msg = {
6378 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6379 .rpc_argp = &args,
6380 .rpc_resp = &res,
6381 .rpc_cred = cred,
6382 };
6383 unsigned long now = jiffies;
6384 int status;
6385
6386 nfs_fattr_init(&locations->fattr);
6387 locations->server = server;
6388 locations->nlocations = 0;
6389
6390 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6391 nfs4_set_sequence_privileged(&args.seq_args);
6392 status = nfs4_call_sync_sequence(clnt, server, &msg,
6393 &args.seq_args, &res.seq_res);
6394 if (status)
6395 return status;
6396
6397 renew_lease(server, now);
6398 return 0;
6399}
6400
6401#ifdef CONFIG_NFS_V4_1
6402
6403/*
6404 * This operation also signals the server that this client is
6405 * performing migration recovery. The server can stop asserting
6406 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6407 * performing this operation is identified in the SEQUENCE
6408 * operation in this compound.
6409 *
6410 * When the client supports GETATTR(fs_locations_info), it can
6411 * be plumbed in here.
6412 */
6413static int _nfs41_proc_get_locations(struct inode *inode,
6414 struct nfs4_fs_locations *locations,
6415 struct page *page, struct rpc_cred *cred)
6416{
6417 struct nfs_server *server = NFS_SERVER(inode);
6418 struct rpc_clnt *clnt = server->client;
6419 u32 bitmask[2] = {
6420 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6421 };
6422 struct nfs4_fs_locations_arg args = {
6423 .fh = NFS_FH(inode),
6424 .page = page,
6425 .bitmask = bitmask,
6426 .migration = 1, /* skip LOOKUP */
6427 };
6428 struct nfs4_fs_locations_res res = {
6429 .fs_locations = locations,
6430 .migration = 1,
6431 };
6432 struct rpc_message msg = {
6433 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6434 .rpc_argp = &args,
6435 .rpc_resp = &res,
6436 .rpc_cred = cred,
6437 };
6438 int status;
6439
6440 nfs_fattr_init(&locations->fattr);
6441 locations->server = server;
6442 locations->nlocations = 0;
6443
6444 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6445 nfs4_set_sequence_privileged(&args.seq_args);
6446 status = nfs4_call_sync_sequence(clnt, server, &msg,
6447 &args.seq_args, &res.seq_res);
6448 if (status == NFS4_OK &&
6449 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6450 status = -NFS4ERR_LEASE_MOVED;
6451 return status;
6452}
6453
6454#endif /* CONFIG_NFS_V4_1 */
6455
6456/**
6457 * nfs4_proc_get_locations - discover locations for a migrated FSID
6458 * @inode: inode on FSID that is migrating
6459 * @locations: result of query
6460 * @page: buffer
6461 * @cred: credential to use for this operation
6462 *
6463 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6464 * operation failed, or a negative errno if a local error occurred.
6465 *
6466 * On success, "locations" is filled in, but if the server has
6467 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6468 * asserted.
6469 *
6470 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6471 * from this client that require migration recovery.
6472 */
6473int nfs4_proc_get_locations(struct inode *inode,
6474 struct nfs4_fs_locations *locations,
6475 struct page *page, struct rpc_cred *cred)
6476{
6477 struct nfs_server *server = NFS_SERVER(inode);
6478 struct nfs_client *clp = server->nfs_client;
6479 const struct nfs4_mig_recovery_ops *ops =
6480 clp->cl_mvops->mig_recovery_ops;
6481 struct nfs4_exception exception = { };
6482 int status;
6483
6484 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6485 (unsigned long long)server->fsid.major,
6486 (unsigned long long)server->fsid.minor,
6487 clp->cl_hostname);
6488 nfs_display_fhandle(NFS_FH(inode), __func__);
6489
6490 do {
6491 status = ops->get_locations(inode, locations, page, cred);
6492 if (status != -NFS4ERR_DELAY)
6493 break;
6494 nfs4_handle_exception(server, status, &exception);
6495 } while (exception.retry);
6496 return status;
6497}
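
/*
 * Illustrative sketch only: a hypothetical caller of nfs4_proc_get_locations()
 * following the contract documented above -- allocate the result buffers,
 * make the call, and treat a reply without NFS_ATTR_FATTR_V4_LOCATIONS as
 * "no locations available".  The helper name nfs4_example_discover_locations
 * is made up and is not used anywhere in this file.
 */
static int __maybe_unused nfs4_example_discover_locations(struct inode *inode,
							   struct rpc_cred *cred)
{
	struct nfs4_fs_locations *locations = NULL;
	struct page *page;
	int status = -ENOMEM;

	page = alloc_page(GFP_KERNEL);
	if (page != NULL)
		locations = kmalloc(sizeof(*locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_get_locations(inode, locations, page, cred);
	if (status == NFS4_OK &&
	    !(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS))
		status = -ENOENT;	/* server supplied no locations */
out:
	if (page != NULL)
		__free_page(page);
	kfree(locations);
	return status;
}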
6498
6499/*
6500 * This operation also signals the server that this client is
6501 * performing "lease moved" recovery. The server can stop
6502 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6503 * is appended to this compound to identify the client ID which is
6504 * performing recovery.
6505 */
6506static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6507{
6508 struct nfs_server *server = NFS_SERVER(inode);
6509 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6510 struct rpc_clnt *clnt = server->client;
6511 struct nfs4_fsid_present_arg args = {
6512 .fh = NFS_FH(inode),
6513 .clientid = clp->cl_clientid,
6514 .renew = 1, /* append RENEW */
6515 };
6516 struct nfs4_fsid_present_res res = {
6517 .renew = 1,
6518 };
6519 struct rpc_message msg = {
6520 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6521 .rpc_argp = &args,
6522 .rpc_resp = &res,
6523 .rpc_cred = cred,
6524 };
6525 unsigned long now = jiffies;
6526 int status;
6527
6528 res.fh = nfs_alloc_fhandle();
6529 if (res.fh == NULL)
6530 return -ENOMEM;
6531
6532 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6533 nfs4_set_sequence_privileged(&args.seq_args);
6534 status = nfs4_call_sync_sequence(clnt, server, &msg,
6535 &args.seq_args, &res.seq_res);
6536 nfs_free_fhandle(res.fh);
6537 if (status)
6538 return status;
6539
6540 do_renew_lease(clp, now);
6541 return 0;
6542}
6543
6544#ifdef CONFIG_NFS_V4_1
6545
6546/*
6547 * This operation also signals the server that this client is
6548 * performing "lease moved" recovery. The server can stop asserting
6549 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6550 * this operation is identified in the SEQUENCE operation in this
6551 * compound.
6552 */
6553static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6554{
6555 struct nfs_server *server = NFS_SERVER(inode);
6556 struct rpc_clnt *clnt = server->client;
6557 struct nfs4_fsid_present_arg args = {
6558 .fh = NFS_FH(inode),
6559 };
6560 struct nfs4_fsid_present_res res = {
6561 };
6562 struct rpc_message msg = {
6563 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6564 .rpc_argp = &args,
6565 .rpc_resp = &res,
6566 .rpc_cred = cred,
6567 };
6568 int status;
6569
6570 res.fh = nfs_alloc_fhandle();
6571 if (res.fh == NULL)
6572 return -ENOMEM;
6573
6574 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6575 nfs4_set_sequence_privileged(&args.seq_args);
6576 status = nfs4_call_sync_sequence(clnt, server, &msg,
6577 &args.seq_args, &res.seq_res);
6578 nfs_free_fhandle(res.fh);
6579 if (status == NFS4_OK &&
6580 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6581 status = -NFS4ERR_LEASE_MOVED;
6582 return status;
6583}
6584
6585#endif /* CONFIG_NFS_V4_1 */
6586
6587/**
6588 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6589 * @inode: inode on FSID to check
6590 * @cred: credential to use for this operation
6591 *
6592 * Server indicates whether the FSID is present, moved, or not
6593 * recognized. This operation is necessary to clear a LEASE_MOVED
6594 * condition for this client ID.
6595 *
6596 * Returns NFS4_OK if the FSID is present on this server,
6597 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6598 * NFS4ERR code if some error occurred on the server, or a
6599 * negative errno if a local failure occurred.
6600 */
6601int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6602{
6603 struct nfs_server *server = NFS_SERVER(inode);
6604 struct nfs_client *clp = server->nfs_client;
6605 const struct nfs4_mig_recovery_ops *ops =
6606 clp->cl_mvops->mig_recovery_ops;
6607 struct nfs4_exception exception = { };
6608 int status;
6609
6610 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6611 (unsigned long long)server->fsid.major,
6612 (unsigned long long)server->fsid.minor,
6613 clp->cl_hostname);
6614 nfs_display_fhandle(NFS_FH(inode), __func__);
6615
6616 do {
6617 status = ops->fsid_present(inode, cred);
6618 if (status != -NFS4ERR_DELAY)
6619 break;
6620 nfs4_handle_exception(server, status, &exception);
6621 } while (exception.retry);
6622 return status;
6623}
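
/*
 * Illustrative sketch only: how a hypothetical caller might interpret the
 * return codes documented above while probing FSIDs during "lease moved"
 * recovery.  The helper name nfs4_example_fsid_moved is made up and is not
 * used anywhere in this file.
 */
static bool __maybe_unused nfs4_example_fsid_moved(struct inode *inode,
						   struct rpc_cred *cred)
{
	switch (nfs4_proc_fsid_present(inode, cred)) {
	case NFS4_OK:
		return false;	/* FSID is still present on this server */
	case -NFS4ERR_MOVED:
		return true;	/* this FSID has migrated elsewhere */
	default:
		return false;	/* server or local error; treat as not moved */
	}
}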
6624
6625/**
6626 * If 'use_integrity' is true and the state management nfs_client
6627 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6628 * and the machine credential as per RFC3530bis and RFC5661 Security
6629 * Considerations sections. Otherwise, just use the user cred with the
6630 * filesystem's rpc_client.
6631 */
6632static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6633{
6634 int status;
6635 struct nfs4_secinfo_arg args = {
6636 .dir_fh = NFS_FH(dir),
6637 .name = name,
6638 };
6639 struct nfs4_secinfo_res res = {
6640 .flavors = flavors,
6641 };
6642 struct rpc_message msg = {
6643 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6644 .rpc_argp = &args,
6645 .rpc_resp = &res,
6646 };
6647 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6648 struct rpc_cred *cred = NULL;
6649
6650 if (use_integrity) {
6651 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6652 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6653 msg.rpc_cred = cred;
6654 }
6655
6656 dprintk("NFS call secinfo %s\n", name->name);
6657
6658 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6659 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6660
6661 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6662 &res.seq_res, 0);
6663 dprintk("NFS reply secinfo: %d\n", status);
6664
6665 if (cred)
6666 put_rpccred(cred);
6667
6668 return status;
6669}
6670
6671int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6672 struct nfs4_secinfo_flavors *flavors)
6673{
6674 struct nfs4_exception exception = { };
6675 int err;
6676 do {
6677 err = -NFS4ERR_WRONGSEC;
6678
6679 /* try to use integrity protection with machine cred */
6680 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6681 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6682
6683 /*
6684 * if unable to use integrity protection, or SECINFO with
6685 * integrity protection returns NFS4ERR_WRONGSEC (which is
6686 * disallowed by spec, but exists in deployed servers) use
6687 * the current filesystem's rpc_client and the user cred.
6688 */
6689 if (err == -NFS4ERR_WRONGSEC)
6690 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6691
6692 trace_nfs4_secinfo(dir, name, err);
6693 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6694 &exception);
6695 } while (exception.retry);
6696 return err;
6697}
6698
6699#ifdef CONFIG_NFS_V4_1
6700/*
6701 * Check the exchange flags returned by the server for invalid flags, having
6702 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
6703 * DS flags set.
6704 */
6705static int nfs4_check_cl_exchange_flags(u32 flags)
6706{
6707 if (flags & ~EXCHGID4_FLAG_MASK_R)
6708 goto out_inval;
6709 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6710 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6711 goto out_inval;
6712 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6713 goto out_inval;
6714 return NFS_OK;
6715out_inval:
6716 return -NFS4ERR_INVAL;
6717}
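
/*
 * For example (illustrative only, using the EXCHGID4_FLAG_* definitions):
 *
 *	nfs4_check_cl_exchange_flags(EXCHGID4_FLAG_USE_PNFS_MDS)
 *		returns NFS_OK, while
 *	nfs4_check_cl_exchange_flags(EXCHGID4_FLAG_USE_PNFS_MDS |
 *				     EXCHGID4_FLAG_USE_NON_PNFS)
 *		returns -NFS4ERR_INVAL (both PNFS and NON_PNFS set), and
 *	nfs4_check_cl_exchange_flags(EXCHGID4_FLAG_SUPP_MOVED_REFER)
 *		returns -NFS4ERR_INVAL (no pNFS role bit set at all).
 */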
6718
6719static bool
6720nfs41_same_server_scope(struct nfs41_server_scope *a,
6721 struct nfs41_server_scope *b)
6722{
6723 if (a->server_scope_sz == b->server_scope_sz &&
6724 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6725 return true;
6726
6727 return false;
6728}
6729
6730/*
6731 * nfs4_proc_bind_conn_to_session()
6732 *
6733 * The 4.1 client currently uses the same TCP connection for the
6734 * fore and backchannel.
6735 */
6736int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6737{
6738 int status;
6739 struct nfs41_bind_conn_to_session_args args = {
6740 .client = clp,
6741 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6742 };
6743 struct nfs41_bind_conn_to_session_res res;
6744 struct rpc_message msg = {
6745 .rpc_proc =
6746 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6747 .rpc_argp = &args,
6748 .rpc_resp = &res,
6749 .rpc_cred = cred,
6750 };
6751
6752 dprintk("--> %s\n", __func__);
6753
6754 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6755 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6756 args.dir = NFS4_CDFC4_FORE;
6757
6758 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6759 trace_nfs4_bind_conn_to_session(clp, status);
6760 if (status == 0) {
6761 if (memcmp(res.sessionid.data,
6762 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6763 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6764 status = -EIO;
6765 goto out;
6766 }
6767 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6768 dprintk("NFS: %s: Unexpected direction from server\n",
6769 __func__);
6770 status = -EIO;
6771 goto out;
6772 }
6773 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6774 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6775 __func__);
6776 status = -EIO;
6777 goto out;
6778 }
6779 }
6780out:
6781 dprintk("<-- %s status= %d\n", __func__, status);
6782 return status;
6783}
6784
6785/*
6786 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce
6787 * map, plus operations we'd like in the allow map to enable certain features.
6788 */
6789static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6790 .how = SP4_MACH_CRED,
6791 .enforce.u.words = {
6792 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6793 1 << (OP_EXCHANGE_ID - 32) |
6794 1 << (OP_CREATE_SESSION - 32) |
6795 1 << (OP_DESTROY_SESSION - 32) |
6796 1 << (OP_DESTROY_CLIENTID - 32)
6797 },
6798 .allow.u.words = {
6799 [0] = 1 << (OP_CLOSE) |
6800 1 << (OP_LOCKU) |
6801 1 << (OP_COMMIT),
6802 [1] = 1 << (OP_SECINFO - 32) |
6803 1 << (OP_SECINFO_NO_NAME - 32) |
6804 1 << (OP_TEST_STATEID - 32) |
6805 1 << (OP_FREE_STATEID - 32) |
6806 1 << (OP_WRITE - 32)
6807 }
6808};
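
/*
 * Illustrative sketch only: the enforce/allow maps above are arrays of 32-bit
 * words, so an operation number selects word (op / 32) and bit (op % 32);
 * that is why operations numbered 32 and above are written as
 * "1 << (OP_xxx - 32)" in words[1].  The helper below is hypothetical
 * (assuming the struct nfs4_op_map type used for these maps) and is not
 * used anywhere in this file.
 */
static inline void nfs4_op_map_example_set(struct nfs4_op_map *map,
					   unsigned int op)
{
	/* e.g. OP_EXCHANGE_ID (42) sets bit 10 of map->u.words[1] */
	map->u.words[op / 32] |= 1U << (op % 32);
}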
6809
6810/*
6811 * Select the state protection mode for client `clp' given the server results
6812 * from exchange_id in `sp'.
6813 *
6814 * Returns 0 on success, negative errno otherwise.
6815 */
6816static int nfs4_sp4_select_mode(struct nfs_client *clp,
6817 struct nfs41_state_protection *sp)
6818{
6819 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
6820 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6821 1 << (OP_EXCHANGE_ID - 32) |
6822 1 << (OP_CREATE_SESSION - 32) |
6823 1 << (OP_DESTROY_SESSION - 32) |
6824 1 << (OP_DESTROY_CLIENTID - 32)
6825 };
6826 unsigned int i;
6827
6828 if (sp->how == SP4_MACH_CRED) {
6829 /* Print state protect result */
6830 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
6831 for (i = 0; i <= LAST_NFS4_OP; i++) {
6832 if (test_bit(i, sp->enforce.u.longs))
6833 dfprintk(MOUNT, " enforce op %d\n", i);
6834 if (test_bit(i, sp->allow.u.longs))
6835 dfprintk(MOUNT, " allow op %d\n", i);
6836 }
6837
6838 /* make sure nothing is on enforce list that isn't supported */
6839 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
6840 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
6841 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6842 return -EINVAL;
6843 }
6844 }
6845
6846 /*
6847 * Minimal mode - state operations are allowed to use machine
6848 * credential. Note this already happens by default, so the
6849 * client doesn't have to do anything more than the negotiation.
6850 *
6851 * NOTE: we don't care if EXCHANGE_ID is in the list -
6852 * we're already using the machine cred for exchange_id
6853 * and will never use a different cred.
6854 */
6855 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
6856 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
6857 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
6858 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
6859 dfprintk(MOUNT, "sp4_mach_cred:\n");
6860 dfprintk(MOUNT, " minimal mode enabled\n");
6861 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
6862 } else {
6863 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
6864 return -EINVAL;
6865 }
6866
6867 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
6868 test_bit(OP_LOCKU, sp->allow.u.longs)) {
6869 dfprintk(MOUNT, " cleanup mode enabled\n");
6870 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
6871 }
6872
6873 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
6874 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
6875 dfprintk(MOUNT, " secinfo mode enabled\n");
6876 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
6877 }
6878
6879 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
6880 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
6881 dfprintk(MOUNT, " stateid mode enabled\n");
6882 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
6883 }
6884
6885 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
6886 dfprintk(MOUNT, " write mode enabled\n");
6887 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
6888 }
6889
6890 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
6891 dfprintk(MOUNT, " commit mode enabled\n");
6892 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
6893 }
6894 }
6895
6896 return 0;
6897}
6898
6899/*
6900 * _nfs4_proc_exchange_id()
6901 *
6902 * Wrapper for EXCHANGE_ID operation.
6903 */
6904static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
6905 u32 sp4_how)
6906{
6907 nfs4_verifier verifier;
6908 struct nfs41_exchange_id_args args = {
6909 .verifier = &verifier,
6910 .client = clp,
6911#ifdef CONFIG_NFS_V4_1_MIGRATION
6912 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6913 EXCHGID4_FLAG_BIND_PRINC_STATEID |
6914 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
6915#else
6916 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
6917 EXCHGID4_FLAG_BIND_PRINC_STATEID,
6918#endif
6919 };
6920 struct nfs41_exchange_id_res res = {
6921 0
6922 };
6923 int status;
6924 struct rpc_message msg = {
6925 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
6926 .rpc_argp = &args,
6927 .rpc_resp = &res,
6928 .rpc_cred = cred,
6929 };
6930
6931 nfs4_init_boot_verifier(clp, &verifier);
6932
6933 status = nfs4_init_uniform_client_string(clp);
6934 if (status)
6935 goto out;
6936
6937 dprintk("NFS call exchange_id auth=%s, '%s'\n",
6938 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6939 clp->cl_owner_id);
6940
6941 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
6942 GFP_NOFS);
6943 if (unlikely(res.server_owner == NULL)) {
6944 status = -ENOMEM;
6945 goto out;
6946 }
6947
6948 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
6949 GFP_NOFS);
6950 if (unlikely(res.server_scope == NULL)) {
6951 status = -ENOMEM;
6952 goto out_server_owner;
6953 }
6954
6955 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
6956 if (unlikely(res.impl_id == NULL)) {
6957 status = -ENOMEM;
6958 goto out_server_scope;
6959 }
6960
6961 switch (sp4_how) {
6962 case SP4_NONE:
6963 args.state_protect.how = SP4_NONE;
6964 break;
6965
6966 case SP4_MACH_CRED:
6967 args.state_protect = nfs4_sp4_mach_cred_request;
6968 break;
6969
6970 default:
6971 /* unsupported! */
6972 WARN_ON_ONCE(1);
6973 status = -EINVAL;
6974 goto out_impl_id;
6975 }
6976
6977 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
6978 trace_nfs4_exchange_id(clp, status);
6979 if (status == 0)
6980 status = nfs4_check_cl_exchange_flags(res.flags);
6981
6982 if (status == 0)
6983 status = nfs4_sp4_select_mode(clp, &res.state_protect);
6984
6985 if (status == 0) {
6986 clp->cl_clientid = res.clientid;
6987 clp->cl_exchange_flags = res.flags;
6988 /* Client ID is not confirmed */
6989 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
6990 clear_bit(NFS4_SESSION_ESTABLISHED,
6991 &clp->cl_session->session_state);
6992 clp->cl_seqid = res.seqid;
6993 }
6994
6995 kfree(clp->cl_serverowner);
6996 clp->cl_serverowner = res.server_owner;
6997 res.server_owner = NULL;
6998
6999 /* use the most recent implementation id */
7000 kfree(clp->cl_implid);
7001 clp->cl_implid = res.impl_id;
7002 res.impl_id = NULL;
7003
7004 if (clp->cl_serverscope != NULL &&
7005 !nfs41_same_server_scope(clp->cl_serverscope,
7006 res.server_scope)) {
7007 dprintk("%s: server_scope mismatch detected\n",
7008 __func__);
7009 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7010 kfree(clp->cl_serverscope);
7011 clp->cl_serverscope = NULL;
7012 }
7013
7014 if (clp->cl_serverscope == NULL) {
7015 clp->cl_serverscope = res.server_scope;
7016 res.server_scope = NULL;
7017 }
7018 }
7019
7020out_impl_id:
7021 kfree(res.impl_id);
7022out_server_scope:
7023 kfree(res.server_scope);
7024out_server_owner:
7025 kfree(res.server_owner);
7026out:
7027 if (clp->cl_implid != NULL)
7028 dprintk("NFS reply exchange_id: Server Implementation ID: "
7029 "domain: %s, name: %s, date: %llu,%u\n",
7030 clp->cl_implid->domain, clp->cl_implid->name,
7031 clp->cl_implid->date.seconds,
7032 clp->cl_implid->date.nseconds);
7033 dprintk("NFS reply exchange_id: %d\n", status);
7034 return status;
7035}
7036
7037/*
7038 * nfs4_proc_exchange_id()
7039 *
7040 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7041 *
7042 * Since the clientid has expired, all compounds using sessions
7043 * associated with the stale clientid will be returning
7044 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7045 * be in some phase of session reset.
7046 *
7047 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7048 */
7049int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7050{
7051 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7052 int status;
7053
7054 /* try SP4_MACH_CRED if krb5i/p */
7055 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7056 authflavor == RPC_AUTH_GSS_KRB5P) {
7057 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7058 if (!status)
7059 return 0;
7060 }
7061
7062 /* try SP4_NONE */
7063 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7064}
7065
7066static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7067 struct rpc_cred *cred)
7068{
7069 struct rpc_message msg = {
7070 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7071 .rpc_argp = clp,
7072 .rpc_cred = cred,
7073 };
7074 int status;
7075
7076 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7077 trace_nfs4_destroy_clientid(clp, status);
7078 if (status)
7079 dprintk("NFS: Got error %d from the server %s on "
7080 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7081 return status;
7082}
7083
7084static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7085 struct rpc_cred *cred)
7086{
7087 unsigned int loop;
7088 int ret;
7089
7090 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7091 ret = _nfs4_proc_destroy_clientid(clp, cred);
7092 switch (ret) {
7093 case -NFS4ERR_DELAY:
7094 case -NFS4ERR_CLIENTID_BUSY:
7095 ssleep(1);
7096 break;
7097 default:
7098 return ret;
7099 }
7100 }
7101 return 0;
7102}
7103
7104int nfs4_destroy_clientid(struct nfs_client *clp)
7105{
7106 struct rpc_cred *cred;
7107 int ret = 0;
7108
7109 if (clp->cl_mvops->minor_version < 1)
7110 goto out;
7111 if (clp->cl_exchange_flags == 0)
7112 goto out;
7113 if (clp->cl_preserve_clid)
7114 goto out;
7115 cred = nfs4_get_clid_cred(clp);
7116 ret = nfs4_proc_destroy_clientid(clp, cred);
7117 if (cred)
7118 put_rpccred(cred);
7119 switch (ret) {
7120 case 0:
7121 case -NFS4ERR_STALE_CLIENTID:
7122 clp->cl_exchange_flags = 0;
7123 }
7124out:
7125 return ret;
7126}
7127
7128struct nfs4_get_lease_time_data {
7129 struct nfs4_get_lease_time_args *args;
7130 struct nfs4_get_lease_time_res *res;
7131 struct nfs_client *clp;
7132};
7133
7134static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7135 void *calldata)
7136{
7137 struct nfs4_get_lease_time_data *data =
7138 (struct nfs4_get_lease_time_data *)calldata;
7139
7140 dprintk("--> %s\n", __func__);
7141 	/* Just set up the sequence; do not trigger session recovery
7142 	   since we're invoked from within one */
7143 nfs41_setup_sequence(data->clp->cl_session,
7144 &data->args->la_seq_args,
7145 &data->res->lr_seq_res,
7146 task);
7147 dprintk("<-- %s\n", __func__);
7148}
7149
7150/*
7151 * Called from nfs4_state_manager thread for session setup, so don't recover
7152 * from sequence operation or clientid errors.
7153 */
7154static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7155{
7156 struct nfs4_get_lease_time_data *data =
7157 (struct nfs4_get_lease_time_data *)calldata;
7158
7159 dprintk("--> %s\n", __func__);
7160 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7161 return;
7162 switch (task->tk_status) {
7163 case -NFS4ERR_DELAY:
7164 case -NFS4ERR_GRACE:
7165 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7166 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7167 task->tk_status = 0;
7168 /* fall through */
7169 case -NFS4ERR_RETRY_UNCACHED_REP:
7170 rpc_restart_call_prepare(task);
7171 return;
7172 }
7173 dprintk("<-- %s\n", __func__);
7174}
7175
7176static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7177 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7178 .rpc_call_done = nfs4_get_lease_time_done,
7179};
7180
7181int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7182{
7183 struct rpc_task *task;
7184 struct nfs4_get_lease_time_args args;
7185 struct nfs4_get_lease_time_res res = {
7186 .lr_fsinfo = fsinfo,
7187 };
7188 struct nfs4_get_lease_time_data data = {
7189 .args = &args,
7190 .res = &res,
7191 .clp = clp,
7192 };
7193 struct rpc_message msg = {
7194 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7195 .rpc_argp = &args,
7196 .rpc_resp = &res,
7197 };
7198 struct rpc_task_setup task_setup = {
7199 .rpc_client = clp->cl_rpcclient,
7200 .rpc_message = &msg,
7201 .callback_ops = &nfs4_get_lease_time_ops,
7202 .callback_data = &data,
7203 .flags = RPC_TASK_TIMEOUT,
7204 };
7205 int status;
7206
7207 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7208 nfs4_set_sequence_privileged(&args.la_seq_args);
7209 dprintk("--> %s\n", __func__);
7210 task = rpc_run_task(&task_setup);
7211
7212 if (IS_ERR(task))
7213 status = PTR_ERR(task);
7214 else {
7215 status = task->tk_status;
7216 rpc_put_task(task);
7217 }
7218 dprintk("<-- %s return %d\n", __func__, status);
7219
7220 return status;
7221}
7222
7223/*
7224 * Initialize the values to be used by the client in CREATE_SESSION.
7225 * If nfs4_init_session has set the fore channel request and response
7226 * sizes, use them.
7227 *
7228 * Set the back channel max_resp_sz_cached to zero to force the client to
7229 * always set csa_cachethis to FALSE because the current implementation
7230 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7231 */
7232static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
7233{
7234 unsigned int max_rqst_sz, max_resp_sz;
7235
7236 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7237 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7238
7239 /* Fore channel attributes */
7240 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7241 args->fc_attrs.max_resp_sz = max_resp_sz;
7242 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7243 args->fc_attrs.max_reqs = max_session_slots;
7244
7245 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7246 "max_ops=%u max_reqs=%u\n",
7247 __func__,
7248 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7249 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7250
7251 /* Back channel attributes */
7252 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
7253 args->bc_attrs.max_resp_sz = PAGE_SIZE;
7254 args->bc_attrs.max_resp_sz_cached = 0;
7255 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7256 args->bc_attrs.max_reqs = 1;
7257
7258 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7259 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7260 __func__,
7261 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7262 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7263 args->bc_attrs.max_reqs);
7264}
7265
7266static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7267 struct nfs41_create_session_res *res)
7268{
7269 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7270 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7271
7272 if (rcvd->max_resp_sz > sent->max_resp_sz)
7273 return -EINVAL;
7274 /*
7275 * Our requested max_ops is the minimum we need; we're not
7276 * prepared to break up compounds into smaller pieces than that.
7277 * So, no point even trying to continue if the server won't
7278 * cooperate:
7279 */
7280 if (rcvd->max_ops < sent->max_ops)
7281 return -EINVAL;
7282 if (rcvd->max_reqs == 0)
7283 return -EINVAL;
7284 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7285 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7286 return 0;
7287}
7288
7289static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7290 struct nfs41_create_session_res *res)
7291{
7292 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7293 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7294
7295 if (!(res->flags & SESSION4_BACK_CHAN))
7296 goto out;
7297 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7298 return -EINVAL;
7299 if (rcvd->max_resp_sz < sent->max_resp_sz)
7300 return -EINVAL;
7301 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7302 return -EINVAL;
7303 /* These would render the backchannel useless: */
7304 if (rcvd->max_ops != sent->max_ops)
7305 return -EINVAL;
7306 if (rcvd->max_reqs != sent->max_reqs)
7307 return -EINVAL;
7308out:
7309 return 0;
7310}
7311
7312static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7313 struct nfs41_create_session_res *res)
7314{
7315 int ret;
7316
7317 ret = nfs4_verify_fore_channel_attrs(args, res);
7318 if (ret)
7319 return ret;
7320 return nfs4_verify_back_channel_attrs(args, res);
7321}
7322
7323static void nfs4_update_session(struct nfs4_session *session,
7324 struct nfs41_create_session_res *res)
7325{
7326 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7327 /* Mark client id and session as being confirmed */
7328 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7329 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7330 session->flags = res->flags;
7331 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7332 if (res->flags & SESSION4_BACK_CHAN)
7333 memcpy(&session->bc_attrs, &res->bc_attrs,
7334 sizeof(session->bc_attrs));
7335}
7336
7337static int _nfs4_proc_create_session(struct nfs_client *clp,
7338 struct rpc_cred *cred)
7339{
7340 struct nfs4_session *session = clp->cl_session;
7341 struct nfs41_create_session_args args = {
7342 .client = clp,
7343 .clientid = clp->cl_clientid,
7344 .seqid = clp->cl_seqid,
7345 .cb_program = NFS4_CALLBACK,
7346 };
7347 struct nfs41_create_session_res res;
7348
7349 struct rpc_message msg = {
7350 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7351 .rpc_argp = &args,
7352 .rpc_resp = &res,
7353 .rpc_cred = cred,
7354 };
7355 int status;
7356
7357 nfs4_init_channel_attrs(&args);
7358 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7359
7360 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7361 trace_nfs4_create_session(clp, status);
7362
7363 if (!status) {
7364 /* Verify the session's negotiated channel_attrs values */
7365 status = nfs4_verify_channel_attrs(&args, &res);
7366 /* Increment the clientid slot sequence id */
7367 if (clp->cl_seqid == res.seqid)
7368 clp->cl_seqid++;
7369 if (status)
7370 goto out;
7371 nfs4_update_session(session, &res);
7372 }
7373out:
7374 return status;
7375}
7376
7377/*
7378 * Issues a CREATE_SESSION operation to the server.
7379 * It is the responsibility of the caller to verify the session is
7380 * expired before calling this routine.
7381 */
7382int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7383{
7384 int status;
7385 unsigned *ptr;
7386 struct nfs4_session *session = clp->cl_session;
7387
7388 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7389
7390 status = _nfs4_proc_create_session(clp, cred);
7391 if (status)
7392 goto out;
7393
7394 /* Init or reset the session slot tables */
7395 status = nfs4_setup_session_slot_tables(session);
7396 dprintk("slot table setup returned %d\n", status);
7397 if (status)
7398 goto out;
7399
7400 ptr = (unsigned *)&session->sess_id.data[0];
7401	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
7402 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7403out:
7404 dprintk("<-- %s\n", __func__);
7405 return status;
7406}
7407
7408/*
7409 * Issue the over-the-wire RPC DESTROY_SESSION.
7410 * The caller must serialize access to this routine.
7411 */
7412int nfs4_proc_destroy_session(struct nfs4_session *session,
7413 struct rpc_cred *cred)
7414{
7415 struct rpc_message msg = {
7416 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7417 .rpc_argp = session,
7418 .rpc_cred = cred,
7419 };
7420 int status = 0;
7421
7422 dprintk("--> nfs4_proc_destroy_session\n");
7423
7424	/* The session is still being set up - nothing to destroy on the server yet */
7425 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7426 return 0;
7427
7428 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7429 trace_nfs4_destroy_session(session->clp, status);
7430
7431 if (status)
7432 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7433 "Session has been destroyed regardless...\n", status);
7434
7435 dprintk("<-- nfs4_proc_destroy_session\n");
7436 return status;
7437}
7438
7439/*
7440 * Renew the cl_session lease.
7441 */
7442struct nfs4_sequence_data {
7443 struct nfs_client *clp;
7444 struct nfs4_sequence_args args;
7445 struct nfs4_sequence_res res;
7446};
7447
7448static void nfs41_sequence_release(void *data)
7449{
7450 struct nfs4_sequence_data *calldata = data;
7451 struct nfs_client *clp = calldata->clp;
7452
7453 if (atomic_read(&clp->cl_count) > 1)
7454 nfs4_schedule_state_renewal(clp);
7455 nfs_put_client(clp);
7456 kfree(calldata);
7457}
7458
7459static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7460{
7461 switch(task->tk_status) {
7462 case -NFS4ERR_DELAY:
7463 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7464 return -EAGAIN;
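	/* Any other error suggests the lease may be lost: kick off lease recovery */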
7465 default:
7466 nfs4_schedule_lease_recovery(clp);
7467 }
7468 return 0;
7469}
7470
7471static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7472{
7473 struct nfs4_sequence_data *calldata = data;
7474 struct nfs_client *clp = calldata->clp;
7475
7476 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7477 return;
7478
7479 trace_nfs4_sequence(clp, task->tk_status);
7480 if (task->tk_status < 0) {
7481 dprintk("%s ERROR %d\n", __func__, task->tk_status);
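		/* We hold the only remaining reference: the client is being
		 * shut down, so don't bother retrying */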
7482 if (atomic_read(&clp->cl_count) == 1)
7483 goto out;
7484
7485 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7486 rpc_restart_call_prepare(task);
7487 return;
7488 }
7489 }
7490 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7491out:
7492 dprintk("<-- %s\n", __func__);
7493}
7494
7495static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7496{
7497 struct nfs4_sequence_data *calldata = data;
7498 struct nfs_client *clp = calldata->clp;
7499 struct nfs4_sequence_args *args;
7500 struct nfs4_sequence_res *res;
7501
7502 args = task->tk_msg.rpc_argp;
7503 res = task->tk_msg.rpc_resp;
7504
7505 nfs41_setup_sequence(clp->cl_session, args, res, task);
7506}
7507
7508static const struct rpc_call_ops nfs41_sequence_ops = {
7509 .rpc_call_done = nfs41_sequence_call_done,
7510 .rpc_call_prepare = nfs41_sequence_prepare,
7511 .rpc_release = nfs41_sequence_release,
7512};
7513
7514static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7515 struct rpc_cred *cred,
7516 bool is_privileged)
7517{
7518 struct nfs4_sequence_data *calldata;
7519 struct rpc_message msg = {
7520 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7521 .rpc_cred = cred,
7522 };
7523 struct rpc_task_setup task_setup_data = {
7524 .rpc_client = clp->cl_rpcclient,
7525 .rpc_message = &msg,
7526 .callback_ops = &nfs41_sequence_ops,
7527 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7528 };
7529
7530 if (!atomic_inc_not_zero(&clp->cl_count))
7531 return ERR_PTR(-EIO);
7532 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7533 if (calldata == NULL) {
7534 nfs_put_client(clp);
7535 return ERR_PTR(-ENOMEM);
7536 }
7537 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7538 if (is_privileged)
7539 nfs4_set_sequence_privileged(&calldata->args);
7540 msg.rpc_argp = &calldata->args;
7541 msg.rpc_resp = &calldata->res;
7542 calldata->clp = clp;
7543 task_setup_data.callback_data = calldata;
7544
7545 return rpc_run_task(&task_setup_data);
7546}
7547
7548static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7549{
7550 struct rpc_task *task;
7551 int ret = 0;
7552
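	/* Only send a lease-renewing SEQUENCE when the renew timeout has fired */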
7553 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7554 return -EAGAIN;
7555 task = _nfs41_proc_sequence(clp, cred, false);
7556 if (IS_ERR(task))
7557 ret = PTR_ERR(task);
7558 else
7559 rpc_put_task_async(task);
7560 dprintk("<-- %s status=%d\n", __func__, ret);
7561 return ret;
7562}
7563
7564static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7565{
7566 struct rpc_task *task;
7567 int ret;
7568
7569 task = _nfs41_proc_sequence(clp, cred, true);
7570 if (IS_ERR(task)) {
7571 ret = PTR_ERR(task);
7572 goto out;
7573 }
7574 ret = rpc_wait_for_completion_task(task);
7575 if (!ret) {
7576 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
7577
7578 if (task->tk_status == 0)
7579 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
7580 ret = task->tk_status;
7581 }
7582 rpc_put_task(task);
7583out:
7584 dprintk("<-- %s status=%d\n", __func__, ret);
7585 return ret;
7586}
7587
7588struct nfs4_reclaim_complete_data {
7589 struct nfs_client *clp;
7590 struct nfs41_reclaim_complete_args arg;
7591 struct nfs41_reclaim_complete_res res;
7592};
7593
7594static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7595{
7596 struct nfs4_reclaim_complete_data *calldata = data;
7597
7598 nfs41_setup_sequence(calldata->clp->cl_session,
7599 &calldata->arg.seq_args,
7600 &calldata->res.seq_res,
7601 task);
7602}
7603
7604static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7605{
7606 switch(task->tk_status) {
7607 case 0:
7608 case -NFS4ERR_COMPLETE_ALREADY:
7609 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7610 break;
7611 case -NFS4ERR_DELAY:
7612 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7613 /* fall through */
7614 case -NFS4ERR_RETRY_UNCACHED_REP:
7615 return -EAGAIN;
7616 default:
7617 nfs4_schedule_lease_recovery(clp);
7618 }
7619 return 0;
7620}
7621
7622static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7623{
7624 struct nfs4_reclaim_complete_data *calldata = data;
7625 struct nfs_client *clp = calldata->clp;
7626 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7627
7628 dprintk("--> %s\n", __func__);
7629 if (!nfs41_sequence_done(task, res))
7630 return;
7631
7632 trace_nfs4_reclaim_complete(clp, task->tk_status);
7633 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7634 rpc_restart_call_prepare(task);
7635 return;
7636 }
7637 dprintk("<-- %s\n", __func__);
7638}
7639
7640static void nfs4_free_reclaim_complete_data(void *data)
7641{
7642 struct nfs4_reclaim_complete_data *calldata = data;
7643
7644 kfree(calldata);
7645}
7646
7647static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7648 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7649 .rpc_call_done = nfs4_reclaim_complete_done,
7650 .rpc_release = nfs4_free_reclaim_complete_data,
7651};
7652
7653/*
7654 * Issue a global reclaim complete.
7655 */
7656static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7657 struct rpc_cred *cred)
7658{
7659 struct nfs4_reclaim_complete_data *calldata;
7660 struct rpc_task *task;
7661 struct rpc_message msg = {
7662 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7663 .rpc_cred = cred,
7664 };
7665 struct rpc_task_setup task_setup_data = {
7666 .rpc_client = clp->cl_rpcclient,
7667 .rpc_message = &msg,
7668 .callback_ops = &nfs4_reclaim_complete_call_ops,
7669 .flags = RPC_TASK_ASYNC,
7670 };
7671 int status = -ENOMEM;
7672
7673 dprintk("--> %s\n", __func__);
7674 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7675 if (calldata == NULL)
7676 goto out;
7677 calldata->clp = clp;
7678 calldata->arg.one_fs = 0;
7679
7680 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7681 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7682 msg.rpc_argp = &calldata->arg;
7683 msg.rpc_resp = &calldata->res;
7684 task_setup_data.callback_data = calldata;
7685 task = rpc_run_task(&task_setup_data);
7686 if (IS_ERR(task)) {
7687 status = PTR_ERR(task);
7688 goto out;
7689 }
7690 status = nfs4_wait_for_completion_rpc_task(task);
7691 if (status == 0)
7692 status = task->tk_status;
7693 rpc_put_task(task);
7694 return 0;
7695out:
7696 dprintk("<-- %s status=%d\n", __func__, status);
7697 return status;
7698}
7699
7700static void
7701nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7702{
7703 struct nfs4_layoutget *lgp = calldata;
7704 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7705 struct nfs4_session *session = nfs4_get_session(server);
7706
7707 dprintk("--> %s\n", __func__);
7708	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
7709 * right now covering the LAYOUTGET we are about to send.
7710 * However, that is not so catastrophic, and there seems
7711 * to be no way to prevent it completely.
7712 */
7713 if (nfs41_setup_sequence(session, &lgp->args.seq_args,
7714 &lgp->res.seq_res, task))
7715 return;
7716 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
7717 NFS_I(lgp->args.inode)->layout,
7718 &lgp->args.range,
7719 lgp->args.ctx->state)) {
7720 rpc_exit(task, NFS4_OK);
7721 }
7722}
7723
7724static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7725{
7726 struct nfs4_layoutget *lgp = calldata;
7727 struct inode *inode = lgp->args.inode;
7728 struct nfs_server *server = NFS_SERVER(inode);
7729 struct pnfs_layout_hdr *lo;
7730 struct nfs4_state *state = NULL;
7731 unsigned long timeo, now, giveup;
7732
7733 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7734
7735 if (!nfs41_sequence_done(task, &lgp->res.seq_res))
7736 goto out;
7737
7738 switch (task->tk_status) {
7739 case 0:
7740 goto out;
7741 /*
7742 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7743 * (or clients) writing to the same RAID stripe
7744 */
7745 case -NFS4ERR_LAYOUTTRYLATER:
7746 /*
7747	 * NFS4ERR_RECALLCONFLICT means the conflict is with this client itself
7748	 * (the existing layout must be recalled before a new one can be granted).
7749 */
7750 case -NFS4ERR_RECALLCONFLICT:
7751 timeo = rpc_get_timeout(task->tk_client);
7752 giveup = lgp->args.timestamp + timeo;
7753 now = jiffies;
7754 if (time_after(giveup, now)) {
7755 unsigned long delay;
7756
7757 /* Delay for:
7758	 * - No less than NFS4_POLL_RETRY_MIN.
7759	 * - At most until one jiffy before we give up.
7760	 * - Exponential backoff: the time elapsed since the first attempt.
7761 */
7762 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
7763 min((giveup - now - 1),
7764 now - lgp->args.timestamp));
7765
7766 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
7767 __func__, delay);
7768 rpc_delay(task, delay);
7769 task->tk_status = 0;
7770 rpc_restart_call_prepare(task);
7771 goto out; /* Do not call nfs4_async_handle_error() */
7772 }
7773 break;
7774 case -NFS4ERR_EXPIRED:
7775 case -NFS4ERR_BAD_STATEID:
7776 spin_lock(&inode->i_lock);
7777 lo = NFS_I(inode)->layout;
7778 if (!lo || list_empty(&lo->plh_segs)) {
7779 spin_unlock(&inode->i_lock);
7780 /* If the open stateid was bad, then recover it. */
7781 state = lgp->args.ctx->state;
7782 } else {
7783 LIST_HEAD(head);
7784
7785 /*
7786 * Mark the bad layout state as invalid, then retry
7787 * with the current stateid.
7788 */
7789 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
7790 spin_unlock(&inode->i_lock);
7791 pnfs_free_lseg_list(&head);
7792
7793 task->tk_status = 0;
7794 rpc_restart_call_prepare(task);
7795 }
7796 }
7797 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
7798 rpc_restart_call_prepare(task);
7799out:
7800 dprintk("<-- %s\n", __func__);
7801}
7802
7803static size_t max_response_pages(struct nfs_server *server)
7804{
7805 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
7806 return nfs_page_array_len(0, max_resp_sz);
7807}
7808
7809static void nfs4_free_pages(struct page **pages, size_t size)
7810{
7811 int i;
7812
7813 if (!pages)
7814 return;
7815
7816 for (i = 0; i < size; i++) {
7817 if (!pages[i])
7818 break;
7819 __free_page(pages[i]);
7820 }
7821 kfree(pages);
7822}
7823
7824static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
7825{
7826 struct page **pages;
7827 int i;
7828
7829 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
7830 if (!pages) {
7831 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
7832 return NULL;
7833 }
7834
7835 for (i = 0; i < size; i++) {
7836 pages[i] = alloc_page(gfp_flags);
7837 if (!pages[i]) {
7838 dprintk("%s: failed to allocate page\n", __func__);
7839 nfs4_free_pages(pages, size);
7840 return NULL;
7841 }
7842 }
7843
7844 return pages;
7845}
7846
7847static void nfs4_layoutget_release(void *calldata)
7848{
7849 struct nfs4_layoutget *lgp = calldata;
7850 struct inode *inode = lgp->args.inode;
7851 struct nfs_server *server = NFS_SERVER(inode);
7852 size_t max_pages = max_response_pages(server);
7853
7854 dprintk("--> %s\n", __func__);
7855 nfs4_free_pages(lgp->args.layout.pages, max_pages);
7856 pnfs_put_layout_hdr(NFS_I(inode)->layout);
7857 put_nfs_open_context(lgp->args.ctx);
7858 kfree(calldata);
7859 dprintk("<-- %s\n", __func__);
7860}
7861
7862static const struct rpc_call_ops nfs4_layoutget_call_ops = {
7863 .rpc_call_prepare = nfs4_layoutget_prepare,
7864 .rpc_call_done = nfs4_layoutget_done,
7865 .rpc_release = nfs4_layoutget_release,
7866};
7867
7868struct pnfs_layout_segment *
7869nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
7870{
7871 struct inode *inode = lgp->args.inode;
7872 struct nfs_server *server = NFS_SERVER(inode);
7873 size_t max_pages = max_response_pages(server);
7874 struct rpc_task *task;
7875 struct rpc_message msg = {
7876 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
7877 .rpc_argp = &lgp->args,
7878 .rpc_resp = &lgp->res,
7879 .rpc_cred = lgp->cred,
7880 };
7881 struct rpc_task_setup task_setup_data = {
7882 .rpc_client = server->client,
7883 .rpc_message = &msg,
7884 .callback_ops = &nfs4_layoutget_call_ops,
7885 .callback_data = lgp,
7886 .flags = RPC_TASK_ASYNC,
7887 };
7888 struct pnfs_layout_segment *lseg = NULL;
7889 int status = 0;
7890
7891 dprintk("--> %s\n", __func__);
7892
7893 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
7894 pnfs_get_layout_hdr(NFS_I(inode)->layout);
7895
7896 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
7897 if (!lgp->args.layout.pages) {
7898 nfs4_layoutget_release(lgp);
7899 return ERR_PTR(-ENOMEM);
7900 }
7901 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
7902 lgp->args.timestamp = jiffies;
7903
7904 lgp->res.layoutp = &lgp->args.layout;
7905 lgp->res.seq_res.sr_slot = NULL;
7906 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
7907
7908 task = rpc_run_task(&task_setup_data);
7909 if (IS_ERR(task))
7910 return ERR_CAST(task);
7911 status = nfs4_wait_for_completion_rpc_task(task);
7912 if (status == 0)
7913 status = task->tk_status;
7914 trace_nfs4_layoutget(lgp->args.ctx,
7915 &lgp->args.range,
7916 &lgp->res.range,
7917 status);
7918 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
7919 if (status == 0 && lgp->res.layoutp->len)
7920 lseg = pnfs_layout_process(lgp);
7921 rpc_put_task(task);
7922 dprintk("<-- %s status=%d\n", __func__, status);
7923 if (status)
7924 return ERR_PTR(status);
7925 return lseg;
7926}
7927
7928static void
7929nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
7930{
7931 struct nfs4_layoutreturn *lrp = calldata;
7932
7933 dprintk("--> %s\n", __func__);
7934 nfs41_setup_sequence(lrp->clp->cl_session,
7935 &lrp->args.seq_args,
7936 &lrp->res.seq_res,
7937 task);
7938}
7939
7940static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
7941{
7942 struct nfs4_layoutreturn *lrp = calldata;
7943 struct nfs_server *server;
7944
7945 dprintk("--> %s\n", __func__);
7946
7947 if (!nfs41_sequence_done(task, &lrp->res.seq_res))
7948 return;
7949
7950 server = NFS_SERVER(lrp->args.inode);
7951 switch (task->tk_status) {
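	/* Ignore unexpected errors and fall through; only NFS4ERR_DELAY is retried */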
7952 default:
7953 task->tk_status = 0;
7954 case 0:
7955 break;
7956 case -NFS4ERR_DELAY:
7957 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
7958 break;
7959 rpc_restart_call_prepare(task);
7960 return;
7961 }
7962 dprintk("<-- %s\n", __func__);
7963}
7964
7965static void nfs4_layoutreturn_release(void *calldata)
7966{
7967 struct nfs4_layoutreturn *lrp = calldata;
7968 struct pnfs_layout_hdr *lo = lrp->args.layout;
7969
7970 dprintk("--> %s\n", __func__);
7971 spin_lock(&lo->plh_inode->i_lock);
7972 if (lrp->res.lrs_present)
7973 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7974 pnfs_clear_layoutreturn_waitbit(lo);
7975 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
7976 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
7977 lo->plh_block_lgets--;
7978 spin_unlock(&lo->plh_inode->i_lock);
7979 pnfs_put_layout_hdr(lrp->args.layout);
7980 nfs_iput_and_deactive(lrp->inode);
7981 kfree(calldata);
7982 dprintk("<-- %s\n", __func__);
7983}
7984
7985static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
7986 .rpc_call_prepare = nfs4_layoutreturn_prepare,
7987 .rpc_call_done = nfs4_layoutreturn_done,
7988 .rpc_release = nfs4_layoutreturn_release,
7989};
7990
7991int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
7992{
7993 struct rpc_task *task;
7994 struct rpc_message msg = {
7995 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
7996 .rpc_argp = &lrp->args,
7997 .rpc_resp = &lrp->res,
7998 .rpc_cred = lrp->cred,
7999 };
8000 struct rpc_task_setup task_setup_data = {
8001 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8002 .rpc_message = &msg,
8003 .callback_ops = &nfs4_layoutreturn_call_ops,
8004 .callback_data = lrp,
8005 };
8006 int status = 0;
8007
8008 dprintk("--> %s\n", __func__);
8009 if (!sync) {
8010 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8011 if (!lrp->inode) {
8012 nfs4_layoutreturn_release(lrp);
8013 return -EAGAIN;
8014 }
8015 task_setup_data.flags |= RPC_TASK_ASYNC;
8016 }
8017 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8018 task = rpc_run_task(&task_setup_data);
8019 if (IS_ERR(task))
8020 return PTR_ERR(task);
8021 if (sync)
8022 status = task->tk_status;
8023 trace_nfs4_layoutreturn(lrp->args.inode, status);
8024 dprintk("<-- %s status=%d\n", __func__, status);
8025 rpc_put_task(task);
8026 return status;
8027}
8028
8029static int
8030_nfs4_proc_getdeviceinfo(struct nfs_server *server,
8031 struct pnfs_device *pdev,
8032 struct rpc_cred *cred)
8033{
8034 struct nfs4_getdeviceinfo_args args = {
8035 .pdev = pdev,
8036 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8037 NOTIFY_DEVICEID4_DELETE,
8038 };
8039 struct nfs4_getdeviceinfo_res res = {
8040 .pdev = pdev,
8041 };
8042 struct rpc_message msg = {
8043 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8044 .rpc_argp = &args,
8045 .rpc_resp = &res,
8046 .rpc_cred = cred,
8047 };
8048 int status;
8049
8050 dprintk("--> %s\n", __func__);
8051 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
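	/* If the server will not send every requested device ID notification,
	 * the device info must not be cached */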
8052 if (res.notification & ~args.notify_types)
8053 dprintk("%s: unsupported notification\n", __func__);
8054 if (res.notification != args.notify_types)
8055 pdev->nocache = 1;
8056
8057 dprintk("<-- %s status=%d\n", __func__, status);
8058
8059 return status;
8060}
8061
8062int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8063 struct pnfs_device *pdev,
8064 struct rpc_cred *cred)
8065{
8066 struct nfs4_exception exception = { };
8067 int err;
8068
8069 do {
8070 err = nfs4_handle_exception(server,
8071 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8072 &exception);
8073 } while (exception.retry);
8074 return err;
8075}
8076EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8077
8078static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8079{
8080 struct nfs4_layoutcommit_data *data = calldata;
8081 struct nfs_server *server = NFS_SERVER(data->args.inode);
8082 struct nfs4_session *session = nfs4_get_session(server);
8083
8084 nfs41_setup_sequence(session,
8085 &data->args.seq_args,
8086 &data->res.seq_res,
8087 task);
8088}
8089
8090static void
8091nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8092{
8093 struct nfs4_layoutcommit_data *data = calldata;
8094 struct nfs_server *server = NFS_SERVER(data->args.inode);
8095
8096 if (!nfs41_sequence_done(task, &data->res.seq_res))
8097 return;
8098
8099 switch (task->tk_status) { /* Just ignore these failures */
8100 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8101 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8102 case -NFS4ERR_BADLAYOUT: /* no layout */
8103	case -NFS4ERR_GRACE:	/* loca_reclaim is always false */
8104 task->tk_status = 0;
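		/* Fall through */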
8105 case 0:
8106 break;
8107 default:
8108 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8109 rpc_restart_call_prepare(task);
8110 return;
8111 }
8112 }
8113}
8114
8115static void nfs4_layoutcommit_release(void *calldata)
8116{
8117 struct nfs4_layoutcommit_data *data = calldata;
8118
8119 pnfs_cleanup_layoutcommit(data);
8120 nfs_post_op_update_inode_force_wcc(data->args.inode,
8121 data->res.fattr);
8122 put_rpccred(data->cred);
8123 nfs_iput_and_deactive(data->inode);
8124 kfree(data);
8125}
8126
8127static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8128 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8129 .rpc_call_done = nfs4_layoutcommit_done,
8130 .rpc_release = nfs4_layoutcommit_release,
8131};
8132
8133int
8134nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8135{
8136 struct rpc_message msg = {
8137 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8138 .rpc_argp = &data->args,
8139 .rpc_resp = &data->res,
8140 .rpc_cred = data->cred,
8141 };
8142 struct rpc_task_setup task_setup_data = {
8143 .task = &data->task,
8144 .rpc_client = NFS_CLIENT(data->args.inode),
8145 .rpc_message = &msg,
8146 .callback_ops = &nfs4_layoutcommit_ops,
8147 .callback_data = data,
8148 };
8149 struct rpc_task *task;
8150 int status = 0;
8151
8152 dprintk("NFS: initiating layoutcommit call. sync %d "
8153 "lbw: %llu inode %lu\n", sync,
8154 data->args.lastbytewritten,
8155 data->args.inode->i_ino);
8156
8157 if (!sync) {
8158 data->inode = nfs_igrab_and_active(data->args.inode);
8159 if (data->inode == NULL) {
8160 nfs4_layoutcommit_release(data);
8161 return -EAGAIN;
8162 }
8163 task_setup_data.flags = RPC_TASK_ASYNC;
8164 }
8165 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8166 task = rpc_run_task(&task_setup_data);
8167 if (IS_ERR(task))
8168 return PTR_ERR(task);
8169 if (sync)
8170 status = task->tk_status;
8171 trace_nfs4_layoutcommit(data->args.inode, status);
8172 dprintk("%s: status %d\n", __func__, status);
8173 rpc_put_task(task);
8174 return status;
8175}
8176
8177/**
8178 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8179 * possible) as per the RFC3530bis and RFC5661 Security Considerations sections.
8180 */
8181static int
8182_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8183 struct nfs_fsinfo *info,
8184 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8185{
8186 struct nfs41_secinfo_no_name_args args = {
8187 .style = SECINFO_STYLE_CURRENT_FH,
8188 };
8189 struct nfs4_secinfo_res res = {
8190 .flavors = flavors,
8191 };
8192 struct rpc_message msg = {
8193 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8194 .rpc_argp = &args,
8195 .rpc_resp = &res,
8196 };
8197 struct rpc_clnt *clnt = server->client;
8198 struct rpc_cred *cred = NULL;
8199 int status;
8200
8201 if (use_integrity) {
8202 clnt = server->nfs_client->cl_rpcclient;
8203 cred = nfs4_get_clid_cred(server->nfs_client);
8204 msg.rpc_cred = cred;
8205 }
8206
8207 dprintk("--> %s\n", __func__);
8208 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8209 &res.seq_res, 0);
8210 dprintk("<-- %s status=%d\n", __func__, status);
8211
8212 if (cred)
8213 put_rpccred(cred);
8214
8215 return status;
8216}
8217
8218static int
8219nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8220 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8221{
8222 struct nfs4_exception exception = { };
8223 int err;
8224 do {
8225 /* first try using integrity protection */
8226 err = -NFS4ERR_WRONGSEC;
8227
8228 /* try to use integrity protection with machine cred */
8229 if (_nfs4_is_integrity_protected(server->nfs_client))
8230 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8231 flavors, true);
8232
8233 /*
8234 * if unable to use integrity protection, or SECINFO with
8235 * integrity protection returns NFS4ERR_WRONGSEC (which is
8236 * disallowed by spec, but exists in deployed servers) use
8237 * the current filesystem's rpc_client and the user cred.
8238 */
8239 if (err == -NFS4ERR_WRONGSEC)
8240 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8241 flavors, false);
8242
8243 switch (err) {
8244 case 0:
8245 case -NFS4ERR_WRONGSEC:
8246 case -ENOTSUPP:
8247 goto out;
8248 default:
8249 err = nfs4_handle_exception(server, err, &exception);
8250 }
8251 } while (exception.retry);
8252out:
8253 return err;
8254}
8255
8256static int
8257nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8258 struct nfs_fsinfo *info)
8259{
8260 int err;
8261 struct page *page;
8262 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8263 struct nfs4_secinfo_flavors *flavors;
8264 struct nfs4_secinfo4 *secinfo;
8265 int i;
8266
8267 page = alloc_page(GFP_KERNEL);
8268 if (!page) {
8269 err = -ENOMEM;
8270 goto out;
8271 }
8272
8273 flavors = page_address(page);
8274 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8275
8276 /*
8277 * Fall back on "guess and check" method if
8278 * the server doesn't support SECINFO_NO_NAME
8279 */
8280 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8281 err = nfs4_find_root_sec(server, fhandle, info);
8282 goto out_freepage;
8283 }
8284 if (err)
8285 goto out_freepage;
8286
8287 for (i = 0; i < flavors->num_flavors; i++) {
8288 secinfo = &flavors->flavors[i];
8289
8290 switch (secinfo->flavor) {
8291 case RPC_AUTH_NULL:
8292 case RPC_AUTH_UNIX:
8293 case RPC_AUTH_GSS:
8294 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8295 &secinfo->flavor_info);
8296 break;
8297 default:
8298 flavor = RPC_AUTH_MAXFLAVOR;
8299 break;
8300 }
8301
8302 if (!nfs_auth_info_match(&server->auth_info, flavor))
8303 flavor = RPC_AUTH_MAXFLAVOR;
8304
8305 if (flavor != RPC_AUTH_MAXFLAVOR) {
8306 err = nfs4_lookup_root_sec(server, fhandle,
8307 info, flavor);
8308 if (!err)
8309 break;
8310 }
8311 }
8312
8313 if (flavor == RPC_AUTH_MAXFLAVOR)
8314 err = -EPERM;
8315
8316out_freepage:
8317 put_page(page);
8318 if (err == -EACCES)
8319 return -EPERM;
8320out:
8321 return err;
8322}
8323
8324static int _nfs41_test_stateid(struct nfs_server *server,
8325 nfs4_stateid *stateid,
8326 struct rpc_cred *cred)
8327{
8328 int status;
8329 struct nfs41_test_stateid_args args = {
8330 .stateid = stateid,
8331 };
8332 struct nfs41_test_stateid_res res;
8333 struct rpc_message msg = {
8334 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8335 .rpc_argp = &args,
8336 .rpc_resp = &res,
8337 .rpc_cred = cred,
8338 };
8339 struct rpc_clnt *rpc_client = server->client;
8340
8341 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8342 &rpc_client, &msg);
8343
8344 dprintk("NFS call test_stateid %p\n", stateid);
8345 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8346 nfs4_set_sequence_privileged(&args.seq_args);
8347 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8348 &args.seq_args, &res.seq_res);
8349 if (status != NFS_OK) {
8350 dprintk("NFS reply test_stateid: failed, %d\n", status);
8351 return status;
8352 }
8353 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8354 return -res.status;
8355}
8356
8357/**
8358 * nfs41_test_stateid - perform a TEST_STATEID operation
8359 *
8360 * @server: server / transport on which to perform the operation
8361 * @stateid: state ID to test
8362 * @cred: credential
8363 *
8364 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8365 * Otherwise a negative NFS4ERR value is returned if the operation
8366 * failed or the state ID is not currently valid.
8367 */
8368static int nfs41_test_stateid(struct nfs_server *server,
8369 nfs4_stateid *stateid,
8370 struct rpc_cred *cred)
8371{
8372 struct nfs4_exception exception = { };
8373 int err;
8374 do {
8375 err = _nfs41_test_stateid(server, stateid, cred);
8376 if (err != -NFS4ERR_DELAY)
8377 break;
8378 nfs4_handle_exception(server, err, &exception);
8379 } while (exception.retry);
8380 return err;
8381}
8382
8383struct nfs_free_stateid_data {
8384 struct nfs_server *server;
8385 struct nfs41_free_stateid_args args;
8386 struct nfs41_free_stateid_res res;
8387};
8388
8389static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8390{
8391 struct nfs_free_stateid_data *data = calldata;
8392 nfs41_setup_sequence(nfs4_get_session(data->server),
8393 &data->args.seq_args,
8394 &data->res.seq_res,
8395 task);
8396}
8397
8398static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8399{
8400 struct nfs_free_stateid_data *data = calldata;
8401
8402 nfs41_sequence_done(task, &data->res.seq_res);
8403
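	/* Only NFS4ERR_DELAY is worth retrying; any other result is ignored */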
8404 switch (task->tk_status) {
8405 case -NFS4ERR_DELAY:
8406 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8407 rpc_restart_call_prepare(task);
8408 }
8409}
8410
8411static void nfs41_free_stateid_release(void *calldata)
8412{
8413 kfree(calldata);
8414}
8415
8416static const struct rpc_call_ops nfs41_free_stateid_ops = {
8417 .rpc_call_prepare = nfs41_free_stateid_prepare,
8418 .rpc_call_done = nfs41_free_stateid_done,
8419 .rpc_release = nfs41_free_stateid_release,
8420};
8421
8422static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8423 nfs4_stateid *stateid,
8424 struct rpc_cred *cred,
8425 bool privileged)
8426{
8427 struct rpc_message msg = {
8428 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8429 .rpc_cred = cred,
8430 };
8431 struct rpc_task_setup task_setup = {
8432 .rpc_client = server->client,
8433 .rpc_message = &msg,
8434 .callback_ops = &nfs41_free_stateid_ops,
8435 .flags = RPC_TASK_ASYNC,
8436 };
8437 struct nfs_free_stateid_data *data;
8438
8439 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8440 &task_setup.rpc_client, &msg);
8441
8442 dprintk("NFS call free_stateid %p\n", stateid);
8443 data = kmalloc(sizeof(*data), GFP_NOFS);
8444 if (!data)
8445 return ERR_PTR(-ENOMEM);
8446 data->server = server;
8447 nfs4_stateid_copy(&data->args.stateid, stateid);
8448
8449 task_setup.callback_data = data;
8450
8451 msg.rpc_argp = &data->args;
8452 msg.rpc_resp = &data->res;
8453 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8454 if (privileged)
8455 nfs4_set_sequence_privileged(&data->args.seq_args);
8456
8457 return rpc_run_task(&task_setup);
8458}
8459
8460/**
8461 * nfs41_free_stateid - perform a FREE_STATEID operation
8462 *
8463 * @server: server / transport on which to perform the operation
8464 * @stateid: state ID to release
8465 * @cred: credential
8466 *
8467 * Returns NFS_OK if the server freed "stateid". Otherwise a
8468 * negative NFS4ERR value is returned.
8469 */
8470static int nfs41_free_stateid(struct nfs_server *server,
8471 nfs4_stateid *stateid,
8472 struct rpc_cred *cred)
8473{
8474 struct rpc_task *task;
8475 int ret;
8476
8477 task = _nfs41_free_stateid(server, stateid, cred, true);
8478 if (IS_ERR(task))
8479 return PTR_ERR(task);
8480 ret = rpc_wait_for_completion_task(task);
8481 if (!ret)
8482 ret = task->tk_status;
8483 rpc_put_task(task);
8484 return ret;
8485}
8486
8487static void
8488nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8489{
8490 struct rpc_task *task;
8491 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8492
8493 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8494 nfs4_free_lock_state(server, lsp);
8495 if (IS_ERR(task))
8496 return;
8497 rpc_put_task(task);
8498}
8499
8500static bool nfs41_match_stateid(const nfs4_stateid *s1,
8501 const nfs4_stateid *s2)
8502{
8503 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8504 return false;
8505
8506 if (s1->seqid == s2->seqid)
8507 return true;
8508 if (s1->seqid == 0 || s2->seqid == 0)
8509 return true;
8510
8511 return false;
8512}
8513
8514#endif /* CONFIG_NFS_V4_1 */
8515
8516static bool nfs4_match_stateid(const nfs4_stateid *s1,
8517 const nfs4_stateid *s2)
8518{
8519 return nfs4_stateid_match(s1, s2);
8520}
8521
8522
8523static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8524 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8525 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8526 .recover_open = nfs4_open_reclaim,
8527 .recover_lock = nfs4_lock_reclaim,
8528 .establish_clid = nfs4_init_clientid,
8529 .detect_trunking = nfs40_discover_server_trunking,
8530};
8531
8532#if defined(CONFIG_NFS_V4_1)
8533static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8534 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8535 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8536 .recover_open = nfs4_open_reclaim,
8537 .recover_lock = nfs4_lock_reclaim,
8538 .establish_clid = nfs41_init_clientid,
8539 .reclaim_complete = nfs41_proc_reclaim_complete,
8540 .detect_trunking = nfs41_discover_server_trunking,
8541};
8542#endif /* CONFIG_NFS_V4_1 */
8543
8544static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8545 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8546 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8547 .recover_open = nfs40_open_expired,
8548 .recover_lock = nfs4_lock_expired,
8549 .establish_clid = nfs4_init_clientid,
8550};
8551
8552#if defined(CONFIG_NFS_V4_1)
8553static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8554 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8555 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8556 .recover_open = nfs41_open_expired,
8557 .recover_lock = nfs41_lock_expired,
8558 .establish_clid = nfs41_init_clientid,
8559};
8560#endif /* CONFIG_NFS_V4_1 */
8561
8562static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8563 .sched_state_renewal = nfs4_proc_async_renew,
8564 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8565 .renew_lease = nfs4_proc_renew,
8566};
8567
8568#if defined(CONFIG_NFS_V4_1)
8569static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8570 .sched_state_renewal = nfs41_proc_async_sequence,
8571 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8572 .renew_lease = nfs4_proc_sequence,
8573};
8574#endif
8575
8576static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8577 .get_locations = _nfs40_proc_get_locations,
8578 .fsid_present = _nfs40_proc_fsid_present,
8579};
8580
8581#if defined(CONFIG_NFS_V4_1)
8582static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8583 .get_locations = _nfs41_proc_get_locations,
8584 .fsid_present = _nfs41_proc_fsid_present,
8585};
8586#endif /* CONFIG_NFS_V4_1 */
8587
8588static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8589 .minor_version = 0,
8590 .init_caps = NFS_CAP_READDIRPLUS
8591 | NFS_CAP_ATOMIC_OPEN
8592 | NFS_CAP_CHANGE_ATTR
8593 | NFS_CAP_POSIX_LOCK,
8594 .init_client = nfs40_init_client,
8595 .shutdown_client = nfs40_shutdown_client,
8596 .match_stateid = nfs4_match_stateid,
8597 .find_root_sec = nfs4_find_root_sec,
8598 .free_lock_state = nfs4_release_lockowner,
8599 .alloc_seqid = nfs_alloc_seqid,
8600 .call_sync_ops = &nfs40_call_sync_ops,
8601 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8602 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8603 .state_renewal_ops = &nfs40_state_renewal_ops,
8604 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8605};
8606
8607#if defined(CONFIG_NFS_V4_1)
8608static struct nfs_seqid *
8609nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8610{
8611 return NULL;
8612}
8613
8614static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8615 .minor_version = 1,
8616 .init_caps = NFS_CAP_READDIRPLUS
8617 | NFS_CAP_ATOMIC_OPEN
8618 | NFS_CAP_CHANGE_ATTR
8619 | NFS_CAP_POSIX_LOCK
8620 | NFS_CAP_STATEID_NFSV41
8621 | NFS_CAP_ATOMIC_OPEN_V1,
8622 .init_client = nfs41_init_client,
8623 .shutdown_client = nfs41_shutdown_client,
8624 .match_stateid = nfs41_match_stateid,
8625 .find_root_sec = nfs41_find_root_sec,
8626 .free_lock_state = nfs41_free_lock_state,
8627 .alloc_seqid = nfs_alloc_no_seqid,
8628 .call_sync_ops = &nfs41_call_sync_ops,
8629 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8630 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8631 .state_renewal_ops = &nfs41_state_renewal_ops,
8632 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8633};
8634#endif
8635
8636#if defined(CONFIG_NFS_V4_2)
8637static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8638 .minor_version = 2,
8639 .init_caps = NFS_CAP_READDIRPLUS
8640 | NFS_CAP_ATOMIC_OPEN
8641 | NFS_CAP_CHANGE_ATTR
8642 | NFS_CAP_POSIX_LOCK
8643 | NFS_CAP_STATEID_NFSV41
8644 | NFS_CAP_ATOMIC_OPEN_V1
8645 | NFS_CAP_ALLOCATE
8646 | NFS_CAP_DEALLOCATE
8647 | NFS_CAP_SEEK
8648 | NFS_CAP_LAYOUTSTATS,
8649 .init_client = nfs41_init_client,
8650 .shutdown_client = nfs41_shutdown_client,
8651 .match_stateid = nfs41_match_stateid,
8652 .find_root_sec = nfs41_find_root_sec,
8653 .free_lock_state = nfs41_free_lock_state,
8654 .call_sync_ops = &nfs41_call_sync_ops,
8655 .alloc_seqid = nfs_alloc_no_seqid,
8656 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8657 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8658 .state_renewal_ops = &nfs41_state_renewal_ops,
8659};
8660#endif
8661
8662const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8663 [0] = &nfs_v4_0_minor_ops,
8664#if defined(CONFIG_NFS_V4_1)
8665 [1] = &nfs_v4_1_minor_ops,
8666#endif
8667#if defined(CONFIG_NFS_V4_2)
8668 [2] = &nfs_v4_2_minor_ops,
8669#endif
8670};
8671
8672static const struct inode_operations nfs4_dir_inode_operations = {
8673 .create = nfs_create,
8674 .lookup = nfs_lookup,
8675 .atomic_open = nfs_atomic_open,
8676 .link = nfs_link,
8677 .unlink = nfs_unlink,
8678 .symlink = nfs_symlink,
8679 .mkdir = nfs_mkdir,
8680 .rmdir = nfs_rmdir,
8681 .mknod = nfs_mknod,
8682 .rename = nfs_rename,
8683 .permission = nfs_permission,
8684 .getattr = nfs_getattr,
8685 .setattr = nfs_setattr,
8686 .getxattr = generic_getxattr,
8687 .setxattr = generic_setxattr,
8688 .listxattr = generic_listxattr,
8689 .removexattr = generic_removexattr,
8690};
8691
8692static const struct inode_operations nfs4_file_inode_operations = {
8693 .permission = nfs_permission,
8694 .getattr = nfs_getattr,
8695 .setattr = nfs_setattr,
8696 .getxattr = generic_getxattr,
8697 .setxattr = generic_setxattr,
8698 .listxattr = generic_listxattr,
8699 .removexattr = generic_removexattr,
8700};
8701
8702const struct nfs_rpc_ops nfs_v4_clientops = {
8703 .version = 4, /* protocol version */
8704 .dentry_ops = &nfs4_dentry_operations,
8705 .dir_inode_ops = &nfs4_dir_inode_operations,
8706 .file_inode_ops = &nfs4_file_inode_operations,
8707 .file_ops = &nfs4_file_operations,
8708 .getroot = nfs4_proc_get_root,
8709 .submount = nfs4_submount,
8710 .try_mount = nfs4_try_mount,
8711 .getattr = nfs4_proc_getattr,
8712 .setattr = nfs4_proc_setattr,
8713 .lookup = nfs4_proc_lookup,
8714 .access = nfs4_proc_access,
8715 .readlink = nfs4_proc_readlink,
8716 .create = nfs4_proc_create,
8717 .remove = nfs4_proc_remove,
8718 .unlink_setup = nfs4_proc_unlink_setup,
8719 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8720 .unlink_done = nfs4_proc_unlink_done,
8721 .rename_setup = nfs4_proc_rename_setup,
8722 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8723 .rename_done = nfs4_proc_rename_done,
8724 .link = nfs4_proc_link,
8725 .symlink = nfs4_proc_symlink,
8726 .mkdir = nfs4_proc_mkdir,
8727 .rmdir = nfs4_proc_remove,
8728 .readdir = nfs4_proc_readdir,
8729 .mknod = nfs4_proc_mknod,
8730 .statfs = nfs4_proc_statfs,
8731 .fsinfo = nfs4_proc_fsinfo,
8732 .pathconf = nfs4_proc_pathconf,
8733 .set_capabilities = nfs4_server_capabilities,
8734 .decode_dirent = nfs4_decode_dirent,
8735 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8736 .read_setup = nfs4_proc_read_setup,
8737 .read_done = nfs4_read_done,
8738 .write_setup = nfs4_proc_write_setup,
8739 .write_done = nfs4_write_done,
8740 .commit_setup = nfs4_proc_commit_setup,
8741 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
8742 .commit_done = nfs4_commit_done,
8743 .lock = nfs4_proc_lock,
8744 .clear_acl_cache = nfs4_zap_acl_attr,
8745 .close_context = nfs4_close_context,
8746 .open_context = nfs4_atomic_open,
8747 .have_delegation = nfs4_have_delegation,
8748 .return_delegation = nfs4_inode_return_delegation,
8749 .alloc_client = nfs4_alloc_client,
8750 .init_client = nfs4_init_client,
8751 .free_client = nfs4_free_client,
8752 .create_server = nfs4_create_server,
8753 .clone_server = nfs_clone_server,
8754};
8755
8756static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
8757 .prefix = XATTR_NAME_NFSV4_ACL,
8758 .list = nfs4_xattr_list_nfs4_acl,
8759 .get = nfs4_xattr_get_nfs4_acl,
8760 .set = nfs4_xattr_set_nfs4_acl,
8761};
8762
8763const struct xattr_handler *nfs4_xattr_handlers[] = {
8764 &nfs4_xattr_nfs4_acl_handler,
8765#ifdef CONFIG_NFS_V4_SECURITY_LABEL
8766 &nfs4_xattr_nfs4_label_handler,
8767#endif
8768 NULL
8769};
8770
8771/*
8772 * Local variables:
8773 * c-basic-offset: 8
8774 * End:
8775 */