NFS: nfs_getaclargs.acl_len is a size_t
[linux-2.6-block.git] / fs / nfs / nfs4proc.c
1/*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/mm.h>
39#include <linux/delay.h>
40#include <linux/errno.h>
41#include <linux/string.h>
42#include <linux/ratelimit.h>
43#include <linux/printk.h>
44#include <linux/slab.h>
45#include <linux/sunrpc/clnt.h>
46#include <linux/sunrpc/gss_api.h>
47#include <linux/nfs.h>
48#include <linux/nfs4.h>
49#include <linux/nfs_fs.h>
50#include <linux/nfs_page.h>
51#include <linux/nfs_mount.h>
52#include <linux/namei.h>
53#include <linux/mount.h>
54#include <linux/module.h>
55#include <linux/nfs_idmap.h>
56#include <linux/sunrpc/bc_xprt.h>
57#include <linux/xattr.h>
58#include <linux/utsname.h>
59#include <linux/freezer.h>
60
61#include "nfs4_fs.h"
62#include "delegation.h"
63#include "internal.h"
64#include "iostat.h"
65#include "callback.h"
66#include "pnfs.h"
67#include "netns.h"
68
69#define NFSDBG_FACILITY NFSDBG_PROC
70
71#define NFS4_POLL_RETRY_MIN (HZ/10)
72#define NFS4_POLL_RETRY_MAX (15*HZ)
73
74#define NFS4_MAX_LOOP_ON_RECOVER (10)
75
76static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
77
78struct nfs4_opendata;
79static int _nfs4_proc_open(struct nfs4_opendata *data);
80static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
81static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
82static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
83static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
84static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
85static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
86static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
87 struct nfs_fattr *fattr, struct iattr *sattr,
88 struct nfs4_state *state);
89#ifdef CONFIG_NFS_V4_1
90static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
91static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
92#endif
93/* Prevent leaks of NFSv4 errors into userland */
94static int nfs4_map_errors(int err)
95{
96 if (err >= -1000)
97 return err;
98 switch (err) {
99 case -NFS4ERR_RESOURCE:
100 return -EREMOTEIO;
101 case -NFS4ERR_WRONGSEC:
102 return -EPERM;
103 case -NFS4ERR_BADOWNER:
104 case -NFS4ERR_BADNAME:
105 return -EINVAL;
106 case -NFS4ERR_SHARE_DENIED:
107 return -EACCES;
108 case -NFS4ERR_MINOR_VERS_MISMATCH:
109 return -EPROTONOSUPPORT;
110 default:
111 dprintk("%s could not handle NFSv4 error %d\n",
112 __func__, -err);
113 break;
114 }
115 return -EIO;
116}
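
/*
 * Example (illustrative): NFS4ERR_RESOURCE is 10018, so a status of
 * -10018 falls below the -1000 cut-off above and gets mapped to
 * -EREMOTEIO, while an ordinary errno such as -EIO (-5) is passed back
 * to userland unchanged.
 */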
117
118/*
119 * This is our standard bitmap for GETATTR requests.
120 */
121const u32 nfs4_fattr_bitmap[3] = {
122 FATTR4_WORD0_TYPE
123 | FATTR4_WORD0_CHANGE
124 | FATTR4_WORD0_SIZE
125 | FATTR4_WORD0_FSID
126 | FATTR4_WORD0_FILEID,
127 FATTR4_WORD1_MODE
128 | FATTR4_WORD1_NUMLINKS
129 | FATTR4_WORD1_OWNER
130 | FATTR4_WORD1_OWNER_GROUP
131 | FATTR4_WORD1_RAWDEV
132 | FATTR4_WORD1_SPACE_USED
133 | FATTR4_WORD1_TIME_ACCESS
134 | FATTR4_WORD1_TIME_METADATA
135 | FATTR4_WORD1_TIME_MODIFY
136};
137
138static const u32 nfs4_pnfs_open_bitmap[3] = {
139 FATTR4_WORD0_TYPE
140 | FATTR4_WORD0_CHANGE
141 | FATTR4_WORD0_SIZE
142 | FATTR4_WORD0_FSID
143 | FATTR4_WORD0_FILEID,
144 FATTR4_WORD1_MODE
145 | FATTR4_WORD1_NUMLINKS
146 | FATTR4_WORD1_OWNER
147 | FATTR4_WORD1_OWNER_GROUP
148 | FATTR4_WORD1_RAWDEV
149 | FATTR4_WORD1_SPACE_USED
150 | FATTR4_WORD1_TIME_ACCESS
151 | FATTR4_WORD1_TIME_METADATA
152 | FATTR4_WORD1_TIME_MODIFY,
153 FATTR4_WORD2_MDSTHRESHOLD
154};
155
156const u32 nfs4_statfs_bitmap[2] = {
157 FATTR4_WORD0_FILES_AVAIL
158 | FATTR4_WORD0_FILES_FREE
159 | FATTR4_WORD0_FILES_TOTAL,
160 FATTR4_WORD1_SPACE_AVAIL
161 | FATTR4_WORD1_SPACE_FREE
162 | FATTR4_WORD1_SPACE_TOTAL
163};
164
165const u32 nfs4_pathconf_bitmap[2] = {
166 FATTR4_WORD0_MAXLINK
167 | FATTR4_WORD0_MAXNAME,
168 0
169};
170
171const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
172 | FATTR4_WORD0_MAXREAD
173 | FATTR4_WORD0_MAXWRITE
174 | FATTR4_WORD0_LEASE_TIME,
175 FATTR4_WORD1_TIME_DELTA
176 | FATTR4_WORD1_FS_LAYOUT_TYPES,
177 FATTR4_WORD2_LAYOUT_BLKSIZE
178};
179
180const u32 nfs4_fs_locations_bitmap[2] = {
181 FATTR4_WORD0_TYPE
182 | FATTR4_WORD0_CHANGE
183 | FATTR4_WORD0_SIZE
184 | FATTR4_WORD0_FSID
185 | FATTR4_WORD0_FILEID
186 | FATTR4_WORD0_FS_LOCATIONS,
187 FATTR4_WORD1_MODE
188 | FATTR4_WORD1_NUMLINKS
189 | FATTR4_WORD1_OWNER
190 | FATTR4_WORD1_OWNER_GROUP
191 | FATTR4_WORD1_RAWDEV
192 | FATTR4_WORD1_SPACE_USED
193 | FATTR4_WORD1_TIME_ACCESS
194 | FATTR4_WORD1_TIME_METADATA
195 | FATTR4_WORD1_TIME_MODIFY
196 | FATTR4_WORD1_MOUNTED_ON_FILEID
197};
198
199static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
200 struct nfs4_readdir_arg *readdir)
201{
202 __be32 *start, *p;
203
204 BUG_ON(readdir->count < 80);
205 if (cookie > 2) {
206 readdir->cookie = cookie;
207 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
208 return;
209 }
210
211 readdir->cookie = 0;
212 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
213 if (cookie == 2)
214 return;
215
216 /*
217 * NFSv4 servers do not return entries for '.' and '..'
218 * Therefore, we fake these entries here. We let '.'
219 * have cookie 0 and '..' have cookie 1. Note that
220 * when talking to the server, we always send cookie 0
221 * instead of 1 or 2.
222 */
223 start = p = kmap_atomic(*readdir->pages);
224
225 if (cookie == 0) {
226 *p++ = xdr_one; /* next */
227 *p++ = xdr_zero; /* cookie, first word */
228 *p++ = xdr_one; /* cookie, second word */
229 *p++ = xdr_one; /* entry len */
230 memcpy(p, ".\0\0\0", 4); /* entry */
231 p++;
232 *p++ = xdr_one; /* bitmap length */
233 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
234 *p++ = htonl(8); /* attribute buffer length */
235 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
236 }
237
238 *p++ = xdr_one; /* next */
239 *p++ = xdr_zero; /* cookie, first word */
240 *p++ = xdr_two; /* cookie, second word */
241 *p++ = xdr_two; /* entry len */
242 memcpy(p, "..\0\0", 4); /* entry */
243 p++;
244 *p++ = xdr_one; /* bitmap length */
245 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
246 *p++ = htonl(8); /* attribute buffer length */
247 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
248
249 readdir->pgbase = (char *)p - (char *)start;
250 readdir->count -= readdir->pgbase;
251 kunmap_atomic(start);
252}
253
254static int nfs4_wait_clnt_recover(struct nfs_client *clp)
255{
256 int res;
257
258 might_sleep();
259
260 res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
261 nfs_wait_bit_killable, TASK_KILLABLE);
262 return res;
263}
264
265static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
266{
267 int res = 0;
268
269 might_sleep();
270
271 if (*timeout <= 0)
272 *timeout = NFS4_POLL_RETRY_MIN;
273 if (*timeout > NFS4_POLL_RETRY_MAX)
274 *timeout = NFS4_POLL_RETRY_MAX;
275 freezable_schedule_timeout_killable(*timeout);
276 if (fatal_signal_pending(current))
277 res = -ERESTARTSYS;
278 *timeout <<= 1;
279 return res;
280}
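
/*
 * Note that the delay doubles on every call, so successive waits are
 * roughly HZ/10, HZ/5, 2*HZ/5, ... jiffies, clamped to NFS4_POLL_RETRY_MAX
 * (15*HZ): callers back off exponentially between 0.1s and 15s.
 */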
281
282/* This is the error handling routine for processes that are allowed
283 * to sleep.
284 */
285static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
286{
287 struct nfs_client *clp = server->nfs_client;
288 struct nfs4_state *state = exception->state;
289 struct inode *inode = exception->inode;
290 int ret = errorcode;
291
292 exception->retry = 0;
293 switch(errorcode) {
294 case 0:
295 return 0;
296 case -NFS4ERR_OPENMODE:
297 if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
298 nfs4_inode_return_delegation(inode);
299 exception->retry = 1;
300 return 0;
301 }
302 if (state == NULL)
303 break;
304 nfs4_schedule_stateid_recovery(server, state);
305 goto wait_on_recovery;
306 case -NFS4ERR_DELEG_REVOKED:
307 case -NFS4ERR_ADMIN_REVOKED:
308 case -NFS4ERR_BAD_STATEID:
309 if (state == NULL)
310 break;
311 nfs_remove_bad_delegation(state->inode);
312 nfs4_schedule_stateid_recovery(server, state);
313 goto wait_on_recovery;
314 case -NFS4ERR_EXPIRED:
315 if (state != NULL)
316 nfs4_schedule_stateid_recovery(server, state);
317 case -NFS4ERR_STALE_STATEID:
318 case -NFS4ERR_STALE_CLIENTID:
319 nfs4_schedule_lease_recovery(clp);
320 goto wait_on_recovery;
321#if defined(CONFIG_NFS_V4_1)
322 case -NFS4ERR_BADSESSION:
323 case -NFS4ERR_BADSLOT:
324 case -NFS4ERR_BAD_HIGH_SLOT:
325 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
326 case -NFS4ERR_DEADSESSION:
327 case -NFS4ERR_SEQ_FALSE_RETRY:
328 case -NFS4ERR_SEQ_MISORDERED:
329 dprintk("%s ERROR: %d Reset session\n", __func__,
330 errorcode);
331 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
332 exception->retry = 1;
333 break;
334#endif /* defined(CONFIG_NFS_V4_1) */
335 case -NFS4ERR_FILE_OPEN:
336 if (exception->timeout > HZ) {
337 /* We have retried a decent amount, time to
338 * fail
339 */
340 ret = -EBUSY;
341 break;
342 }
343 case -NFS4ERR_GRACE:
344 case -NFS4ERR_DELAY:
345 case -EKEYEXPIRED:
346 ret = nfs4_delay(server->client, &exception->timeout);
347 if (ret != 0)
348 break;
349 case -NFS4ERR_RETRY_UNCACHED_REP:
350 case -NFS4ERR_OLD_STATEID:
351 exception->retry = 1;
352 break;
353 case -NFS4ERR_BADOWNER:
354 /* The following works around a Linux server bug! */
355 case -NFS4ERR_BADNAME:
356 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
357 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
358 exception->retry = 1;
359 printk(KERN_WARNING "NFS: v4 server %s "
360 "does not accept raw "
361 "uid/gids. "
362 "Reenabling the idmapper.\n",
363 server->nfs_client->cl_hostname);
364 }
365 }
366 /* We failed to handle the error */
367 return nfs4_map_errors(ret);
368wait_on_recovery:
369 ret = nfs4_wait_clnt_recover(clp);
370 if (ret == 0)
371 exception->retry = 1;
372 return ret;
373}
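
/*
 * The usual calling pattern is a retry loop around one of the _nfs4_*
 * worker routines; a sketch (with _nfs4_proc_foo as a placeholder):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = _nfs4_proc_foo(...);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 *
 * See nfs4_do_open_reclaim() and nfs4_open_delegation_recall() below
 * for concrete examples of this pattern.
 */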
374
375
376static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
377{
378 spin_lock(&clp->cl_lock);
379 if (time_before(clp->cl_last_renewal,timestamp))
380 clp->cl_last_renewal = timestamp;
381 spin_unlock(&clp->cl_lock);
382}
383
384static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
385{
386 do_renew_lease(server->nfs_client, timestamp);
387}
388
389#if defined(CONFIG_NFS_V4_1)
390
391/*
392 * nfs4_free_slot - free a slot and efficiently update slot table.
393 *
394 * freeing a slot is trivially done by clearing its respective bit
395 * in the bitmap.
396 * If the freed slotid equals highest_used_slotid we want to update it
397 * so that the server would be able to size down the slot table if needed,
398 * otherwise we know that the highest_used_slotid is still in use.
399 * When updating highest_used_slotid there may be "holes" in the bitmap
400 * so we need to scan down from highest_used_slotid to 0 looking for the now
401 * highest slotid in use.
402 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
403 *
404 * Must be called while holding tbl->slot_tbl_lock
405 */
406static void
407nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
408{
409 BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
410 /* clear used bit in bitmap */
411 __clear_bit(slotid, tbl->used_slots);
412
413 /* update highest_used_slotid when it is freed */
414 if (slotid == tbl->highest_used_slotid) {
415 slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
416 if (slotid < tbl->max_slots)
417 tbl->highest_used_slotid = slotid;
418 else
419 tbl->highest_used_slotid = NFS4_NO_SLOT;
420 }
421 dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
422 slotid, tbl->highest_used_slotid);
423}
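
/*
 * Example: with slots 0, 2 and 5 in use, freeing slot 5 makes
 * find_last_bit() report slot 2, so highest_used_slotid drops to 2.
 * Freeing slot 0 next leaves highest_used_slotid untouched, and it only
 * becomes NFS4_NO_SLOT once the bitmap is completely empty.
 */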
424
425bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
426{
427 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
428 return true;
429}
430
431/*
432 * Signal state manager thread if session fore channel is drained
433 */
434static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
435{
436 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
437 rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
438 nfs4_set_task_privileged, NULL);
439 return;
440 }
441
442 if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
443 return;
444
445 dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
446 complete(&ses->fc_slot_table.complete);
447}
448
449/*
450 * Signal state manager thread if session back channel is drained
451 */
452void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
453{
454 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
455 ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
456 return;
457 dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
458 complete(&ses->bc_slot_table.complete);
459}
460
461static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
462{
463 struct nfs4_slot_table *tbl;
464
465 tbl = &res->sr_session->fc_slot_table;
466 if (!res->sr_slot) {
467 /* just wake up the next guy waiting since
 468	 * we may not have consumed a slot after all */
469 dprintk("%s: No slot\n", __func__);
470 return;
471 }
472
473 spin_lock(&tbl->slot_tbl_lock);
474 nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
475 nfs4_check_drain_fc_complete(res->sr_session);
476 spin_unlock(&tbl->slot_tbl_lock);
477 res->sr_slot = NULL;
478}
479
480static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
481{
482 unsigned long timestamp;
483 struct nfs_client *clp;
484
485 /*
486 * sr_status remains 1 if an RPC level error occurred. The server
 487	 * may or may not have processed the sequence operation.
488 * Proceed as if the server received and processed the sequence
489 * operation.
490 */
491 if (res->sr_status == 1)
492 res->sr_status = NFS_OK;
493
494 /* don't increment the sequence number if the task wasn't sent */
495 if (!RPC_WAS_SENT(task))
496 goto out;
497
498 /* Check the SEQUENCE operation status */
499 switch (res->sr_status) {
500 case 0:
501 /* Update the slot's sequence and clientid lease timer */
502 ++res->sr_slot->seq_nr;
503 timestamp = res->sr_renewal_time;
504 clp = res->sr_session->clp;
505 do_renew_lease(clp, timestamp);
506 /* Check sequence flags */
507 if (res->sr_status_flags != 0)
508 nfs4_schedule_lease_recovery(clp);
509 break;
510 case -NFS4ERR_DELAY:
511 /* The server detected a resend of the RPC call and
512 * returned NFS4ERR_DELAY as per Section 2.10.6.2
513 * of RFC5661.
514 */
515 dprintk("%s: slot=%td seq=%d: Operation in progress\n",
516 __func__,
517 res->sr_slot - res->sr_session->fc_slot_table.slots,
518 res->sr_slot->seq_nr);
519 goto out_retry;
520 default:
521 /* Just update the slot sequence no. */
522 ++res->sr_slot->seq_nr;
523 }
524out:
525 /* The session may be reset by one of the error handlers. */
526 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
527 nfs41_sequence_free_slot(res);
528 return 1;
529out_retry:
530 if (!rpc_restart_call(task))
531 goto out;
532 rpc_delay(task, NFS4_POLL_RETRY_MAX);
533 return 0;
534}
535
536static int nfs4_sequence_done(struct rpc_task *task,
537 struct nfs4_sequence_res *res)
538{
539 if (res->sr_session == NULL)
540 return 1;
541 return nfs41_sequence_done(task, res);
542}
543
544/*
545 * nfs4_find_slot - efficiently look for a free slot
546 *
547 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
548 * If found, we mark the slot as used, update the highest_used_slotid,
549 * and respectively set up the sequence operation args.
550 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
551 *
 552	 * Note: must be called while holding the slot_tbl_lock.
553 */
554static u32
555nfs4_find_slot(struct nfs4_slot_table *tbl)
556{
557 u32 slotid;
558 u32 ret_id = NFS4_NO_SLOT;
559
560 dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
561 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
562 tbl->max_slots);
563 slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
564 if (slotid >= tbl->max_slots)
565 goto out;
566 __set_bit(slotid, tbl->used_slots);
567 if (slotid > tbl->highest_used_slotid ||
568 tbl->highest_used_slotid == NFS4_NO_SLOT)
569 tbl->highest_used_slotid = slotid;
570 ret_id = slotid;
571out:
572 dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
573 __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
574 return ret_id;
575}
576
577static void nfs41_init_sequence(struct nfs4_sequence_args *args,
578 struct nfs4_sequence_res *res, int cache_reply)
579{
580 args->sa_session = NULL;
581 args->sa_cache_this = 0;
582 if (cache_reply)
583 args->sa_cache_this = 1;
584 res->sr_session = NULL;
585 res->sr_slot = NULL;
586}
587
588int nfs41_setup_sequence(struct nfs4_session *session,
589 struct nfs4_sequence_args *args,
590 struct nfs4_sequence_res *res,
591 struct rpc_task *task)
592{
593 struct nfs4_slot *slot;
594 struct nfs4_slot_table *tbl;
595 u32 slotid;
596
597 dprintk("--> %s\n", __func__);
598 /* slot already allocated? */
599 if (res->sr_slot != NULL)
600 return 0;
601
602 tbl = &session->fc_slot_table;
603
604 spin_lock(&tbl->slot_tbl_lock);
605 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
606 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
607 /* The state manager will wait until the slot table is empty */
608 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
609 spin_unlock(&tbl->slot_tbl_lock);
610 dprintk("%s session is draining\n", __func__);
611 return -EAGAIN;
612 }
613
614 if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
615 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
616 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
617 spin_unlock(&tbl->slot_tbl_lock);
618 dprintk("%s enforce FIFO order\n", __func__);
619 return -EAGAIN;
620 }
621
622 slotid = nfs4_find_slot(tbl);
623 if (slotid == NFS4_NO_SLOT) {
624 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
625 spin_unlock(&tbl->slot_tbl_lock);
626 dprintk("<-- %s: no free slots\n", __func__);
627 return -EAGAIN;
628 }
629 spin_unlock(&tbl->slot_tbl_lock);
630
631 rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
632 slot = tbl->slots + slotid;
633 args->sa_session = session;
634 args->sa_slotid = slotid;
635
636 dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
637
638 res->sr_session = session;
639 res->sr_slot = slot;
640 res->sr_renewal_time = jiffies;
641 res->sr_status_flags = 0;
642 /*
643 * sr_status is only set in decode_sequence, and so will remain
644 * set to 1 if an rpc level failure occurs.
645 */
646 res->sr_status = 1;
647 return 0;
648}
649EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
650
651int nfs4_setup_sequence(const struct nfs_server *server,
652 struct nfs4_sequence_args *args,
653 struct nfs4_sequence_res *res,
654 struct rpc_task *task)
655{
656 struct nfs4_session *session = nfs4_get_session(server);
657 int ret = 0;
658
659 if (session == NULL)
660 goto out;
661
662 dprintk("--> %s clp %p session %p sr_slot %td\n",
663 __func__, session->clp, session, res->sr_slot ?
664 res->sr_slot - session->fc_slot_table.slots : -1);
665
666 ret = nfs41_setup_sequence(session, args, res, task);
667out:
668 dprintk("<-- %s status=%d\n", __func__, ret);
669 return ret;
670}
671
672struct nfs41_call_sync_data {
673 const struct nfs_server *seq_server;
674 struct nfs4_sequence_args *seq_args;
675 struct nfs4_sequence_res *seq_res;
676};
677
678static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
679{
680 struct nfs41_call_sync_data *data = calldata;
681
682 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
683
684 if (nfs4_setup_sequence(data->seq_server, data->seq_args,
685 data->seq_res, task))
686 return;
687 rpc_call_start(task);
688}
689
690static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
691{
692 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
693 nfs41_call_sync_prepare(task, calldata);
694}
695
696static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
697{
698 struct nfs41_call_sync_data *data = calldata;
699
700 nfs41_sequence_done(task, data->seq_res);
701}
702
703static const struct rpc_call_ops nfs41_call_sync_ops = {
704 .rpc_call_prepare = nfs41_call_sync_prepare,
705 .rpc_call_done = nfs41_call_sync_done,
706};
707
708static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
709 .rpc_call_prepare = nfs41_call_priv_sync_prepare,
710 .rpc_call_done = nfs41_call_sync_done,
711};
712
713static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
714 struct nfs_server *server,
715 struct rpc_message *msg,
716 struct nfs4_sequence_args *args,
717 struct nfs4_sequence_res *res,
718 int privileged)
719{
720 int ret;
721 struct rpc_task *task;
722 struct nfs41_call_sync_data data = {
723 .seq_server = server,
724 .seq_args = args,
725 .seq_res = res,
726 };
727 struct rpc_task_setup task_setup = {
728 .rpc_client = clnt,
729 .rpc_message = msg,
730 .callback_ops = &nfs41_call_sync_ops,
731 .callback_data = &data
732 };
733
734 if (privileged)
735 task_setup.callback_ops = &nfs41_call_priv_sync_ops;
736 task = rpc_run_task(&task_setup);
737 if (IS_ERR(task))
738 ret = PTR_ERR(task);
739 else {
740 ret = task->tk_status;
741 rpc_put_task(task);
742 }
743 return ret;
744}
745
746int _nfs4_call_sync_session(struct rpc_clnt *clnt,
747 struct nfs_server *server,
748 struct rpc_message *msg,
749 struct nfs4_sequence_args *args,
750 struct nfs4_sequence_res *res,
751 int cache_reply)
752{
753 nfs41_init_sequence(args, res, cache_reply);
754 return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
755}
756
757#else
758static inline
759void nfs41_init_sequence(struct nfs4_sequence_args *args,
760 struct nfs4_sequence_res *res, int cache_reply)
761{
762}
763
764static int nfs4_sequence_done(struct rpc_task *task,
765 struct nfs4_sequence_res *res)
766{
767 return 1;
768}
769#endif /* CONFIG_NFS_V4_1 */
770
771int _nfs4_call_sync(struct rpc_clnt *clnt,
772 struct nfs_server *server,
773 struct rpc_message *msg,
774 struct nfs4_sequence_args *args,
775 struct nfs4_sequence_res *res,
776 int cache_reply)
777{
778 nfs41_init_sequence(args, res, cache_reply);
779 return rpc_call_sync(clnt, msg, 0);
780}
781
782static inline
783int nfs4_call_sync(struct rpc_clnt *clnt,
784 struct nfs_server *server,
785 struct rpc_message *msg,
786 struct nfs4_sequence_args *args,
787 struct nfs4_sequence_res *res,
788 int cache_reply)
789{
790 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
791 args, res, cache_reply);
792}
793
794static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
795{
796 struct nfs_inode *nfsi = NFS_I(dir);
797
798 spin_lock(&dir->i_lock);
799 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
800 if (!cinfo->atomic || cinfo->before != dir->i_version)
801 nfs_force_lookup_revalidate(dir);
802 dir->i_version = cinfo->after;
803 spin_unlock(&dir->i_lock);
804}
805
806struct nfs4_opendata {
807 struct kref kref;
808 struct nfs_openargs o_arg;
809 struct nfs_openres o_res;
810 struct nfs_open_confirmargs c_arg;
811 struct nfs_open_confirmres c_res;
812 struct nfs4_string owner_name;
813 struct nfs4_string group_name;
814 struct nfs_fattr f_attr;
815 struct dentry *dir;
816 struct dentry *dentry;
817 struct nfs4_state_owner *owner;
818 struct nfs4_state *state;
819 struct iattr attrs;
820 unsigned long timestamp;
821 unsigned int rpc_done : 1;
822 int rpc_status;
823 int cancelled;
824};
825
826
827static void nfs4_init_opendata_res(struct nfs4_opendata *p)
828{
829 p->o_res.f_attr = &p->f_attr;
830 p->o_res.seqid = p->o_arg.seqid;
831 p->c_res.seqid = p->c_arg.seqid;
832 p->o_res.server = p->o_arg.server;
833 nfs_fattr_init(&p->f_attr);
834 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
835}
836
837static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
838 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
839 const struct iattr *attrs,
840 gfp_t gfp_mask)
841{
842 struct dentry *parent = dget_parent(dentry);
843 struct inode *dir = parent->d_inode;
844 struct nfs_server *server = NFS_SERVER(dir);
845 struct nfs4_opendata *p;
846
847 p = kzalloc(sizeof(*p), gfp_mask);
848 if (p == NULL)
849 goto err;
850 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
851 if (p->o_arg.seqid == NULL)
852 goto err_free;
853 nfs_sb_active(dentry->d_sb);
854 p->dentry = dget(dentry);
855 p->dir = parent;
856 p->owner = sp;
857 atomic_inc(&sp->so_count);
858 p->o_arg.fh = NFS_FH(dir);
859 p->o_arg.open_flags = flags;
860 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
861 p->o_arg.clientid = server->nfs_client->cl_clientid;
862 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
863 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
864 p->o_arg.name = &dentry->d_name;
865 p->o_arg.server = server;
866 p->o_arg.bitmask = server->attr_bitmask;
867 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
868 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
869 if (attrs != NULL && attrs->ia_valid != 0) {
870 __be32 verf[2];
871
872 p->o_arg.u.attrs = &p->attrs;
873 memcpy(&p->attrs, attrs, sizeof(p->attrs));
874
875 verf[0] = jiffies;
876 verf[1] = current->pid;
877 memcpy(p->o_arg.u.verifier.data, verf,
878 sizeof(p->o_arg.u.verifier.data));
879 }
880 p->c_arg.fh = &p->o_res.fh;
881 p->c_arg.stateid = &p->o_res.stateid;
882 p->c_arg.seqid = p->o_arg.seqid;
883 nfs4_init_opendata_res(p);
884 kref_init(&p->kref);
885 return p;
886err_free:
887 kfree(p);
888err:
889 dput(parent);
890 return NULL;
891}
892
893static void nfs4_opendata_free(struct kref *kref)
894{
895 struct nfs4_opendata *p = container_of(kref,
896 struct nfs4_opendata, kref);
897 struct super_block *sb = p->dentry->d_sb;
898
899 nfs_free_seqid(p->o_arg.seqid);
900 if (p->state != NULL)
901 nfs4_put_open_state(p->state);
902 nfs4_put_state_owner(p->owner);
903 dput(p->dir);
904 dput(p->dentry);
905 nfs_sb_deactive(sb);
906 nfs_fattr_free_names(&p->f_attr);
907 kfree(p);
908}
909
910static void nfs4_opendata_put(struct nfs4_opendata *p)
911{
912 if (p != NULL)
913 kref_put(&p->kref, nfs4_opendata_free);
914}
915
916static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
917{
918 int ret;
919
920 ret = rpc_wait_for_completion_task(task);
921 return ret;
922}
923
924static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
925{
926 int ret = 0;
927
928 if (open_mode & (O_EXCL|O_TRUNC))
929 goto out;
930 switch (mode & (FMODE_READ|FMODE_WRITE)) {
931 case FMODE_READ:
932 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
933 && state->n_rdonly != 0;
934 break;
935 case FMODE_WRITE:
936 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
937 && state->n_wronly != 0;
938 break;
939 case FMODE_READ|FMODE_WRITE:
940 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
941 && state->n_rdwr != 0;
942 }
943out:
944 return ret;
945}
946
947static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
948{
949 if (delegation == NULL)
950 return 0;
951 if ((delegation->type & fmode) != fmode)
952 return 0;
953 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
954 return 0;
955 nfs_mark_delegation_referenced(delegation);
956 return 1;
957}
958
959static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
960{
961 switch (fmode) {
962 case FMODE_WRITE:
963 state->n_wronly++;
964 break;
965 case FMODE_READ:
966 state->n_rdonly++;
967 break;
968 case FMODE_READ|FMODE_WRITE:
969 state->n_rdwr++;
970 }
971 nfs4_state_set_mode_locked(state, state->state | fmode);
972}
973
974static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
975{
976 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
977 nfs4_stateid_copy(&state->stateid, stateid);
978 nfs4_stateid_copy(&state->open_stateid, stateid);
979 switch (fmode) {
980 case FMODE_READ:
981 set_bit(NFS_O_RDONLY_STATE, &state->flags);
982 break;
983 case FMODE_WRITE:
984 set_bit(NFS_O_WRONLY_STATE, &state->flags);
985 break;
986 case FMODE_READ|FMODE_WRITE:
987 set_bit(NFS_O_RDWR_STATE, &state->flags);
988 }
989}
990
991static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
992{
993 write_seqlock(&state->seqlock);
994 nfs_set_open_stateid_locked(state, stateid, fmode);
995 write_sequnlock(&state->seqlock);
996}
997
998static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
999{
1000 /*
1001 * Protect the call to nfs4_state_set_mode_locked and
1002 * serialise the stateid update
1003 */
1004 write_seqlock(&state->seqlock);
1005 if (deleg_stateid != NULL) {
1006 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1007 set_bit(NFS_DELEGATED_STATE, &state->flags);
1008 }
1009 if (open_stateid != NULL)
1010 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1011 write_sequnlock(&state->seqlock);
1012 spin_lock(&state->owner->so_lock);
1013 update_open_stateflags(state, fmode);
1014 spin_unlock(&state->owner->so_lock);
1015}
1016
1017static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1018{
1019 struct nfs_inode *nfsi = NFS_I(state->inode);
1020 struct nfs_delegation *deleg_cur;
1021 int ret = 0;
1022
1023 fmode &= (FMODE_READ|FMODE_WRITE);
1024
1025 rcu_read_lock();
1026 deleg_cur = rcu_dereference(nfsi->delegation);
1027 if (deleg_cur == NULL)
1028 goto no_delegation;
1029
1030 spin_lock(&deleg_cur->lock);
1031 if (nfsi->delegation != deleg_cur ||
1032 (deleg_cur->type & fmode) != fmode)
1033 goto no_delegation_unlock;
1034
1035 if (delegation == NULL)
1036 delegation = &deleg_cur->stateid;
1037 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1038 goto no_delegation_unlock;
1039
1040 nfs_mark_delegation_referenced(deleg_cur);
1041 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1042 ret = 1;
1043no_delegation_unlock:
1044 spin_unlock(&deleg_cur->lock);
1045no_delegation:
1046 rcu_read_unlock();
1047
1048 if (!ret && open_stateid != NULL) {
1049 __update_open_stateid(state, open_stateid, NULL, fmode);
1050 ret = 1;
1051 }
1052
1053 return ret;
1054}
1055
1056
1057static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1058{
1059 struct nfs_delegation *delegation;
1060
1061 rcu_read_lock();
1062 delegation = rcu_dereference(NFS_I(inode)->delegation);
1063 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1064 rcu_read_unlock();
1065 return;
1066 }
1067 rcu_read_unlock();
1068 nfs4_inode_return_delegation(inode);
1069}
1070
1071static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1072{
1073 struct nfs4_state *state = opendata->state;
1074 struct nfs_inode *nfsi = NFS_I(state->inode);
1075 struct nfs_delegation *delegation;
1076 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
1077 fmode_t fmode = opendata->o_arg.fmode;
1078 nfs4_stateid stateid;
1079 int ret = -EAGAIN;
1080
1081 for (;;) {
1082 if (can_open_cached(state, fmode, open_mode)) {
1083 spin_lock(&state->owner->so_lock);
1084 if (can_open_cached(state, fmode, open_mode)) {
1085 update_open_stateflags(state, fmode);
1086 spin_unlock(&state->owner->so_lock);
1087 goto out_return_state;
1088 }
1089 spin_unlock(&state->owner->so_lock);
1090 }
1091 rcu_read_lock();
1092 delegation = rcu_dereference(nfsi->delegation);
1093 if (!can_open_delegated(delegation, fmode)) {
1094 rcu_read_unlock();
1095 break;
1096 }
1097 /* Save the delegation */
1098 nfs4_stateid_copy(&stateid, &delegation->stateid);
1099 rcu_read_unlock();
1100 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1101 if (ret != 0)
1102 goto out;
1103 ret = -EAGAIN;
1104
1105 /* Try to update the stateid using the delegation */
1106 if (update_open_stateid(state, NULL, &stateid, fmode))
1107 goto out_return_state;
1108 }
1109out:
1110 return ERR_PTR(ret);
1111out_return_state:
1112 atomic_inc(&state->count);
1113 return state;
1114}
1115
1116static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1117{
1118 struct inode *inode;
1119 struct nfs4_state *state = NULL;
1120 struct nfs_delegation *delegation;
1121 int ret;
1122
1123 if (!data->rpc_done) {
1124 state = nfs4_try_open_cached(data);
1125 goto out;
1126 }
1127
1128 ret = -EAGAIN;
1129 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1130 goto err;
1131 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1132 ret = PTR_ERR(inode);
1133 if (IS_ERR(inode))
1134 goto err;
1135 ret = -ENOMEM;
1136 state = nfs4_get_open_state(inode, data->owner);
1137 if (state == NULL)
1138 goto err_put_inode;
1139 if (data->o_res.delegation_type != 0) {
1140 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
1141 int delegation_flags = 0;
1142
1143 rcu_read_lock();
1144 delegation = rcu_dereference(NFS_I(inode)->delegation);
1145 if (delegation)
1146 delegation_flags = delegation->flags;
1147 rcu_read_unlock();
1148 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1149 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1150 "returning a delegation for "
1151 "OPEN(CLAIM_DELEGATE_CUR)\n",
1152 clp->cl_hostname);
1153 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1154 nfs_inode_set_delegation(state->inode,
1155 data->owner->so_cred,
1156 &data->o_res);
1157 else
1158 nfs_inode_reclaim_delegation(state->inode,
1159 data->owner->so_cred,
1160 &data->o_res);
1161 }
1162
1163 update_open_stateid(state, &data->o_res.stateid, NULL,
1164 data->o_arg.fmode);
1165 iput(inode);
1166out:
1167 return state;
1168err_put_inode:
1169 iput(inode);
1170err:
1171 return ERR_PTR(ret);
1172}
1173
1174static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1175{
1176 struct nfs_inode *nfsi = NFS_I(state->inode);
1177 struct nfs_open_context *ctx;
1178
1179 spin_lock(&state->inode->i_lock);
1180 list_for_each_entry(ctx, &nfsi->open_files, list) {
1181 if (ctx->state != state)
1182 continue;
1183 get_nfs_open_context(ctx);
1184 spin_unlock(&state->inode->i_lock);
1185 return ctx;
1186 }
1187 spin_unlock(&state->inode->i_lock);
1188 return ERR_PTR(-ENOENT);
1189}
1190
1191static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1192{
1193 struct nfs4_opendata *opendata;
1194
1195 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1196 if (opendata == NULL)
1197 return ERR_PTR(-ENOMEM);
1198 opendata->state = state;
1199 atomic_inc(&state->count);
1200 return opendata;
1201}
1202
1203static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1204{
1205 struct nfs4_state *newstate;
1206 int ret;
1207
1208 opendata->o_arg.open_flags = 0;
1209 opendata->o_arg.fmode = fmode;
1210 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1211 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1212 nfs4_init_opendata_res(opendata);
1213 ret = _nfs4_recover_proc_open(opendata);
1214 if (ret != 0)
1215 return ret;
1216 newstate = nfs4_opendata_to_nfs4_state(opendata);
1217 if (IS_ERR(newstate))
1218 return PTR_ERR(newstate);
1219 nfs4_close_state(newstate, fmode);
1220 *res = newstate;
1221 return 0;
1222}
1223
1224static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1225{
1226 struct nfs4_state *newstate;
1227 int ret;
1228
1229 /* memory barrier prior to reading state->n_* */
1230 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1231 smp_rmb();
1232 if (state->n_rdwr != 0) {
1233 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1234 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1235 if (ret != 0)
1236 return ret;
1237 if (newstate != state)
1238 return -ESTALE;
1239 }
1240 if (state->n_wronly != 0) {
1241 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1242 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1243 if (ret != 0)
1244 return ret;
1245 if (newstate != state)
1246 return -ESTALE;
1247 }
1248 if (state->n_rdonly != 0) {
1249 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1250 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1251 if (ret != 0)
1252 return ret;
1253 if (newstate != state)
1254 return -ESTALE;
1255 }
1256 /*
1257 * We may have performed cached opens for all three recoveries.
1258 * Check if we need to update the current stateid.
1259 */
1260 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1261 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1262 write_seqlock(&state->seqlock);
1263 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1264 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1265 write_sequnlock(&state->seqlock);
1266 }
1267 return 0;
1268}
1269
1270/*
1271 * OPEN_RECLAIM:
1272 * reclaim state on the server after a reboot.
1273 */
1274static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1275{
1276 struct nfs_delegation *delegation;
1277 struct nfs4_opendata *opendata;
1278 fmode_t delegation_type = 0;
1279 int status;
1280
1281 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1282 if (IS_ERR(opendata))
1283 return PTR_ERR(opendata);
1284 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1285 opendata->o_arg.fh = NFS_FH(state->inode);
1286 rcu_read_lock();
1287 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1288 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1289 delegation_type = delegation->type;
1290 rcu_read_unlock();
1291 opendata->o_arg.u.delegation_type = delegation_type;
1292 status = nfs4_open_recover(opendata, state);
1293 nfs4_opendata_put(opendata);
1294 return status;
1295}
1296
1297static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1298{
1299 struct nfs_server *server = NFS_SERVER(state->inode);
1300 struct nfs4_exception exception = { };
1301 int err;
1302 do {
1303 err = _nfs4_do_open_reclaim(ctx, state);
1304 if (err != -NFS4ERR_DELAY)
1305 break;
1306 nfs4_handle_exception(server, err, &exception);
1307 } while (exception.retry);
1308 return err;
1309}
1310
1311static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1312{
1313 struct nfs_open_context *ctx;
1314 int ret;
1315
1316 ctx = nfs4_state_find_open_context(state);
1317 if (IS_ERR(ctx))
1318 return PTR_ERR(ctx);
1319 ret = nfs4_do_open_reclaim(ctx, state);
1320 put_nfs_open_context(ctx);
1321 return ret;
1322}
1323
1324static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1325{
1326 struct nfs4_opendata *opendata;
1327 int ret;
1328
1329 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1330 if (IS_ERR(opendata))
1331 return PTR_ERR(opendata);
1332 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1333 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1334 ret = nfs4_open_recover(opendata, state);
1335 nfs4_opendata_put(opendata);
1336 return ret;
1337}
1338
1339int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1340{
1341 struct nfs4_exception exception = { };
1342 struct nfs_server *server = NFS_SERVER(state->inode);
1343 int err;
1344 do {
1345 err = _nfs4_open_delegation_recall(ctx, state, stateid);
1346 switch (err) {
1347 case 0:
1348 case -ENOENT:
1349 case -ESTALE:
1350 goto out;
1351 case -NFS4ERR_BADSESSION:
1352 case -NFS4ERR_BADSLOT:
1353 case -NFS4ERR_BAD_HIGH_SLOT:
1354 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1355 case -NFS4ERR_DEADSESSION:
1356 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1357 goto out;
1358 case -NFS4ERR_STALE_CLIENTID:
1359 case -NFS4ERR_STALE_STATEID:
1360 case -NFS4ERR_EXPIRED:
1361 /* Don't recall a delegation if it was lost */
1362 nfs4_schedule_lease_recovery(server->nfs_client);
1363 goto out;
1364 case -ERESTARTSYS:
1365 /*
1366 * The show must go on: exit, but mark the
1367 * stateid as needing recovery.
1368 */
1369 case -NFS4ERR_DELEG_REVOKED:
1370 case -NFS4ERR_ADMIN_REVOKED:
1371 case -NFS4ERR_BAD_STATEID:
1372 nfs_inode_find_state_and_recover(state->inode,
1373 stateid);
1374 nfs4_schedule_stateid_recovery(server, state);
1375 case -EKEYEXPIRED:
1376 /*
1377 * User RPCSEC_GSS context has expired.
1378 * We cannot recover this stateid now, so
1379 * skip it and allow recovery thread to
1380 * proceed.
1381 */
1382 case -ENOMEM:
1383 err = 0;
1384 goto out;
1385 }
1386 err = nfs4_handle_exception(server, err, &exception);
1387 } while (exception.retry);
1388out:
1389 return err;
1390}
1391
1392static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1393{
1394 struct nfs4_opendata *data = calldata;
1395
1396 data->rpc_status = task->tk_status;
1397 if (data->rpc_status == 0) {
1398 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1399 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1400 renew_lease(data->o_res.server, data->timestamp);
1401 data->rpc_done = 1;
1402 }
1403}
1404
1405static void nfs4_open_confirm_release(void *calldata)
1406{
1407 struct nfs4_opendata *data = calldata;
1408 struct nfs4_state *state = NULL;
1409
1410 /* If this request hasn't been cancelled, do nothing */
1411 if (data->cancelled == 0)
1412 goto out_free;
1413 /* In case of error, no cleanup! */
1414 if (!data->rpc_done)
1415 goto out_free;
1416 state = nfs4_opendata_to_nfs4_state(data);
1417 if (!IS_ERR(state))
1418 nfs4_close_state(state, data->o_arg.fmode);
1419out_free:
1420 nfs4_opendata_put(data);
1421}
1422
1423static const struct rpc_call_ops nfs4_open_confirm_ops = {
1424 .rpc_call_done = nfs4_open_confirm_done,
1425 .rpc_release = nfs4_open_confirm_release,
1426};
1427
1428/*
1429 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1430 */
1431static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1432{
1433 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1434 struct rpc_task *task;
1435 struct rpc_message msg = {
1436 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1437 .rpc_argp = &data->c_arg,
1438 .rpc_resp = &data->c_res,
1439 .rpc_cred = data->owner->so_cred,
1440 };
1441 struct rpc_task_setup task_setup_data = {
1442 .rpc_client = server->client,
1443 .rpc_message = &msg,
1444 .callback_ops = &nfs4_open_confirm_ops,
1445 .callback_data = data,
1446 .workqueue = nfsiod_workqueue,
1447 .flags = RPC_TASK_ASYNC,
1448 };
1449 int status;
1450
1451 kref_get(&data->kref);
1452 data->rpc_done = 0;
1453 data->rpc_status = 0;
1454 data->timestamp = jiffies;
1455 task = rpc_run_task(&task_setup_data);
1456 if (IS_ERR(task))
1457 return PTR_ERR(task);
1458 status = nfs4_wait_for_completion_rpc_task(task);
1459 if (status != 0) {
1460 data->cancelled = 1;
1461 smp_wmb();
1462 } else
1463 status = data->rpc_status;
1464 rpc_put_task(task);
1465 return status;
1466}
1467
1468static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1469{
1470 struct nfs4_opendata *data = calldata;
1471 struct nfs4_state_owner *sp = data->owner;
1472
1473 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1474 return;
1475 /*
1476 * Check if we still need to send an OPEN call, or if we can use
1477 * a delegation instead.
1478 */
1479 if (data->state != NULL) {
1480 struct nfs_delegation *delegation;
1481
1482 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1483 goto out_no_action;
1484 rcu_read_lock();
1485 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1486 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1487 can_open_delegated(delegation, data->o_arg.fmode))
1488 goto unlock_no_action;
1489 rcu_read_unlock();
1490 }
1491 /* Update client id. */
1492 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1493 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1494 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1495 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1496 }
1497 data->timestamp = jiffies;
1498 if (nfs4_setup_sequence(data->o_arg.server,
1499 &data->o_arg.seq_args,
1500 &data->o_res.seq_res, task))
1501 return;
1502 rpc_call_start(task);
1503 return;
1504unlock_no_action:
1505 rcu_read_unlock();
1506out_no_action:
1507 task->tk_action = NULL;
1508
1509}
1510
1511static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1512{
1513 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1514 nfs4_open_prepare(task, calldata);
1515}
1516
1517static void nfs4_open_done(struct rpc_task *task, void *calldata)
1518{
1519 struct nfs4_opendata *data = calldata;
1520
1521 data->rpc_status = task->tk_status;
1522
1523 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1524 return;
1525
1526 if (task->tk_status == 0) {
1527 switch (data->o_res.f_attr->mode & S_IFMT) {
1528 case S_IFREG:
1529 break;
1530 case S_IFLNK:
1531 data->rpc_status = -ELOOP;
1532 break;
1533 case S_IFDIR:
1534 data->rpc_status = -EISDIR;
1535 break;
1536 default:
1537 data->rpc_status = -ENOTDIR;
1538 }
1539 renew_lease(data->o_res.server, data->timestamp);
1540 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1541 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1542 }
1543 data->rpc_done = 1;
1544}
1545
1546static void nfs4_open_release(void *calldata)
1547{
1548 struct nfs4_opendata *data = calldata;
1549 struct nfs4_state *state = NULL;
1550
1551 /* If this request hasn't been cancelled, do nothing */
1552 if (data->cancelled == 0)
1553 goto out_free;
1554 /* In case of error, no cleanup! */
1555 if (data->rpc_status != 0 || !data->rpc_done)
1556 goto out_free;
1557 /* In case we need an open_confirm, no cleanup! */
1558 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1559 goto out_free;
1560 state = nfs4_opendata_to_nfs4_state(data);
1561 if (!IS_ERR(state))
1562 nfs4_close_state(state, data->o_arg.fmode);
1563out_free:
1564 nfs4_opendata_put(data);
1565}
1566
1567static const struct rpc_call_ops nfs4_open_ops = {
1568 .rpc_call_prepare = nfs4_open_prepare,
1569 .rpc_call_done = nfs4_open_done,
1570 .rpc_release = nfs4_open_release,
1571};
1572
1573static const struct rpc_call_ops nfs4_recover_open_ops = {
1574 .rpc_call_prepare = nfs4_recover_open_prepare,
1575 .rpc_call_done = nfs4_open_done,
1576 .rpc_release = nfs4_open_release,
1577};
1578
1579static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1580{
1581 struct inode *dir = data->dir->d_inode;
1582 struct nfs_server *server = NFS_SERVER(dir);
1583 struct nfs_openargs *o_arg = &data->o_arg;
1584 struct nfs_openres *o_res = &data->o_res;
1585 struct rpc_task *task;
1586 struct rpc_message msg = {
1587 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1588 .rpc_argp = o_arg,
1589 .rpc_resp = o_res,
1590 .rpc_cred = data->owner->so_cred,
1591 };
1592 struct rpc_task_setup task_setup_data = {
1593 .rpc_client = server->client,
1594 .rpc_message = &msg,
1595 .callback_ops = &nfs4_open_ops,
1596 .callback_data = data,
1597 .workqueue = nfsiod_workqueue,
1598 .flags = RPC_TASK_ASYNC,
1599 };
1600 int status;
1601
1602 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1603 kref_get(&data->kref);
1604 data->rpc_done = 0;
1605 data->rpc_status = 0;
1606 data->cancelled = 0;
1607 if (isrecover)
1608 task_setup_data.callback_ops = &nfs4_recover_open_ops;
1609 task = rpc_run_task(&task_setup_data);
1610 if (IS_ERR(task))
1611 return PTR_ERR(task);
1612 status = nfs4_wait_for_completion_rpc_task(task);
1613 if (status != 0) {
1614 data->cancelled = 1;
1615 smp_wmb();
1616 } else
1617 status = data->rpc_status;
1618 rpc_put_task(task);
1619
1620 return status;
1621}
1622
1623static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1624{
1625 struct inode *dir = data->dir->d_inode;
1626 struct nfs_openres *o_res = &data->o_res;
1627 int status;
1628
1629 status = nfs4_run_open_task(data, 1);
1630 if (status != 0 || !data->rpc_done)
1631 return status;
1632
1633 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1634
1635 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1636 status = _nfs4_proc_open_confirm(data);
1637 if (status != 0)
1638 return status;
1639 }
1640
1641 return status;
1642}
1643
1644/*
1645 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1646 */
1647static int _nfs4_proc_open(struct nfs4_opendata *data)
1648{
1649 struct inode *dir = data->dir->d_inode;
1650 struct nfs_server *server = NFS_SERVER(dir);
1651 struct nfs_openargs *o_arg = &data->o_arg;
1652 struct nfs_openres *o_res = &data->o_res;
1653 int status;
1654
1655 status = nfs4_run_open_task(data, 0);
1656 if (!data->rpc_done)
1657 return status;
1658 if (status != 0) {
1659 if (status == -NFS4ERR_BADNAME &&
1660 !(o_arg->open_flags & O_CREAT))
1661 return -ENOENT;
1662 return status;
1663 }
1664
1665 nfs_fattr_map_and_free_names(server, &data->f_attr);
1666
1667 if (o_arg->open_flags & O_CREAT)
1668 update_changeattr(dir, &o_res->cinfo);
1669 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1670 server->caps &= ~NFS_CAP_POSIX_LOCK;
1671 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1672 status = _nfs4_proc_open_confirm(data);
1673 if (status != 0)
1674 return status;
1675 }
1676 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1677 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1678 return 0;
1679}
1680
1681static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1682{
1683 unsigned int loop;
1684 int ret;
1685
1686 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1687 ret = nfs4_wait_clnt_recover(clp);
1688 if (ret != 0)
1689 break;
1690 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1691 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1692 break;
1693 nfs4_schedule_state_manager(clp);
1694 ret = -EIO;
1695 }
1696 return ret;
1697}
1698
1699static int nfs4_recover_expired_lease(struct nfs_server *server)
1700{
1701 return nfs4_client_recover_expired_lease(server->nfs_client);
1702}
1703
1704/*
1705 * OPEN_EXPIRED:
1706 * reclaim state on the server after a network partition.
1707 * Assumes caller holds the appropriate lock
1708 */
1709static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1710{
1711 struct nfs4_opendata *opendata;
1712 int ret;
1713
1714 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1715 if (IS_ERR(opendata))
1716 return PTR_ERR(opendata);
1717 ret = nfs4_open_recover(opendata, state);
1718 if (ret == -ESTALE)
1719 d_drop(ctx->dentry);
1720 nfs4_opendata_put(opendata);
1721 return ret;
1722}
1723
1724static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1725{
1726 struct nfs_server *server = NFS_SERVER(state->inode);
1727 struct nfs4_exception exception = { };
1728 int err;
1729
1730 do {
1731 err = _nfs4_open_expired(ctx, state);
1732 switch (err) {
1733 default:
1734 goto out;
1735 case -NFS4ERR_GRACE:
1736 case -NFS4ERR_DELAY:
1737 nfs4_handle_exception(server, err, &exception);
1738 err = 0;
1739 }
1740 } while (exception.retry);
1741out:
1742 return err;
1743}
1744
1745static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1746{
1747 struct nfs_open_context *ctx;
1748 int ret;
1749
1750 ctx = nfs4_state_find_open_context(state);
1751 if (IS_ERR(ctx))
1752 return PTR_ERR(ctx);
1753 ret = nfs4_do_open_expired(ctx, state);
1754 put_nfs_open_context(ctx);
1755 return ret;
1756}
1757
1758#if defined(CONFIG_NFS_V4_1)
1759static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
1760{
1761 struct nfs_server *server = NFS_SERVER(state->inode);
1762 nfs4_stateid *stateid = &state->stateid;
1763 int status;
1764
1765 /* If a state reset has been done, test_stateid is unneeded */
1766 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1767 return;
1768
1769 status = nfs41_test_stateid(server, stateid);
1770 if (status != NFS_OK) {
1771 /* Free the stateid unless the server explicitly
1772 * informs us the stateid is unrecognized. */
1773 if (status != -NFS4ERR_BAD_STATEID)
1774 nfs41_free_stateid(server, stateid);
1775
1776 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1777 }
1778}
1779
1780/**
1781 * nfs41_check_open_stateid - possibly free an open stateid
1782 *
1783 * @state: NFSv4 state for an inode
1784 *
1785 * Returns NFS_OK if recovery for this stateid is now finished.
1786 * Otherwise a negative NFS4ERR value is returned.
1787 */
1788static int nfs41_check_open_stateid(struct nfs4_state *state)
1789{
1790 struct nfs_server *server = NFS_SERVER(state->inode);
1791 nfs4_stateid *stateid = &state->stateid;
1792 int status;
1793
1794 /* If a state reset has been done, test_stateid is unneeded */
1795 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
1796 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
1797 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
1798 return -NFS4ERR_BAD_STATEID;
1799
1800 status = nfs41_test_stateid(server, stateid);
1801 if (status != NFS_OK) {
1802 /* Free the stateid unless the server explicitly
1803 * informs us the stateid is unrecognized. */
1804 if (status != -NFS4ERR_BAD_STATEID)
1805 nfs41_free_stateid(server, stateid);
1806
1807 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1808 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1809 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1810 }
1811 return status;
1812}
1813
1814static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1815{
1816 int status;
1817
1818 nfs41_clear_delegation_stateid(state);
1819 status = nfs41_check_open_stateid(state);
1820 if (status != NFS_OK)
1821 status = nfs4_open_expired(sp, state);
1822 return status;
1823}
1824#endif
1825
1826/*
1827 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
1828 * fields corresponding to attributes that were used to store the verifier.
1829 * Make sure we clobber those fields in the later setattr call
1830 */
1831static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1832{
1833 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1834 !(sattr->ia_valid & ATTR_ATIME_SET))
1835 sattr->ia_valid |= ATTR_ATIME;
1836
1837 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1838 !(sattr->ia_valid & ATTR_MTIME_SET))
1839 sattr->ia_valid |= ATTR_MTIME;
1840}
1841
1842/*
1843 * Returns a referenced nfs4_state
1844 */
1845static int _nfs4_do_open(struct inode *dir,
1846 struct dentry *dentry,
1847 fmode_t fmode,
1848 int flags,
1849 struct iattr *sattr,
1850 struct rpc_cred *cred,
1851 struct nfs4_state **res,
1852 struct nfs4_threshold **ctx_th)
1853{
1854 struct nfs4_state_owner *sp;
1855 struct nfs4_state *state = NULL;
1856 struct nfs_server *server = NFS_SERVER(dir);
1857 struct nfs4_opendata *opendata;
1858 int status;
1859
1860 /* Protect against reboot recovery conflicts */
1861 status = -ENOMEM;
1862 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1863 if (sp == NULL) {
1864 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1865 goto out_err;
1866 }
1867 status = nfs4_recover_expired_lease(server);
1868 if (status != 0)
1869 goto err_put_state_owner;
1870 if (dentry->d_inode != NULL)
1871 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
1872 status = -ENOMEM;
1873 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
1874 if (opendata == NULL)
1875 goto err_put_state_owner;
1876
1877 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
1878 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
1879 if (!opendata->f_attr.mdsthreshold)
1880 goto err_opendata_put;
1881 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
1882 }
1883 if (dentry->d_inode != NULL)
1884 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1885
1886 status = _nfs4_proc_open(opendata);
1887 if (status != 0)
1888 goto err_opendata_put;
1889
1890 state = nfs4_opendata_to_nfs4_state(opendata);
1891 status = PTR_ERR(state);
1892 if (IS_ERR(state))
1893 goto err_opendata_put;
1894 if (server->caps & NFS_CAP_POSIX_LOCK)
1895 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1896
1897 if (opendata->o_arg.open_flags & O_EXCL) {
1898 nfs4_exclusive_attrset(opendata, sattr);
1899
1900 nfs_fattr_init(opendata->o_res.f_attr);
1901 status = nfs4_do_setattr(state->inode, cred,
1902 opendata->o_res.f_attr, sattr,
1903 state);
1904 if (status == 0)
1905 nfs_setattr_update_inode(state->inode, sattr);
1906 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1907 }
1908
1909 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
1910 *ctx_th = opendata->f_attr.mdsthreshold;
1911 else
1912 kfree(opendata->f_attr.mdsthreshold);
1913 opendata->f_attr.mdsthreshold = NULL;
1914
1915 nfs4_opendata_put(opendata);
1916 nfs4_put_state_owner(sp);
1917 *res = state;
1918 return 0;
1919err_opendata_put:
1920 kfree(opendata->f_attr.mdsthreshold);
1921 nfs4_opendata_put(opendata);
1922err_put_state_owner:
1923 nfs4_put_state_owner(sp);
1924out_err:
1925 *res = NULL;
1926 return status;
1927}
1928
1929
1930static struct nfs4_state *nfs4_do_open(struct inode *dir,
1931 struct dentry *dentry,
1932 fmode_t fmode,
1933 int flags,
1934 struct iattr *sattr,
1935 struct rpc_cred *cred,
1936 struct nfs4_threshold **ctx_th)
1937{
1938 struct nfs4_exception exception = { };
1939 struct nfs4_state *res;
1940 int status;
1941
1942 fmode &= FMODE_READ|FMODE_WRITE;
1943 do {
1944 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
1945 &res, ctx_th);
1946 if (status == 0)
1947 break;
1948 /* NOTE: BAD_SEQID means the server and client disagree about the
1949 * book-keeping w.r.t. state-changing operations
1950 * (OPEN/CLOSE/LOCK/LOCKU...)
1951 * It is actually a sign of a bug on the client or on the server.
1952 *
1953 * If we receive a BAD_SEQID error in the particular case of
1954 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1955 * have unhashed the old state_owner for us, and that we can
1956 * therefore safely retry using a new one. We should still warn
1957 * the user though...
1958 */
1959 if (status == -NFS4ERR_BAD_SEQID) {
1960 pr_warn_ratelimited("NFS: v4 server %s "
1961 " returned a bad sequence-id error!\n",
1962 NFS_SERVER(dir)->nfs_client->cl_hostname);
1963 exception.retry = 1;
1964 continue;
1965 }
1966 /*
1967 * BAD_STATEID on OPEN means that the server cancelled our
1968 * state before it received the OPEN_CONFIRM.
1969 * Recover by retrying the request as per the discussion
1970 * on Page 181 of RFC3530.
1971 */
1972 if (status == -NFS4ERR_BAD_STATEID) {
1973 exception.retry = 1;
1974 continue;
1975 }
1976 if (status == -EAGAIN) {
1977 /* We must have found a delegation */
1978 exception.retry = 1;
1979 continue;
1980 }
1981 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1982 status, &exception));
1983 } while (exception.retry);
1984 return res;
1985}
1986
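/*
 * _nfs4_do_setattr() chooses which stateid to send with the SETATTR:
 * an open/lock stateid belonging to the caller when one is available,
 * otherwise a write delegation stateid held for the inode, and as a
 * last resort the all-zero "anonymous" stateid.
 */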
1987static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1988 struct nfs_fattr *fattr, struct iattr *sattr,
1989 struct nfs4_state *state)
1990{
1991 struct nfs_server *server = NFS_SERVER(inode);
1992 struct nfs_setattrargs arg = {
1993 .fh = NFS_FH(inode),
1994 .iap = sattr,
1995 .server = server,
1996 .bitmask = server->attr_bitmask,
1997 };
1998 struct nfs_setattrres res = {
1999 .fattr = fattr,
2000 .server = server,
2001 };
2002 struct rpc_message msg = {
2003 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2004 .rpc_argp = &arg,
2005 .rpc_resp = &res,
2006 .rpc_cred = cred,
2007 };
2008 unsigned long timestamp = jiffies;
2009 int status;
2010
2011 nfs_fattr_init(fattr);
2012
2013 if (state != NULL) {
2014 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
2015 current->files, current->tgid);
2016 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
2017 FMODE_WRITE)) {
2018 /* Use that stateid */
2019 } else
2020 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
2021
2022 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2023 if (status == 0 && state != NULL)
2024 renew_lease(server, timestamp);
2025 return status;
2026}
2027
2028static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2029 struct nfs_fattr *fattr, struct iattr *sattr,
2030 struct nfs4_state *state)
2031{
2032 struct nfs_server *server = NFS_SERVER(inode);
2033 struct nfs4_exception exception = {
2034 .state = state,
2035 .inode = inode,
2036 };
2037 int err;
2038 do {
2039 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
2040 switch (err) {
2041 case -NFS4ERR_OPENMODE:
2042 if (state && !(state->state & FMODE_WRITE)) {
2043 err = -EBADF;
2044 if (sattr->ia_valid & ATTR_OPEN)
2045 err = -EACCES;
2046 goto out;
2047 }
2048 }
2049 err = nfs4_handle_exception(server, err, &exception);
2050 } while (exception.retry);
2051out:
2052 return err;
2053}
2054
2055struct nfs4_closedata {
2056 struct inode *inode;
2057 struct nfs4_state *state;
2058 struct nfs_closeargs arg;
2059 struct nfs_closeres res;
2060 struct nfs_fattr fattr;
2061 unsigned long timestamp;
2062 bool roc;
2063 u32 roc_barrier;
2064};
2065
2066static void nfs4_free_closedata(void *data)
2067{
2068 struct nfs4_closedata *calldata = data;
2069 struct nfs4_state_owner *sp = calldata->state->owner;
2070 struct super_block *sb = calldata->state->inode->i_sb;
2071
2072 if (calldata->roc)
2073 pnfs_roc_release(calldata->state->inode);
2074 nfs4_put_open_state(calldata->state);
2075 nfs_free_seqid(calldata->arg.seqid);
2076 nfs4_put_state_owner(sp);
2077 nfs_sb_deactive(sb);
2078 kfree(calldata);
2079}
2080
2081static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2082 fmode_t fmode)
2083{
2084 spin_lock(&state->owner->so_lock);
2085 if (!(fmode & FMODE_READ))
2086 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2087 if (!(fmode & FMODE_WRITE))
2088 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2089 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2090 spin_unlock(&state->owner->so_lock);
2091}
2092
2093static void nfs4_close_done(struct rpc_task *task, void *data)
2094{
2095 struct nfs4_closedata *calldata = data;
2096 struct nfs4_state *state = calldata->state;
2097 struct nfs_server *server = NFS_SERVER(calldata->inode);
2098
2099 dprintk("%s: begin!\n", __func__);
2100 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2101 return;
2102 /* hmm. we are done with the inode, and in the process of freeing
2103 * the state_owner. we keep this around to process errors
2104 */
2105 switch (task->tk_status) {
2106 case 0:
2107 if (calldata->roc)
2108 pnfs_roc_set_barrier(state->inode,
2109 calldata->roc_barrier);
2110 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2111 renew_lease(server, calldata->timestamp);
2112 nfs4_close_clear_stateid_flags(state,
2113 calldata->arg.fmode);
2114 break;
2115 case -NFS4ERR_STALE_STATEID:
2116 case -NFS4ERR_OLD_STATEID:
2117 case -NFS4ERR_BAD_STATEID:
2118 case -NFS4ERR_EXPIRED:
2119 if (calldata->arg.fmode == 0)
2120 break;
2121 default:
2122 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2123 rpc_restart_call_prepare(task);
2124 }
2125 nfs_release_seqid(calldata->arg.seqid);
2126 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2127 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2128}
2129
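/*
 * nfs4_close_prepare() works out what, if anything, needs to go on the
 * wire: it exits without an RPC when no share mode has been released,
 * sends OPEN_DOWNGRADE while some of the open modes are still in use,
 * and only sends a full CLOSE once every open count on the state has
 * dropped to zero.
 */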
2130static void nfs4_close_prepare(struct rpc_task *task, void *data)
2131{
2132 struct nfs4_closedata *calldata = data;
2133 struct nfs4_state *state = calldata->state;
2134 int call_close = 0;
2135
2136 dprintk("%s: begin!\n", __func__);
2137 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2138 return;
2139
2140 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2141 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2142 spin_lock(&state->owner->so_lock);
2143 /* Calculate the change in open mode */
2144 if (state->n_rdwr == 0) {
2145 if (state->n_rdonly == 0) {
2146 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2147 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2148 calldata->arg.fmode &= ~FMODE_READ;
2149 }
2150 if (state->n_wronly == 0) {
2151 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2152 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2153 calldata->arg.fmode &= ~FMODE_WRITE;
2154 }
2155 }
2156 spin_unlock(&state->owner->so_lock);
2157
2158 if (!call_close) {
2159 /* Note: exit _without_ calling nfs4_close_done */
2160 task->tk_action = NULL;
2161 goto out;
2162 }
2163
2164 if (calldata->arg.fmode == 0) {
2165 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2166 if (calldata->roc &&
2167 pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
2168 rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
2169 task, NULL);
2170 goto out;
2171 }
2172 }
2173
2174 nfs_fattr_init(calldata->res.fattr);
2175 calldata->timestamp = jiffies;
2176 if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
2177 &calldata->arg.seq_args,
2178 &calldata->res.seq_res,
2179 task))
2180 goto out;
2181 rpc_call_start(task);
2182out:
2183 dprintk("%s: done!\n", __func__);
2184}
2185
2186static const struct rpc_call_ops nfs4_close_ops = {
2187 .rpc_call_prepare = nfs4_close_prepare,
2188 .rpc_call_done = nfs4_close_done,
2189 .rpc_release = nfs4_free_closedata,
2190};
2191
2192/*
2193 * It is possible for data to be read/written from a mem-mapped file
2194 * after the sys_close call (which hits the vfs layer as a flush).
2195 * This means that we can't safely call nfsv4 close on a file until
2196 * the inode is cleared. This in turn means that we are not good
2197 * NFSv4 citizens - we do not indicate to the server to update the file's
2198 * share state even when we are done with one of the three share
2199 * stateid's in the inode.
2200 *
2201 * NOTE: Caller must be holding the sp->so_owner semaphore!
2202 */
2203int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
2204{
2205 struct nfs_server *server = NFS_SERVER(state->inode);
2206 struct nfs4_closedata *calldata;
2207 struct nfs4_state_owner *sp = state->owner;
2208 struct rpc_task *task;
2209 struct rpc_message msg = {
2210 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2211 .rpc_cred = state->owner->so_cred,
2212 };
2213 struct rpc_task_setup task_setup_data = {
2214 .rpc_client = server->client,
2215 .rpc_message = &msg,
2216 .callback_ops = &nfs4_close_ops,
2217 .workqueue = nfsiod_workqueue,
2218 .flags = RPC_TASK_ASYNC,
2219 };
2220 int status = -ENOMEM;
2221
2222 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2223 if (calldata == NULL)
2224 goto out;
2225 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2226 calldata->inode = state->inode;
2227 calldata->state = state;
2228 calldata->arg.fh = NFS_FH(state->inode);
2229 calldata->arg.stateid = &state->open_stateid;
2230 /* Serialization for the sequence id */
2231 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2232 if (calldata->arg.seqid == NULL)
2233 goto out_free_calldata;
2234 calldata->arg.fmode = 0;
2235 calldata->arg.bitmask = server->cache_consistency_bitmask;
2236 calldata->res.fattr = &calldata->fattr;
2237 calldata->res.seqid = calldata->arg.seqid;
2238 calldata->res.server = server;
2239 calldata->roc = roc;
2240 nfs_sb_active(calldata->inode->i_sb);
2241
2242 msg.rpc_argp = &calldata->arg;
2243 msg.rpc_resp = &calldata->res;
2244 task_setup_data.callback_data = calldata;
2245 task = rpc_run_task(&task_setup_data);
2246 if (IS_ERR(task))
2247 return PTR_ERR(task);
2248 status = 0;
2249 if (wait)
2250 status = rpc_wait_for_completion_task(task);
2251 rpc_put_task(task);
2252 return status;
2253out_free_calldata:
2254 kfree(calldata);
2255out:
2256 if (roc)
2257 pnfs_roc_release(state->inode);
2258 nfs4_put_open_state(state);
2259 nfs4_put_state_owner(sp);
2260 return status;
2261}
2262
2263static struct inode *
2264nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2265{
2266 struct nfs4_state *state;
2267
2268 /* Protect against concurrent sillydeletes */
2269 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
2270 ctx->cred, &ctx->mdsthreshold);
2271 if (IS_ERR(state))
2272 return ERR_CAST(state);
2273 ctx->state = state;
2274 return igrab(state->inode);
2275}
2276
2277static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2278{
2279 if (ctx->state == NULL)
2280 return;
2281 if (is_sync)
2282 nfs4_close_sync(ctx->state, ctx->mode);
2283 else
2284 nfs4_close_state(ctx->state, ctx->mode);
2285}
2286
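/*
 * Query the attributes the server supports and translate them into the
 * NFS_CAP_* flags and the cache-consistency/ACL bitmasks cached in the
 * nfs_server structure.
 */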
2287static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2288{
2289 struct nfs4_server_caps_arg args = {
2290 .fhandle = fhandle,
2291 };
2292 struct nfs4_server_caps_res res = {};
2293 struct rpc_message msg = {
2294 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2295 .rpc_argp = &args,
2296 .rpc_resp = &res,
2297 };
2298 int status;
2299
2300 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2301 if (status == 0) {
2302 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2303 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2304 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2305 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2306 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2307 NFS_CAP_CTIME|NFS_CAP_MTIME);
2308 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2309 server->caps |= NFS_CAP_ACLS;
2310 if (res.has_links != 0)
2311 server->caps |= NFS_CAP_HARDLINKS;
2312 if (res.has_symlinks != 0)
2313 server->caps |= NFS_CAP_SYMLINKS;
2314 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2315 server->caps |= NFS_CAP_FILEID;
2316 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2317 server->caps |= NFS_CAP_MODE;
2318 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2319 server->caps |= NFS_CAP_NLINK;
2320 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2321 server->caps |= NFS_CAP_OWNER;
2322 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2323 server->caps |= NFS_CAP_OWNER_GROUP;
2324 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2325 server->caps |= NFS_CAP_ATIME;
2326 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2327 server->caps |= NFS_CAP_CTIME;
2328 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2329 server->caps |= NFS_CAP_MTIME;
2330
2331 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2332 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2333 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2334 server->acl_bitmask = res.acl_bitmask;
2335 server->fh_expire_type = res.fh_expire_type;
2336 }
2337
2338 return status;
2339}
2340
2341int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2342{
2343 struct nfs4_exception exception = { };
2344 int err;
2345 do {
2346 err = nfs4_handle_exception(server,
2347 _nfs4_server_capabilities(server, fhandle),
2348 &exception);
2349 } while (exception.retry);
2350 return err;
2351}
2352
2353static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2354 struct nfs_fsinfo *info)
2355{
2356 struct nfs4_lookup_root_arg args = {
2357 .bitmask = nfs4_fattr_bitmap,
2358 };
2359 struct nfs4_lookup_res res = {
2360 .server = server,
2361 .fattr = info->fattr,
2362 .fh = fhandle,
2363 };
2364 struct rpc_message msg = {
2365 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2366 .rpc_argp = &args,
2367 .rpc_resp = &res,
2368 };
2369
2370 nfs_fattr_init(info->fattr);
2371 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2372}
2373
2374static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2375 struct nfs_fsinfo *info)
2376{
2377 struct nfs4_exception exception = { };
2378 int err;
2379 do {
2380 err = _nfs4_lookup_root(server, fhandle, info);
2381 switch (err) {
2382 case 0:
2383 case -NFS4ERR_WRONGSEC:
2384 goto out;
2385 default:
2386 err = nfs4_handle_exception(server, err, &exception);
2387 }
2388 } while (exception.retry);
2389out:
2390 return err;
2391}
2392
2393static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2394 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2395{
2396 struct rpc_auth *auth;
2397 int ret;
2398
2399 auth = rpcauth_create(flavor, server->client);
2400 if (IS_ERR(auth)) {
2401 ret = -EIO;
2402 goto out;
2403 }
2404 ret = nfs4_lookup_root(server, fhandle, info);
2405out:
2406 return ret;
2407}
2408
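/*
 * Try each GSS pseudoflavor known to the kernel, then AUTH_NULL, until
 * one of them succeeds in looking up the root filehandle.
 */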
2409static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2410 struct nfs_fsinfo *info)
2411{
2412 int i, len, status = 0;
2413 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2414
2415 len = gss_mech_list_pseudoflavors(&flav_array[0]);
2416 flav_array[len] = RPC_AUTH_NULL;
2417 len += 1;
2418
2419 for (i = 0; i < len; i++) {
2420 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2421 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2422 continue;
2423 break;
2424 }
2425 /*
2426 * -EACCES could mean that the user doesn't have correct permissions
2427 * to access the mount. It could also mean that we tried to mount
2428 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2429 * existing mount programs don't handle -EACCES very well so it should
2430 * be mapped to -EPERM instead.
2431 */
2432 if (status == -EACCES)
2433 status = -EPERM;
2434 return status;
2435}
2436
2437/*
2438 * get the file handle for the "/" directory on the server
2439 */
2440int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
2441 struct nfs_fsinfo *info)
2442{
2443 int minor_version = server->nfs_client->cl_minorversion;
2444 int status = nfs4_lookup_root(server, fhandle, info);
2445 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2446 /*
2447 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2448 * by nfs4_map_errors() as this function exits.
2449 */
2450 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2451 if (status == 0)
2452 status = nfs4_server_capabilities(server, fhandle);
2453 if (status == 0)
2454 status = nfs4_do_fsinfo(server, fhandle, info);
2455 return nfs4_map_errors(status);
2456}
2457
2458static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
2459 struct nfs_fsinfo *info)
2460{
2461 int error;
2462 struct nfs_fattr *fattr = info->fattr;
2463
2464 error = nfs4_server_capabilities(server, mntfh);
2465 if (error < 0) {
2466 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
2467 return error;
2468 }
2469
2470 error = nfs4_proc_getattr(server, mntfh, fattr);
2471 if (error < 0) {
2472 dprintk("nfs4_get_root: getattr error = %d\n", -error);
2473 return error;
2474 }
2475
2476 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
2477 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
2478 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
2479
2480 return error;
2481}
2482
2483/*
2484 * Get locations and (maybe) other attributes of a referral.
2485 * Note that we'll actually follow the referral later when
2486 * we detect fsid mismatch in inode revalidation
2487 */
2488static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2489 const struct qstr *name, struct nfs_fattr *fattr,
2490 struct nfs_fh *fhandle)
2491{
2492 int status = -ENOMEM;
2493 struct page *page = NULL;
2494 struct nfs4_fs_locations *locations = NULL;
2495
2496 page = alloc_page(GFP_KERNEL);
2497 if (page == NULL)
2498 goto out;
2499 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2500 if (locations == NULL)
2501 goto out;
2502
2503 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2504 if (status != 0)
2505 goto out;
2506 /* Make sure server returned a different fsid for the referral */
2507 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2508 dprintk("%s: server did not return a different fsid for"
2509 " a referral at %s\n", __func__, name->name);
2510 status = -EIO;
2511 goto out;
2512 }
2513 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2514 nfs_fixup_referral_attributes(&locations->fattr);
2515
2516 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2517 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2518 memset(fhandle, 0, sizeof(struct nfs_fh));
2519out:
2520 if (page)
2521 __free_page(page);
2522 kfree(locations);
2523 return status;
2524}
2525
2526static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2527{
2528 struct nfs4_getattr_arg args = {
2529 .fh = fhandle,
2530 .bitmask = server->attr_bitmask,
2531 };
2532 struct nfs4_getattr_res res = {
2533 .fattr = fattr,
2534 .server = server,
2535 };
2536 struct rpc_message msg = {
2537 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2538 .rpc_argp = &args,
2539 .rpc_resp = &res,
2540 };
2541
2542 nfs_fattr_init(fattr);
2543 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2544}
2545
2546static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2547{
2548 struct nfs4_exception exception = { };
2549 int err;
2550 do {
2551 err = nfs4_handle_exception(server,
2552 _nfs4_proc_getattr(server, fhandle, fattr),
2553 &exception);
2554 } while (exception.retry);
2555 return err;
2556}
2557
2558/*
2559 * The file is not closed if it is opened due to a request to change
2560 * the size of the file. The open call will not be needed once the
2561 * VFS layer lookup-intents are implemented.
2562 *
2563 * Close is called when the inode is destroyed.
2564 * If we haven't opened the file for O_WRONLY, we
2565 * need to do so in the size_change case in order to obtain a stateid.
2566 *
2567 * Got race?
2568 * Because OPEN is always done by name in nfsv4, it is
2569 * possible that we opened a different file by the same
2570 * name. We can recognize this race condition, but we
2571 * can't do anything about it besides returning an error.
2572 *
2573 * This will be fixed with VFS changes (lookup-intent).
2574 */
2575static int
2576nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2577 struct iattr *sattr)
2578{
2579 struct inode *inode = dentry->d_inode;
2580 struct rpc_cred *cred = NULL;
2581 struct nfs4_state *state = NULL;
2582 int status;
2583
2584 if (pnfs_ld_layoutret_on_setattr(inode))
2585 pnfs_return_layout(inode);
2586
2587 nfs_fattr_init(fattr);
2588
2589 /* Deal with open(O_TRUNC) */
2590 if (sattr->ia_valid & ATTR_OPEN)
2591 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2592
2593 /* Optimization: if the end result is no change, don't RPC */
2594 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
2595 return 0;
2596
2597 /* Search for an existing open(O_WRITE) file */
2598 if (sattr->ia_valid & ATTR_FILE) {
2599 struct nfs_open_context *ctx;
2600
2601 ctx = nfs_file_open_context(sattr->ia_file);
2602 if (ctx) {
2603 cred = ctx->cred;
2604 state = ctx->state;
2605 }
2606 }
2607
2608 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2609 if (status == 0)
2610 nfs_setattr_update_inode(inode, sattr);
2611 return status;
2612}
2613
2614static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2615 const struct qstr *name, struct nfs_fh *fhandle,
2616 struct nfs_fattr *fattr)
2617{
2618 struct nfs_server *server = NFS_SERVER(dir);
2619 int status;
2620 struct nfs4_lookup_arg args = {
2621 .bitmask = server->attr_bitmask,
2622 .dir_fh = NFS_FH(dir),
2623 .name = name,
2624 };
2625 struct nfs4_lookup_res res = {
2626 .server = server,
2627 .fattr = fattr,
2628 .fh = fhandle,
2629 };
2630 struct rpc_message msg = {
2631 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2632 .rpc_argp = &args,
2633 .rpc_resp = &res,
2634 };
2635
2636 nfs_fattr_init(fattr);
2637
2638 dprintk("NFS call lookup %s\n", name->name);
2639 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2640 dprintk("NFS reply lookup: %d\n", status);
2641 return status;
2642}
2643
2644static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2645{
2646 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2647 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2648 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2649 fattr->nlink = 2;
2650}
2651
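/*
 * Common lookup engine: on NFS4ERR_WRONGSEC it negotiates a new security
 * flavour and retries, on NFS4ERR_MOVED it fetches the referral's
 * locations and attributes instead, and any replacement rpc_clnt is
 * handed back to the caller on success.
 */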
2652static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2653 struct qstr *name, struct nfs_fh *fhandle,
2654 struct nfs_fattr *fattr)
2655{
2656 struct nfs4_exception exception = { };
2657 struct rpc_clnt *client = *clnt;
2658 int err;
2659 do {
2660 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2661 switch (err) {
2662 case -NFS4ERR_BADNAME:
2663 err = -ENOENT;
2664 goto out;
2665 case -NFS4ERR_MOVED:
2666 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2667 goto out;
2668 case -NFS4ERR_WRONGSEC:
2669 err = -EPERM;
2670 if (client != *clnt)
2671 goto out;
2672
2673 client = nfs4_create_sec_client(client, dir, name);
2674 if (IS_ERR(client))
2675 return PTR_ERR(client);
2676
2677 exception.retry = 1;
2678 break;
2679 default:
2680 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2681 }
2682 } while (exception.retry);
2683
2684out:
2685 if (err == 0)
2686 *clnt = client;
2687 else if (client != *clnt)
2688 rpc_shutdown_client(client);
2689
2690 return err;
2691}
2692
2693static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
2694 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2695{
2696 int status;
2697 struct rpc_clnt *client = NFS_CLIENT(dir);
2698
2699 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2700 if (client != NFS_CLIENT(dir)) {
2701 rpc_shutdown_client(client);
2702 nfs_fixup_secinfo_attributes(fattr);
2703 }
2704 return status;
2705}
2706
2707struct rpc_clnt *
2708nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2709 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2710{
2711 int status;
2712 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2713
2714 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2715 if (status < 0) {
2716 rpc_shutdown_client(client);
2717 return ERR_PTR(status);
2718 }
2719 return client;
2720}
2721
2722static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2723{
2724 struct nfs_server *server = NFS_SERVER(inode);
2725 struct nfs4_accessargs args = {
2726 .fh = NFS_FH(inode),
2727 .bitmask = server->cache_consistency_bitmask,
2728 };
2729 struct nfs4_accessres res = {
2730 .server = server,
2731 };
2732 struct rpc_message msg = {
2733 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2734 .rpc_argp = &args,
2735 .rpc_resp = &res,
2736 .rpc_cred = entry->cred,
2737 };
2738 int mode = entry->mask;
2739 int status;
2740
2741 /*
2742 * Determine which access bits we want to ask for...
2743 */
2744 if (mode & MAY_READ)
2745 args.access |= NFS4_ACCESS_READ;
2746 if (S_ISDIR(inode->i_mode)) {
2747 if (mode & MAY_WRITE)
2748 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2749 if (mode & MAY_EXEC)
2750 args.access |= NFS4_ACCESS_LOOKUP;
2751 } else {
2752 if (mode & MAY_WRITE)
2753 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2754 if (mode & MAY_EXEC)
2755 args.access |= NFS4_ACCESS_EXECUTE;
2756 }
2757
2758 res.fattr = nfs_alloc_fattr();
2759 if (res.fattr == NULL)
2760 return -ENOMEM;
2761
2762 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2763 if (!status) {
2764 entry->mask = 0;
2765 if (res.access & NFS4_ACCESS_READ)
2766 entry->mask |= MAY_READ;
2767 if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2768 entry->mask |= MAY_WRITE;
2769 if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2770 entry->mask |= MAY_EXEC;
2771 nfs_refresh_inode(inode, res.fattr);
2772 }
2773 nfs_free_fattr(res.fattr);
2774 return status;
2775}
2776
2777static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2778{
2779 struct nfs4_exception exception = { };
2780 int err;
2781 do {
2782 err = nfs4_handle_exception(NFS_SERVER(inode),
2783 _nfs4_proc_access(inode, entry),
2784 &exception);
2785 } while (exception.retry);
2786 return err;
2787}
2788
2789/*
2790 * TODO: For the time being, we don't try to get any attributes
2791 * along with any of the zero-copy operations READ, READDIR,
2792 * READLINK, WRITE.
2793 *
2794 * In the case of the first three, we want to put the GETATTR
2795 * after the read-type operation -- this is because it is hard
2796 * to predict the length of a GETATTR response in v4, and thus
2797 * align the READ data correctly. This means that the GETATTR
2798 * may end up partially falling into the page cache, and we should
2799 * shift it into the 'tail' of the xdr_buf before processing.
2800 * To do this efficiently, we need to know the total length
2801 * of data received, which doesn't seem to be available outside
2802 * of the RPC layer.
2803 *
2804 * In the case of WRITE, we also want to put the GETATTR after
2805 * the operation -- in this case because we want to make sure
2806 * we get the post-operation mtime and size.
2807 *
2808 * Both of these changes to the XDR layer would in fact be quite
2809 * minor, but I decided to leave them for a subsequent patch.
2810 */
2811static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2812 unsigned int pgbase, unsigned int pglen)
2813{
2814 struct nfs4_readlink args = {
2815 .fh = NFS_FH(inode),
2816 .pgbase = pgbase,
2817 .pglen = pglen,
2818 .pages = &page,
2819 };
2820 struct nfs4_readlink_res res;
2821 struct rpc_message msg = {
2822 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2823 .rpc_argp = &args,
2824 .rpc_resp = &res,
2825 };
2826
2827 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2828}
2829
2830static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2831 unsigned int pgbase, unsigned int pglen)
2832{
2833 struct nfs4_exception exception = { };
2834 int err;
2835 do {
2836 err = nfs4_handle_exception(NFS_SERVER(inode),
2837 _nfs4_proc_readlink(inode, page, pgbase, pglen),
2838 &exception);
2839 } while (exception.retry);
2840 return err;
2841}
2842
2843/*
2844 * This is just for mknod. open(O_CREAT) will always do ->open_context().
2845 */
2846static int
2847nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2848 int flags)
2849{
2850 struct nfs_open_context *ctx;
2851 struct nfs4_state *state;
2852 int status = 0;
2853
2854 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
2855 if (IS_ERR(ctx))
2856 return PTR_ERR(ctx);
2857
2858 sattr->ia_mode &= ~current_umask();
2859 state = nfs4_do_open(dir, dentry, ctx->mode,
2860 flags, sattr, ctx->cred,
2861 &ctx->mdsthreshold);
2862 d_drop(dentry);
2863 if (IS_ERR(state)) {
2864 status = PTR_ERR(state);
2865 goto out;
2866 }
2867 d_add(dentry, igrab(state->inode));
2868 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2869 ctx->state = state;
2870out:
2871 put_nfs_open_context(ctx);
2872 return status;
2873}
2874
2875static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2876{
2877 struct nfs_server *server = NFS_SERVER(dir);
2878 struct nfs_removeargs args = {
2879 .fh = NFS_FH(dir),
2880 .name = *name,
2881 };
2882 struct nfs_removeres res = {
2883 .server = server,
2884 };
2885 struct rpc_message msg = {
2886 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2887 .rpc_argp = &args,
2888 .rpc_resp = &res,
2889 };
2890 int status;
2891
2892 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2893 if (status == 0)
2894 update_changeattr(dir, &res.cinfo);
2895 return status;
2896}
2897
2898static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2899{
2900 struct nfs4_exception exception = { };
2901 int err;
2902 do {
2903 err = nfs4_handle_exception(NFS_SERVER(dir),
2904 _nfs4_proc_remove(dir, name),
2905 &exception);
2906 } while (exception.retry);
2907 return err;
2908}
2909
2910static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2911{
2912 struct nfs_server *server = NFS_SERVER(dir);
2913 struct nfs_removeargs *args = msg->rpc_argp;
2914 struct nfs_removeres *res = msg->rpc_resp;
2915
2916 res->server = server;
2917 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2918 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
2919}
2920
2921static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
2922{
2923 if (nfs4_setup_sequence(NFS_SERVER(data->dir),
2924 &data->args.seq_args,
2925 &data->res.seq_res,
2926 task))
2927 return;
2928 rpc_call_start(task);
2929}
2930
2931static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2932{
2933 struct nfs_removeres *res = task->tk_msg.rpc_resp;
2934
2935 if (!nfs4_sequence_done(task, &res->seq_res))
2936 return 0;
2937 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2938 return 0;
2939 update_changeattr(dir, &res->cinfo);
2940 return 1;
2941}
2942
2943static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2944{
2945 struct nfs_server *server = NFS_SERVER(dir);
2946 struct nfs_renameargs *arg = msg->rpc_argp;
2947 struct nfs_renameres *res = msg->rpc_resp;
2948
2949 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2950 res->server = server;
2951 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2952}
2953
2954static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
2955{
2956 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
2957 &data->args.seq_args,
2958 &data->res.seq_res,
2959 task))
2960 return;
2961 rpc_call_start(task);
2962}
2963
2964static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2965 struct inode *new_dir)
2966{
2967 struct nfs_renameres *res = task->tk_msg.rpc_resp;
2968
2969 if (!nfs4_sequence_done(task, &res->seq_res))
2970 return 0;
2971 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2972 return 0;
2973
2974 update_changeattr(old_dir, &res->old_cinfo);
2975 update_changeattr(new_dir, &res->new_cinfo);
2976 return 1;
2977}
2978
2979static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2980 struct inode *new_dir, struct qstr *new_name)
2981{
2982 struct nfs_server *server = NFS_SERVER(old_dir);
2983 struct nfs_renameargs arg = {
2984 .old_dir = NFS_FH(old_dir),
2985 .new_dir = NFS_FH(new_dir),
2986 .old_name = old_name,
2987 .new_name = new_name,
2988 };
2989 struct nfs_renameres res = {
2990 .server = server,
2991 };
2992 struct rpc_message msg = {
2993 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2994 .rpc_argp = &arg,
2995 .rpc_resp = &res,
2996 };
2997 int status = -ENOMEM;
2998
2999 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3000 if (!status) {
3001 update_changeattr(old_dir, &res.old_cinfo);
3002 update_changeattr(new_dir, &res.new_cinfo);
3003 }
3004 return status;
3005}
3006
3007static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
3008 struct inode *new_dir, struct qstr *new_name)
3009{
3010 struct nfs4_exception exception = { };
3011 int err;
3012 do {
3013 err = nfs4_handle_exception(NFS_SERVER(old_dir),
3014 _nfs4_proc_rename(old_dir, old_name,
3015 new_dir, new_name),
3016 &exception);
3017 } while (exception.retry);
3018 return err;
3019}
3020
3021static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3022{
3023 struct nfs_server *server = NFS_SERVER(inode);
3024 struct nfs4_link_arg arg = {
3025 .fh = NFS_FH(inode),
3026 .dir_fh = NFS_FH(dir),
3027 .name = name,
3028 .bitmask = server->attr_bitmask,
3029 };
3030 struct nfs4_link_res res = {
3031 .server = server,
3032 };
3033 struct rpc_message msg = {
3034 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3035 .rpc_argp = &arg,
3036 .rpc_resp = &res,
3037 };
3038 int status = -ENOMEM;
3039
3040 res.fattr = nfs_alloc_fattr();
3041 if (res.fattr == NULL)
3042 goto out;
3043
3044 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3045 if (!status) {
3046 update_changeattr(dir, &res.cinfo);
3047 nfs_post_op_update_inode(inode, res.fattr);
3048 }
3049out:
3050 nfs_free_fattr(res.fattr);
3051 return status;
3052}
3053
3054static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3055{
3056 struct nfs4_exception exception = { };
3057 int err;
3058 do {
3059 err = nfs4_handle_exception(NFS_SERVER(inode),
3060 _nfs4_proc_link(inode, dir, name),
3061 &exception);
3062 } while (exception.retry);
3063 return err;
3064}
3065
3066struct nfs4_createdata {
3067 struct rpc_message msg;
3068 struct nfs4_create_arg arg;
3069 struct nfs4_create_res res;
3070 struct nfs_fh fh;
3071 struct nfs_fattr fattr;
3072};
3073
3074static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3075 struct qstr *name, struct iattr *sattr, u32 ftype)
3076{
3077 struct nfs4_createdata *data;
3078
3079 data = kzalloc(sizeof(*data), GFP_KERNEL);
3080 if (data != NULL) {
3081 struct nfs_server *server = NFS_SERVER(dir);
3082
3083 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3084 data->msg.rpc_argp = &data->arg;
3085 data->msg.rpc_resp = &data->res;
3086 data->arg.dir_fh = NFS_FH(dir);
3087 data->arg.server = server;
3088 data->arg.name = name;
3089 data->arg.attrs = sattr;
3090 data->arg.ftype = ftype;
3091 data->arg.bitmask = server->attr_bitmask;
3092 data->res.server = server;
3093 data->res.fh = &data->fh;
3094 data->res.fattr = &data->fattr;
3095 nfs_fattr_init(data->res.fattr);
3096 }
3097 return data;
3098}
3099
3100static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3101{
3102 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3103 &data->arg.seq_args, &data->res.seq_res, 1);
3104 if (status == 0) {
3105 update_changeattr(dir, &data->res.dir_cinfo);
3106 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3107 }
3108 return status;
3109}
3110
3111static void nfs4_free_createdata(struct nfs4_createdata *data)
3112{
3113 kfree(data);
3114}
3115
3116static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3117 struct page *page, unsigned int len, struct iattr *sattr)
3118{
3119 struct nfs4_createdata *data;
3120 int status = -ENAMETOOLONG;
3121
3122 if (len > NFS4_MAXPATHLEN)
3123 goto out;
3124
3125 status = -ENOMEM;
3126 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3127 if (data == NULL)
3128 goto out;
3129
3130 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3131 data->arg.u.symlink.pages = &page;
3132 data->arg.u.symlink.len = len;
3133
3134 status = nfs4_do_create(dir, dentry, data);
3135
3136 nfs4_free_createdata(data);
3137out:
3138 return status;
3139}
3140
3141static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3142 struct page *page, unsigned int len, struct iattr *sattr)
3143{
3144 struct nfs4_exception exception = { };
3145 int err;
3146 do {
3147 err = nfs4_handle_exception(NFS_SERVER(dir),
3148 _nfs4_proc_symlink(dir, dentry, page,
3149 len, sattr),
3150 &exception);
3151 } while (exception.retry);
3152 return err;
3153}
3154
3155static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3156 struct iattr *sattr)
3157{
3158 struct nfs4_createdata *data;
3159 int status = -ENOMEM;
3160
3161 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3162 if (data == NULL)
3163 goto out;
3164
3165 status = nfs4_do_create(dir, dentry, data);
3166
3167 nfs4_free_createdata(data);
3168out:
3169 return status;
3170}
3171
3172static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3173 struct iattr *sattr)
3174{
3175 struct nfs4_exception exception = { };
3176 int err;
3177
3178 sattr->ia_mode &= ~current_umask();
3179 do {
3180 err = nfs4_handle_exception(NFS_SERVER(dir),
3181 _nfs4_proc_mkdir(dir, dentry, sattr),
3182 &exception);
3183 } while (exception.retry);
3184 return err;
3185}
3186
3187static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3188 u64 cookie, struct page **pages, unsigned int count, int plus)
3189{
3190 struct inode *dir = dentry->d_inode;
3191 struct nfs4_readdir_arg args = {
3192 .fh = NFS_FH(dir),
3193 .pages = pages,
3194 .pgbase = 0,
3195 .count = count,
3196 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3197 .plus = plus,
3198 };
3199 struct nfs4_readdir_res res;
3200 struct rpc_message msg = {
3201 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3202 .rpc_argp = &args,
3203 .rpc_resp = &res,
3204 .rpc_cred = cred,
3205 };
3206 int status;
3207
3208 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3209 dentry->d_parent->d_name.name,
3210 dentry->d_name.name,
3211 (unsigned long long)cookie);
3212 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
3213 res.pgbase = args.pgbase;
3214 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3215 if (status >= 0) {
3216 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
3217 status += args.pgbase;
3218 }
3219
3220 nfs_invalidate_atime(dir);
3221
3222 dprintk("%s: returns %d\n", __func__, status);
3223 return status;
3224}
3225
3226static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3227 u64 cookie, struct page **pages, unsigned int count, int plus)
3228{
3229 struct nfs4_exception exception = { };
3230 int err;
3231 do {
3232 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3233 _nfs4_proc_readdir(dentry, cred, cookie,
3234 pages, count, plus),
3235 &exception);
3236 } while (exception.retry);
3237 return err;
3238}
3239
3240static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3241 struct iattr *sattr, dev_t rdev)
3242{
3243 struct nfs4_createdata *data;
3244 int mode = sattr->ia_mode;
3245 int status = -ENOMEM;
3246
3247 BUG_ON(!(sattr->ia_valid & ATTR_MODE));
3248 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
3249
3250 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3251 if (data == NULL)
3252 goto out;
3253
3254 if (S_ISFIFO(mode))
3255 data->arg.ftype = NF4FIFO;
3256 else if (S_ISBLK(mode)) {
3257 data->arg.ftype = NF4BLK;
3258 data->arg.u.device.specdata1 = MAJOR(rdev);
3259 data->arg.u.device.specdata2 = MINOR(rdev);
3260 }
3261 else if (S_ISCHR(mode)) {
3262 data->arg.ftype = NF4CHR;
3263 data->arg.u.device.specdata1 = MAJOR(rdev);
3264 data->arg.u.device.specdata2 = MINOR(rdev);
3265 }
3266
3267 status = nfs4_do_create(dir, dentry, data);
3268
3269 nfs4_free_createdata(data);
3270out:
3271 return status;
3272}
3273
3274static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3275 struct iattr *sattr, dev_t rdev)
3276{
3277 struct nfs4_exception exception = { };
3278 int err;
3279
3280 sattr->ia_mode &= ~current_umask();
3281 do {
3282 err = nfs4_handle_exception(NFS_SERVER(dir),
3283 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3284 &exception);
3285 } while (exception.retry);
3286 return err;
3287}
3288
3289static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3290 struct nfs_fsstat *fsstat)
3291{
3292 struct nfs4_statfs_arg args = {
3293 .fh = fhandle,
3294 .bitmask = server->attr_bitmask,
3295 };
3296 struct nfs4_statfs_res res = {
3297 .fsstat = fsstat,
3298 };
3299 struct rpc_message msg = {
3300 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3301 .rpc_argp = &args,
3302 .rpc_resp = &res,
3303 };
3304
3305 nfs_fattr_init(fsstat->fattr);
3306 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3307}
3308
3309static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3310{
3311 struct nfs4_exception exception = { };
3312 int err;
3313 do {
3314 err = nfs4_handle_exception(server,
3315 _nfs4_proc_statfs(server, fhandle, fsstat),
3316 &exception);
3317 } while (exception.retry);
3318 return err;
3319}
3320
3321static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3322 struct nfs_fsinfo *fsinfo)
3323{
3324 struct nfs4_fsinfo_arg args = {
3325 .fh = fhandle,
3326 .bitmask = server->attr_bitmask,
3327 };
3328 struct nfs4_fsinfo_res res = {
3329 .fsinfo = fsinfo,
3330 };
3331 struct rpc_message msg = {
3332 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3333 .rpc_argp = &args,
3334 .rpc_resp = &res,
3335 };
3336
3337 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3338}
3339
3340static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3341{
3342 struct nfs4_exception exception = { };
3343 int err;
3344
3345 do {
3346 err = nfs4_handle_exception(server,
3347 _nfs4_do_fsinfo(server, fhandle, fsinfo),
3348 &exception);
3349 } while (exception.retry);
3350 return err;
3351}
3352
3353static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3354{
3355 int error;
3356
3357 nfs_fattr_init(fsinfo->fattr);
3358 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
3359 if (error == 0)
3360 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
3361
3362 return error;
3363}
3364
3365static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3366 struct nfs_pathconf *pathconf)
3367{
3368 struct nfs4_pathconf_arg args = {
3369 .fh = fhandle,
3370 .bitmask = server->attr_bitmask,
3371 };
3372 struct nfs4_pathconf_res res = {
3373 .pathconf = pathconf,
3374 };
3375 struct rpc_message msg = {
3376 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3377 .rpc_argp = &args,
3378 .rpc_resp = &res,
3379 };
3380
3381 /* None of the pathconf attributes are mandatory to implement */
3382 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3383 memset(pathconf, 0, sizeof(*pathconf));
3384 return 0;
3385 }
3386
3387 nfs_fattr_init(pathconf->fattr);
3388 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3389}
3390
3391static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3392 struct nfs_pathconf *pathconf)
3393{
3394 struct nfs4_exception exception = { };
3395 int err;
3396
3397 do {
3398 err = nfs4_handle_exception(server,
3399 _nfs4_proc_pathconf(server, fhandle, pathconf),
3400 &exception);
3401 } while (exception.retry);
3402 return err;
3403}
3404
3405void __nfs4_read_done_cb(struct nfs_read_data *data)
3406{
3407 nfs_invalidate_atime(data->header->inode);
3408}
3409
3410static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3411{
3412 struct nfs_server *server = NFS_SERVER(data->header->inode);
3413
3414 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3415 rpc_restart_call_prepare(task);
3416 return -EAGAIN;
3417 }
3418
3419 __nfs4_read_done_cb(data);
3420 if (task->tk_status > 0)
3421 renew_lease(server, data->timestamp);
3422 return 0;
3423}
3424
3425static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3426{
3427
3428 dprintk("--> %s\n", __func__);
3429
3430 if (!nfs4_sequence_done(task, &data->res.seq_res))
3431 return -EAGAIN;
3432
3433 return data->read_done_cb ? data->read_done_cb(task, data) :
3434 nfs4_read_done_cb(task, data);
3435}
3436
3437static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3438{
3439 data->timestamp = jiffies;
3440 data->read_done_cb = nfs4_read_done_cb;
3441 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3442 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3443}
3444
3445static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3446{
3447 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3448 &data->args.seq_args,
3449 &data->res.seq_res,
3450 task))
3451 return;
3452 rpc_call_start(task);
3453}
3454
3455static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3456{
3457 struct inode *inode = data->header->inode;
3458
3459 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3460 rpc_restart_call_prepare(task);
3461 return -EAGAIN;
3462 }
3463 if (task->tk_status >= 0) {
3464 renew_lease(NFS_SERVER(inode), data->timestamp);
3465 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3466 }
3467 return 0;
3468}
3469
3470static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3471{
3472 if (!nfs4_sequence_done(task, &data->res.seq_res))
3473 return -EAGAIN;
3474 return data->write_done_cb ? data->write_done_cb(task, data) :
3475 nfs4_write_done_cb(task, data);
3476}
3477
3478static
3479bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
3480{
3481 const struct nfs_pgio_header *hdr = data->header;
3482
3483 /* Don't request attributes for pNFS or O_DIRECT writes */
3484 if (data->ds_clp != NULL || hdr->dreq != NULL)
3485 return false;
3486 /* Otherwise, request attributes if and only if we don't hold
3487 * a delegation
3488 */
3489 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
3490}
3491
3492static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3493{
3494 struct nfs_server *server = NFS_SERVER(data->header->inode);
3495
3496 if (!nfs4_write_need_cache_consistency_data(data)) {
3497 data->args.bitmask = NULL;
3498 data->res.fattr = NULL;
3499 } else
3500 data->args.bitmask = server->cache_consistency_bitmask;
3501
3502 if (!data->write_done_cb)
3503 data->write_done_cb = nfs4_write_done_cb;
3504 data->res.server = server;
3505 data->timestamp = jiffies;
3506
3507 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3508 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3509}
3510
3511static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3512{
3513 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3514 &data->args.seq_args,
3515 &data->res.seq_res,
3516 task))
3517 return;
3518 rpc_call_start(task);
3519}
3520
3521static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
3522{
3523 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3524 &data->args.seq_args,
3525 &data->res.seq_res,
3526 task))
3527 return;
3528 rpc_call_start(task);
3529}
3530
3531static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
3532{
3533 struct inode *inode = data->inode;
3534
3535 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3536 rpc_restart_call_prepare(task);
3537 return -EAGAIN;
3538 }
3539 return 0;
3540}
3541
3542static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
3543{
3544 if (!nfs4_sequence_done(task, &data->res.seq_res))
3545 return -EAGAIN;
3546 return data->commit_done_cb(task, data);
3547}
3548
3549static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
3550{
3551 struct nfs_server *server = NFS_SERVER(data->inode);
3552
3553 if (data->commit_done_cb == NULL)
3554 data->commit_done_cb = nfs4_commit_done_cb;
3555 data->res.server = server;
3556 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3557 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3558}
3559
3560struct nfs4_renewdata {
3561 struct nfs_client *client;
3562 unsigned long timestamp;
3563};
3564
3565/*
3566 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3567 * standalone procedure for queueing an asynchronous RENEW.
3568 */
3569static void nfs4_renew_release(void *calldata)
3570{
3571 struct nfs4_renewdata *data = calldata;
3572 struct nfs_client *clp = data->client;
3573
3574 if (atomic_read(&clp->cl_count) > 1)
3575 nfs4_schedule_state_renewal(clp);
3576 nfs_put_client(clp);
3577 kfree(data);
3578}
3579
3580static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3581{
3582 struct nfs4_renewdata *data = calldata;
3583 struct nfs_client *clp = data->client;
3584 unsigned long timestamp = data->timestamp;
3585
3586 if (task->tk_status < 0) {
3587 /* Unless we're shutting down, schedule state recovery! */
3588 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3589 return;
3590 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3591 nfs4_schedule_lease_recovery(clp);
3592 return;
3593 }
3594 nfs4_schedule_path_down_recovery(clp);
3595 }
3596 do_renew_lease(clp, timestamp);
3597}
3598
3599static const struct rpc_call_ops nfs4_renew_ops = {
3600 .rpc_call_done = nfs4_renew_done,
3601 .rpc_release = nfs4_renew_release,
3602};
3603
3604static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3605{
3606 struct rpc_message msg = {
3607 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3608 .rpc_argp = clp,
3609 .rpc_cred = cred,
3610 };
3611 struct nfs4_renewdata *data;
3612
3613 if (renew_flags == 0)
3614 return 0;
3615 if (!atomic_inc_not_zero(&clp->cl_count))
3616 return -EIO;
3617 data = kmalloc(sizeof(*data), GFP_NOFS);
3618 if (data == NULL)
3619 return -ENOMEM;
3620 data->client = clp;
3621 data->timestamp = jiffies;
3622 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3623 &nfs4_renew_ops, data);
3624}
3625
3626static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3627{
3628 struct rpc_message msg = {
3629 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3630 .rpc_argp = clp,
3631 .rpc_cred = cred,
3632 };
3633 unsigned long now = jiffies;
3634 int status;
3635
3636 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3637 if (status < 0)
3638 return status;
3639 do_renew_lease(clp, now);
3640 return 0;
3641}
3642
3643static inline int nfs4_server_supports_acls(struct nfs_server *server)
3644{
3645 return (server->caps & NFS_CAP_ACLS)
3646 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3647 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3648}
3649
3650/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3651 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3652 * the stack.
3653 */
3654#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3655
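/*
 * Copy an ACL from a kernel buffer into freshly allocated pages so it
 * can be handed to the RPC layer as a page array.  Returns the number of
 * pages used, or -ENOMEM (after freeing any pages already allocated).
 */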
3656static int buf_to_pages_noslab(const void *buf, size_t buflen,
3657 struct page **pages, unsigned int *pgbase)
3658{
3659 struct page *newpage, **spages;
3660 int rc = 0;
3661 size_t len;
3662 spages = pages;
3663
3664 do {
3665 len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3666 newpage = alloc_page(GFP_KERNEL);
3667
3668 if (newpage == NULL)
3669 goto unwind;
3670 memcpy(page_address(newpage), buf, len);
3671 buf += len;
3672 buflen -= len;
3673 *pages++ = newpage;
3674 rc++;
3675 } while (buflen != 0);
3676
3677 return rc;
3678
3679unwind:
3680 for(; rc > 0; rc--)
3681 __free_page(spages[rc-1]);
3682 return -ENOMEM;
3683}
3684
3685struct nfs4_cached_acl {
3686 int cached;
3687 size_t len;
3688 char data[0];
3689};
3690
3691static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3692{
3693 struct nfs_inode *nfsi = NFS_I(inode);
3694
3695 spin_lock(&inode->i_lock);
3696 kfree(nfsi->nfs4_acl);
3697 nfsi->nfs4_acl = acl;
3698 spin_unlock(&inode->i_lock);
3699}
3700
3701static void nfs4_zap_acl_attr(struct inode *inode)
3702{
3703 nfs4_set_cached_acl(inode, NULL);
3704}
3705
3706static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3707{
3708 struct nfs_inode *nfsi = NFS_I(inode);
3709 struct nfs4_cached_acl *acl;
3710 int ret = -ENOENT;
3711
3712 spin_lock(&inode->i_lock);
3713 acl = nfsi->nfs4_acl;
3714 if (acl == NULL)
3715 goto out;
3716 if (buf == NULL) /* user is just asking for length */
3717 goto out_len;
3718 if (acl->cached == 0)
3719 goto out;
3720 ret = -ERANGE; /* see getxattr(2) man page */
3721 if (acl->len > buflen)
3722 goto out;
3723 memcpy(buf, acl->data, acl->len);
3724out_len:
3725 ret = acl->len;
3726out:
3727 spin_unlock(&inode->i_lock);
3728 return ret;
3729}
3730
3731static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3732{
3733 struct nfs4_cached_acl *acl;
3734
3735 if (pages && acl_len <= PAGE_SIZE) {
3736 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3737 if (acl == NULL)
3738 goto out;
3739 acl->cached = 1;
3740 _copy_from_pages(acl->data, pages, pgbase, acl_len);
3741 } else {
3742 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3743 if (acl == NULL)
3744 goto out;
3745 acl->cached = 0;
3746 }
3747 acl->len = acl_len;
3748out:
3749 nfs4_set_cached_acl(inode, acl);
3750}
3751
3752/*
3753 * The getxattr API returns the required buffer length when called with a
3754 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3755 * the required buf. On a NULL buf, we send a page of data to the server
3756 * guessing that the ACL request can be serviced by a page. If so, we cache
3757 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3758 * the cache. If not, we throw away the page and cache only the required
3759 * length. The next getxattr call will then produce another round trip to
3760 * the server, this time with the input buf of the required size.
3761 */
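/*
 * Illustrative sketch (not kernel code) of the user-space side of that
 * two-step pattern, as used by consumers such as nfs4_getfacl; it assumes
 * <sys/xattr.h> and the xattr name defined as XATTR_NAME_NFSV4_ACL below:
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	if (len > 0) {
 *		char *acl = malloc(len);
 *		if (acl != NULL)
 *			len = getxattr(path, "system.nfs4_acl", acl, len);
 *	}
 */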
3762static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3763{
3764 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3765 struct nfs_getaclargs args = {
3766 .fh = NFS_FH(inode),
3767 .acl_pages = pages,
3768 .acl_len = buflen,
3769 };
3770 struct nfs_getaclres res = {
3771 .acl_len = buflen,
3772 };
3773 struct rpc_message msg = {
3774 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3775 .rpc_argp = &args,
3776 .rpc_resp = &res,
3777 };
3778 int ret = -ENOMEM, npages, i;
3779 size_t acl_len = 0;
3780
3781 npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3782 /* As long as we're doing a round trip to the server anyway,
3783 * let's be prepared for a page of acl data. */
3784 if (npages == 0)
3785 npages = 1;
3786
3787 /* Add an extra page to handle the bitmap returned */
3788 npages++;
3789
3790 for (i = 0; i < npages; i++) {
3791 pages[i] = alloc_page(GFP_KERNEL);
3792 if (!pages[i])
3793 goto out_free;
3794 }
3795
3796 /* for decoding across pages */
3797 res.acl_scratch = alloc_page(GFP_KERNEL);
3798 if (!res.acl_scratch)
3799 goto out_free;
3800
3801 args.acl_len = npages * PAGE_SIZE;
3802 args.acl_pgbase = 0;
3803
3804 /* Let decode_getacl know not to fail if the ACL data is larger than
3805 * the page we send as a guess */
3806 if (buf == NULL)
3807 res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3808
3809 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3810 __func__, buf, buflen, npages, args.acl_len);
3811 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3812 &msg, &args.seq_args, &res.seq_res, 0);
3813 if (ret)
3814 goto out_free;
3815
3816 acl_len = res.acl_len - res.acl_data_offset;
3817 if (acl_len > args.acl_len)
3818 nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3819 else
3820 nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
3821 acl_len);
3822 if (buf) {
3823 ret = -ERANGE;
3824 if (acl_len > buflen)
3825 goto out_free;
3826 _copy_from_pages(buf, pages, res.acl_data_offset,
3827 acl_len);
3828 }
3829 ret = acl_len;
3830out_free:
3831 for (i = 0; i < npages; i++)
3832 if (pages[i])
3833 __free_page(pages[i]);
3834 if (res.acl_scratch)
3835 __free_page(res.acl_scratch);
3836 return ret;
3837}
3838
3839static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3840{
3841 struct nfs4_exception exception = { };
3842 ssize_t ret;
3843 do {
3844 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3845 if (ret >= 0)
3846 break;
3847 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3848 } while (exception.retry);
3849 return ret;
3850}
3851
3852static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3853{
3854 struct nfs_server *server = NFS_SERVER(inode);
3855 int ret;
3856
3857 if (!nfs4_server_supports_acls(server))
3858 return -EOPNOTSUPP;
3859 ret = nfs_revalidate_inode(server, inode);
3860 if (ret < 0)
3861 return ret;
3862 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3863 nfs_zap_acl_cache(inode);
3864 ret = nfs4_read_cached_acl(inode, buf, buflen);
3865 if (ret != -ENOENT)
3866 /* -ENOENT is returned if there is no ACL, or if there is an ACL
3867 * but the cache holds only its length, not the data */
3868 return ret;
3869 return nfs4_get_acl_uncached(inode, buf, buflen);
3870}
3871
3872static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3873{
3874 struct nfs_server *server = NFS_SERVER(inode);
3875 struct page *pages[NFS4ACL_MAXPAGES];
3876 struct nfs_setaclargs arg = {
3877 .fh = NFS_FH(inode),
3878 .acl_pages = pages,
3879 .acl_len = buflen,
3880 };
3881 struct nfs_setaclres res;
3882 struct rpc_message msg = {
3883 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3884 .rpc_argp = &arg,
3885 .rpc_resp = &res,
3886 };
3887 int ret, i;
3888
3889 if (!nfs4_server_supports_acls(server))
3890 return -EOPNOTSUPP;
3891 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3892 if (i < 0)
3893 return i;
3894 nfs4_inode_return_delegation(inode);
3895 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3896
3897 /*
3898 * Free each page after tx, so the only ref left is
3899 * held by the network stack
3900 */
3901 for (; i > 0; i--)
3902 put_page(pages[i-1]);
3903
3904 /*
3905 * Acl update can result in inode attribute update.
3906 * so mark the attribute cache invalid.
3907 */
3908 spin_lock(&inode->i_lock);
3909 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3910 spin_unlock(&inode->i_lock);
3911 nfs_access_zap_cache(inode);
3912 nfs_zap_acl_cache(inode);
3913 return ret;
3914}
3915
3916static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3917{
3918 struct nfs4_exception exception = { };
3919 int err;
3920 do {
3921 err = nfs4_handle_exception(NFS_SERVER(inode),
3922 __nfs4_proc_set_acl(inode, buf, buflen),
3923 &exception);
3924 } while (exception.retry);
3925 return err;
3926}
3927
3928static int
3929nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3930{
3931 struct nfs_client *clp = server->nfs_client;
3932
3933 if (task->tk_status >= 0)
3934 return 0;
3935 switch (task->tk_status) {
3936 case -NFS4ERR_DELEG_REVOKED:
3937 case -NFS4ERR_ADMIN_REVOKED:
3938 case -NFS4ERR_BAD_STATEID:
3939 if (state == NULL)
3940 break;
3941 nfs_remove_bad_delegation(state->inode);
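		/* Fall through: schedule stateid recovery, as for OPENMODE */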
3942 case -NFS4ERR_OPENMODE:
3943 if (state == NULL)
3944 break;
3945 nfs4_schedule_stateid_recovery(server, state);
3946 goto wait_on_recovery;
3947 case -NFS4ERR_EXPIRED:
3948 if (state != NULL)
3949 nfs4_schedule_stateid_recovery(server, state);
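		/* Fall through: an expired lease also needs lease recovery */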
3950 case -NFS4ERR_STALE_STATEID:
3951 case -NFS4ERR_STALE_CLIENTID:
3952 nfs4_schedule_lease_recovery(clp);
3953 goto wait_on_recovery;
3954#if defined(CONFIG_NFS_V4_1)
3955 case -NFS4ERR_BADSESSION:
3956 case -NFS4ERR_BADSLOT:
3957 case -NFS4ERR_BAD_HIGH_SLOT:
3958 case -NFS4ERR_DEADSESSION:
3959 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3960 case -NFS4ERR_SEQ_FALSE_RETRY:
3961 case -NFS4ERR_SEQ_MISORDERED:
3962 dprintk("%s ERROR %d, Reset session\n", __func__,
3963 task->tk_status);
3964 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
3965 task->tk_status = 0;
3966 return -EAGAIN;
3967#endif /* CONFIG_NFS_V4_1 */
3968 case -NFS4ERR_DELAY:
3969 nfs_inc_server_stats(server, NFSIOS_DELAY);
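		/* Fall through to the delay-and-retry handling below */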
3970 case -NFS4ERR_GRACE:
3971 case -EKEYEXPIRED:
3972 rpc_delay(task, NFS4_POLL_RETRY_MAX);
3973 task->tk_status = 0;
3974 return -EAGAIN;
3975 case -NFS4ERR_RETRY_UNCACHED_REP:
3976 case -NFS4ERR_OLD_STATEID:
3977 task->tk_status = 0;
3978 return -EAGAIN;
3979 }
3980 task->tk_status = nfs4_map_errors(task->tk_status);
3981 return 0;
3982wait_on_recovery:
3983 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3984 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3985 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3986 task->tk_status = 0;
3987 return -EAGAIN;
3988}
3989
3990static void nfs4_init_boot_verifier(const struct nfs_client *clp,
3991 nfs4_verifier *bootverf)
3992{
3993 __be32 verf[2];
3994
3995 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
3996 /* An impossible timestamp guarantees this value
3997 * will never match a generated boot time. */
3998 verf[0] = 0;
3999 verf[1] = (__be32)(NSEC_PER_SEC + 1);
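		/* tv_nsec of a real boot time is always < NSEC_PER_SEC, so
		 * the value above can never collide with the else branch. */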
4000 } else {
4001 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
4002 verf[0] = (__be32)nn->boot_time.tv_sec;
4003 verf[1] = (__be32)nn->boot_time.tv_nsec;
4004 }
4005 memcpy(bootverf->data, verf, sizeof(bootverf->data));
4006}
4007
4008int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
4009 unsigned short port, struct rpc_cred *cred,
4010 struct nfs4_setclientid_res *res)
4011{
4012 nfs4_verifier sc_verifier;
4013 struct nfs4_setclientid setclientid = {
4014 .sc_verifier = &sc_verifier,
4015 .sc_prog = program,
4016 .sc_cb_ident = clp->cl_cb_ident,
4017 };
4018 struct rpc_message msg = {
4019 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
4020 .rpc_argp = &setclientid,
4021 .rpc_resp = res,
4022 .rpc_cred = cred,
4023 };
4024 int loop = 0;
4025 int status;
4026
4027 nfs4_init_boot_verifier(clp, &sc_verifier);
4028
4029 for (;;) {
4030 rcu_read_lock();
4031 setclientid.sc_name_len = scnprintf(setclientid.sc_name,
4032 sizeof(setclientid.sc_name), "%s/%s %s %s %u",
4033 clp->cl_ipaddr,
4034 rpc_peeraddr2str(clp->cl_rpcclient,
4035 RPC_DISPLAY_ADDR),
4036 rpc_peeraddr2str(clp->cl_rpcclient,
4037 RPC_DISPLAY_PROTO),
4038 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4039 clp->cl_id_uniquifier);
4040 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
4041 sizeof(setclientid.sc_netid),
4042 rpc_peeraddr2str(clp->cl_rpcclient,
4043 RPC_DISPLAY_NETID));
4044 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
4045 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
4046 clp->cl_ipaddr, port >> 8, port & 255);
4047 rcu_read_unlock();
4048
4049 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4050 if (status != -NFS4ERR_CLID_INUSE)
4051 break;
4052 if (loop != 0) {
4053 ++clp->cl_id_uniquifier;
4054 break;
4055 }
4056 ++loop;
4057 ssleep(clp->cl_lease_time / HZ + 1);
4058 }
4059 return status;
4060}
4061
4062int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4063 struct nfs4_setclientid_res *arg,
4064 struct rpc_cred *cred)
4065{
4066 struct nfs_fsinfo fsinfo;
4067 struct rpc_message msg = {
4068 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4069 .rpc_argp = arg,
4070 .rpc_resp = &fsinfo,
4071 .rpc_cred = cred,
4072 };
4073 unsigned long now;
4074 int status;
4075
4076 now = jiffies;
4077 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4078 if (status == 0) {
4079 spin_lock(&clp->cl_lock);
4080 clp->cl_lease_time = fsinfo.lease_time * HZ;
4081 clp->cl_last_renewal = now;
4082 spin_unlock(&clp->cl_lock);
4083 }
4084 return status;
4085}
4086
4087struct nfs4_delegreturndata {
4088 struct nfs4_delegreturnargs args;
4089 struct nfs4_delegreturnres res;
4090 struct nfs_fh fh;
4091 nfs4_stateid stateid;
4092 unsigned long timestamp;
4093 struct nfs_fattr fattr;
4094 int rpc_status;
4095};
4096
4097static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4098{
4099 struct nfs4_delegreturndata *data = calldata;
4100
4101 if (!nfs4_sequence_done(task, &data->res.seq_res))
4102 return;
4103
4104 switch (task->tk_status) {
4105 case -NFS4ERR_STALE_STATEID:
4106 case -NFS4ERR_EXPIRED:
4107 case 0:
4108 renew_lease(data->res.server, data->timestamp);
4109 break;
4110 default:
4111 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4112 -EAGAIN) {
4113 rpc_restart_call_prepare(task);
4114 return;
4115 }
4116 }
4117 data->rpc_status = task->tk_status;
4118}
4119
4120static void nfs4_delegreturn_release(void *calldata)
4121{
4122 kfree(calldata);
4123}
4124
4125#if defined(CONFIG_NFS_V4_1)
4126static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4127{
4128 struct nfs4_delegreturndata *d_data;
4129
4130 d_data = (struct nfs4_delegreturndata *)data;
4131
4132 if (nfs4_setup_sequence(d_data->res.server,
4133 &d_data->args.seq_args,
4134 &d_data->res.seq_res, task))
4135 return;
4136 rpc_call_start(task);
4137}
4138#endif /* CONFIG_NFS_V4_1 */
4139
4140static const struct rpc_call_ops nfs4_delegreturn_ops = {
4141#if defined(CONFIG_NFS_V4_1)
4142 .rpc_call_prepare = nfs4_delegreturn_prepare,
4143#endif /* CONFIG_NFS_V4_1 */
4144 .rpc_call_done = nfs4_delegreturn_done,
4145 .rpc_release = nfs4_delegreturn_release,
4146};
4147
4148static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4149{
4150 struct nfs4_delegreturndata *data;
4151 struct nfs_server *server = NFS_SERVER(inode);
4152 struct rpc_task *task;
4153 struct rpc_message msg = {
4154 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4155 .rpc_cred = cred,
4156 };
4157 struct rpc_task_setup task_setup_data = {
4158 .rpc_client = server->client,
4159 .rpc_message = &msg,
4160 .callback_ops = &nfs4_delegreturn_ops,
4161 .flags = RPC_TASK_ASYNC,
4162 };
4163 int status = 0;
4164
4165 data = kzalloc(sizeof(*data), GFP_NOFS);
4166 if (data == NULL)
4167 return -ENOMEM;
4168 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4169 data->args.fhandle = &data->fh;
4170 data->args.stateid = &data->stateid;
4171 data->args.bitmask = server->cache_consistency_bitmask;
4172 nfs_copy_fh(&data->fh, NFS_FH(inode));
4173 nfs4_stateid_copy(&data->stateid, stateid);
4174 data->res.fattr = &data->fattr;
4175 data->res.server = server;
4176 nfs_fattr_init(data->res.fattr);
4177 data->timestamp = jiffies;
4178 data->rpc_status = 0;
4179
4180 task_setup_data.callback_data = data;
4181 msg.rpc_argp = &data->args;
4182 msg.rpc_resp = &data->res;
4183 task = rpc_run_task(&task_setup_data);
4184 if (IS_ERR(task))
4185 return PTR_ERR(task);
4186 if (!issync)
4187 goto out;
4188 status = nfs4_wait_for_completion_rpc_task(task);
4189 if (status != 0)
4190 goto out;
4191 status = data->rpc_status;
4192 if (status == 0)
4193 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
4194 else
4195 nfs_refresh_inode(inode, &data->fattr);
4196out:
4197 rpc_put_task(task);
4198 return status;
4199}
4200
4201int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4202{
4203 struct nfs_server *server = NFS_SERVER(inode);
4204 struct nfs4_exception exception = { };
4205 int err;
4206 do {
4207 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4208 switch (err) {
4209 case -NFS4ERR_STALE_STATEID:
4210 case -NFS4ERR_EXPIRED:
4211 case 0:
4212 return 0;
4213 }
4214 err = nfs4_handle_exception(server, err, &exception);
4215 } while (exception.retry);
4216 return err;
4217}
4218
4219#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4220#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4221
4222/*
4223 * sleep, with exponential backoff, and retry the LOCK operation.
4224 */
4225static unsigned long
4226nfs4_set_lock_task_retry(unsigned long timeout)
4227{
4228 freezable_schedule_timeout_killable(timeout);
4229 timeout <<= 1;
4230 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4231 return NFS4_LOCK_MAXTIMEOUT;
4232 return timeout;
4233}
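/*
 * With the bounds above, the sequence of waits as nfs4_proc_lock() feeds
 * the returned value back in is 1s, 2s, 4s, 8s, 16s and then 30s for
 * every subsequent retry.
 */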
4234
4235static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4236{
4237 struct inode *inode = state->inode;
4238 struct nfs_server *server = NFS_SERVER(inode);
4239 struct nfs_client *clp = server->nfs_client;
4240 struct nfs_lockt_args arg = {
4241 .fh = NFS_FH(inode),
4242 .fl = request,
4243 };
4244 struct nfs_lockt_res res = {
4245 .denied = request,
4246 };
4247 struct rpc_message msg = {
4248 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4249 .rpc_argp = &arg,
4250 .rpc_resp = &res,
4251 .rpc_cred = state->owner->so_cred,
4252 };
4253 struct nfs4_lock_state *lsp;
4254 int status;
4255
4256 arg.lock_owner.clientid = clp->cl_clientid;
4257 status = nfs4_set_lock_state(state, request);
4258 if (status != 0)
4259 goto out;
4260 lsp = request->fl_u.nfs4_fl.owner;
4261 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4262 arg.lock_owner.s_dev = server->s_dev;
4263 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4264 switch (status) {
4265 case 0:
4266 request->fl_type = F_UNLCK;
4267 break;
4268 case -NFS4ERR_DENIED:
4269 status = 0;
4270 }
4271 request->fl_ops->fl_release_private(request);
4272out:
4273 return status;
4274}
4275
4276static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4277{
4278 struct nfs4_exception exception = { };
4279 int err;
4280
4281 do {
4282 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4283 _nfs4_proc_getlk(state, cmd, request),
4284 &exception);
4285 } while (exception.retry);
4286 return err;
4287}
4288
4289static int do_vfs_lock(struct file *file, struct file_lock *fl)
4290{
4291 int res = 0;
4292 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4293 case FL_POSIX:
4294 res = posix_lock_file_wait(file, fl);
4295 break;
4296 case FL_FLOCK:
4297 res = flock_lock_file_wait(file, fl);
4298 break;
4299 default:
4300 BUG();
4301 }
4302 return res;
4303}
4304
4305struct nfs4_unlockdata {
4306 struct nfs_locku_args arg;
4307 struct nfs_locku_res res;
4308 struct nfs4_lock_state *lsp;
4309 struct nfs_open_context *ctx;
4310 struct file_lock fl;
4311 const struct nfs_server *server;
4312 unsigned long timestamp;
4313};
4314
4315static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4316 struct nfs_open_context *ctx,
4317 struct nfs4_lock_state *lsp,
4318 struct nfs_seqid *seqid)
4319{
4320 struct nfs4_unlockdata *p;
4321 struct inode *inode = lsp->ls_state->inode;
4322
4323 p = kzalloc(sizeof(*p), GFP_NOFS);
4324 if (p == NULL)
4325 return NULL;
4326 p->arg.fh = NFS_FH(inode);
4327 p->arg.fl = &p->fl;
4328 p->arg.seqid = seqid;
4329 p->res.seqid = seqid;
4330 p->arg.stateid = &lsp->ls_stateid;
4331 p->lsp = lsp;
4332 atomic_inc(&lsp->ls_count);
4333 /* Ensure we don't close file until we're done freeing locks! */
4334 p->ctx = get_nfs_open_context(ctx);
4335 memcpy(&p->fl, fl, sizeof(p->fl));
4336 p->server = NFS_SERVER(inode);
4337 return p;
4338}
4339
4340static void nfs4_locku_release_calldata(void *data)
4341{
4342 struct nfs4_unlockdata *calldata = data;
4343 nfs_free_seqid(calldata->arg.seqid);
4344 nfs4_put_lock_state(calldata->lsp);
4345 put_nfs_open_context(calldata->ctx);
4346 kfree(calldata);
4347}
4348
4349static void nfs4_locku_done(struct rpc_task *task, void *data)
4350{
4351 struct nfs4_unlockdata *calldata = data;
4352
4353 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4354 return;
4355 switch (task->tk_status) {
4356 case 0:
4357 nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4358 &calldata->res.stateid);
4359 renew_lease(calldata->server, calldata->timestamp);
4360 break;
4361 case -NFS4ERR_BAD_STATEID:
4362 case -NFS4ERR_OLD_STATEID:
4363 case -NFS4ERR_STALE_STATEID:
4364 case -NFS4ERR_EXPIRED:
4365 break;
4366 default:
4367 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4368 rpc_restart_call_prepare(task);
4369 }
4370}
4371
4372static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4373{
4374 struct nfs4_unlockdata *calldata = data;
4375
4376 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4377 return;
4378 if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
4379 /* Note: exit _without_ running nfs4_locku_done */
4380 task->tk_action = NULL;
4381 return;
4382 }
4383 calldata->timestamp = jiffies;
4384 if (nfs4_setup_sequence(calldata->server,
4385 &calldata->arg.seq_args,
4386 &calldata->res.seq_res, task))
4387 return;
4388 rpc_call_start(task);
4389}
4390
4391static const struct rpc_call_ops nfs4_locku_ops = {
4392 .rpc_call_prepare = nfs4_locku_prepare,
4393 .rpc_call_done = nfs4_locku_done,
4394 .rpc_release = nfs4_locku_release_calldata,
4395};
4396
4397static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4398 struct nfs_open_context *ctx,
4399 struct nfs4_lock_state *lsp,
4400 struct nfs_seqid *seqid)
4401{
4402 struct nfs4_unlockdata *data;
4403 struct rpc_message msg = {
4404 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4405 .rpc_cred = ctx->cred,
4406 };
4407 struct rpc_task_setup task_setup_data = {
4408 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4409 .rpc_message = &msg,
4410 .callback_ops = &nfs4_locku_ops,
4411 .workqueue = nfsiod_workqueue,
4412 .flags = RPC_TASK_ASYNC,
4413 };
4414
4415 /* Ensure this is an unlock - when canceling a lock, the
4416 * canceled lock is passed in, and it won't be an unlock.
4417 */
4418 fl->fl_type = F_UNLCK;
4419
4420 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4421 if (data == NULL) {
4422 nfs_free_seqid(seqid);
4423 return ERR_PTR(-ENOMEM);
4424 }
4425
4426 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4427 msg.rpc_argp = &data->arg;
4428 msg.rpc_resp = &data->res;
4429 task_setup_data.callback_data = data;
4430 return rpc_run_task(&task_setup_data);
4431}
4432
4433static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4434{
4435 struct nfs_inode *nfsi = NFS_I(state->inode);
4436 struct nfs_seqid *seqid;
4437 struct nfs4_lock_state *lsp;
4438 struct rpc_task *task;
4439 int status = 0;
4440 unsigned char fl_flags = request->fl_flags;
4441
4442 status = nfs4_set_lock_state(state, request);
4443 /* Unlock _before_ we do the RPC call */
4444 request->fl_flags |= FL_EXISTS;
4445 down_read(&nfsi->rwsem);
4446 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4447 up_read(&nfsi->rwsem);
4448 goto out;
4449 }
4450 up_read(&nfsi->rwsem);
4451 if (status != 0)
4452 goto out;
4453 /* Is this a delegated lock? */
4454 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4455 goto out;
4456 lsp = request->fl_u.nfs4_fl.owner;
4457 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4458 status = -ENOMEM;
4459 if (seqid == NULL)
4460 goto out;
4461 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4462 status = PTR_ERR(task);
4463 if (IS_ERR(task))
4464 goto out;
4465 status = nfs4_wait_for_completion_rpc_task(task);
4466 rpc_put_task(task);
4467out:
4468 request->fl_flags = fl_flags;
4469 return status;
4470}
4471
4472struct nfs4_lockdata {
4473 struct nfs_lock_args arg;
4474 struct nfs_lock_res res;
4475 struct nfs4_lock_state *lsp;
4476 struct nfs_open_context *ctx;
4477 struct file_lock fl;
4478 unsigned long timestamp;
4479 int rpc_status;
4480 int cancelled;
4481 struct nfs_server *server;
4482};
4483
4484static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4485 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4486 gfp_t gfp_mask)
4487{
4488 struct nfs4_lockdata *p;
4489 struct inode *inode = lsp->ls_state->inode;
4490 struct nfs_server *server = NFS_SERVER(inode);
4491
4492 p = kzalloc(sizeof(*p), gfp_mask);
4493 if (p == NULL)
4494 return NULL;
4495
4496 p->arg.fh = NFS_FH(inode);
4497 p->arg.fl = &p->fl;
4498 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4499 if (p->arg.open_seqid == NULL)
4500 goto out_free;
4501 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4502 if (p->arg.lock_seqid == NULL)
4503 goto out_free_seqid;
4504 p->arg.lock_stateid = &lsp->ls_stateid;
4505 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4506 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4507 p->arg.lock_owner.s_dev = server->s_dev;
4508 p->res.lock_seqid = p->arg.lock_seqid;
4509 p->lsp = lsp;
4510 p->server = server;
4511 atomic_inc(&lsp->ls_count);
4512 p->ctx = get_nfs_open_context(ctx);
4513 memcpy(&p->fl, fl, sizeof(p->fl));
4514 return p;
4515out_free_seqid:
4516 nfs_free_seqid(p->arg.open_seqid);
4517out_free:
4518 kfree(p);
4519 return NULL;
4520}
4521
4522static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4523{
4524 struct nfs4_lockdata *data = calldata;
4525 struct nfs4_state *state = data->lsp->ls_state;
4526
4527 dprintk("%s: begin!\n", __func__);
4528 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4529 return;
4530 /* Do we need to do an open_to_lock_owner? */
4531 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4532 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4533 return;
4534 data->arg.open_stateid = &state->stateid;
4535 data->arg.new_lock_owner = 1;
4536 data->res.open_seqid = data->arg.open_seqid;
4537 } else
4538 data->arg.new_lock_owner = 0;
4539 data->timestamp = jiffies;
4540 if (nfs4_setup_sequence(data->server,
4541 &data->arg.seq_args,
4542 &data->res.seq_res, task))
4543 return;
4544 rpc_call_start(task);
4545 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4546}
4547
4548static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4549{
4550 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4551 nfs4_lock_prepare(task, calldata);
4552}
4553
4554static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4555{
4556 struct nfs4_lockdata *data = calldata;
4557
4558 dprintk("%s: begin!\n", __func__);
4559
4560 if (!nfs4_sequence_done(task, &data->res.seq_res))
4561 return;
4562
4563 data->rpc_status = task->tk_status;
4564 if (data->arg.new_lock_owner != 0) {
4565 if (data->rpc_status == 0)
4566 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4567 else
4568 goto out;
4569 }
4570 if (data->rpc_status == 0) {
4571 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4572 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
4573 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4574 }
4575out:
4576 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4577}
4578
4579static void nfs4_lock_release(void *calldata)
4580{
4581 struct nfs4_lockdata *data = calldata;
4582
4583 dprintk("%s: begin!\n", __func__);
4584 nfs_free_seqid(data->arg.open_seqid);
4585 if (data->cancelled != 0) {
4586 struct rpc_task *task;
4587 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4588 data->arg.lock_seqid);
4589 if (!IS_ERR(task))
4590 rpc_put_task_async(task);
4591 dprintk("%s: cancelling lock!\n", __func__);
4592 } else
4593 nfs_free_seqid(data->arg.lock_seqid);
4594 nfs4_put_lock_state(data->lsp);
4595 put_nfs_open_context(data->ctx);
4596 kfree(data);
4597 dprintk("%s: done!\n", __func__);
4598}
4599
4600static const struct rpc_call_ops nfs4_lock_ops = {
4601 .rpc_call_prepare = nfs4_lock_prepare,
4602 .rpc_call_done = nfs4_lock_done,
4603 .rpc_release = nfs4_lock_release,
4604};
4605
4606static const struct rpc_call_ops nfs4_recover_lock_ops = {
4607 .rpc_call_prepare = nfs4_recover_lock_prepare,
4608 .rpc_call_done = nfs4_lock_done,
4609 .rpc_release = nfs4_lock_release,
4610};
4611
4612static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4613{
4614 switch (error) {
4615 case -NFS4ERR_ADMIN_REVOKED:
4616 case -NFS4ERR_BAD_STATEID:
4617 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4618 if (new_lock_owner != 0 ||
4619 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4620 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4621 break;
4622 case -NFS4ERR_STALE_STATEID:
4623 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
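		/* Fall through: a stale stateid also requires lease recovery */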
4624 case -NFS4ERR_EXPIRED:
4625 nfs4_schedule_lease_recovery(server->nfs_client);
4626 }
4627}
4628
4629static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4630{
4631 struct nfs4_lockdata *data;
4632 struct rpc_task *task;
4633 struct rpc_message msg = {
4634 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4635 .rpc_cred = state->owner->so_cred,
4636 };
4637 struct rpc_task_setup task_setup_data = {
4638 .rpc_client = NFS_CLIENT(state->inode),
4639 .rpc_message = &msg,
4640 .callback_ops = &nfs4_lock_ops,
4641 .workqueue = nfsiod_workqueue,
4642 .flags = RPC_TASK_ASYNC,
4643 };
4644 int ret;
4645
4646 dprintk("%s: begin!\n", __func__);
4647 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4648 fl->fl_u.nfs4_fl.owner,
4649 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4650 if (data == NULL)
4651 return -ENOMEM;
4652 if (IS_SETLKW(cmd))
4653 data->arg.block = 1;
4654 if (recovery_type > NFS_LOCK_NEW) {
4655 if (recovery_type == NFS_LOCK_RECLAIM)
4656 data->arg.reclaim = NFS_LOCK_RECLAIM;
4657 task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4658 }
4659 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4660 msg.rpc_argp = &data->arg;
4661 msg.rpc_resp = &data->res;
4662 task_setup_data.callback_data = data;
4663 task = rpc_run_task(&task_setup_data);
4664 if (IS_ERR(task))
4665 return PTR_ERR(task);
4666 ret = nfs4_wait_for_completion_rpc_task(task);
4667 if (ret == 0) {
4668 ret = data->rpc_status;
4669 if (ret)
4670 nfs4_handle_setlk_error(data->server, data->lsp,
4671 data->arg.new_lock_owner, ret);
4672 } else
4673 data->cancelled = 1;
4674 rpc_put_task(task);
4675 dprintk("%s: done, ret = %d!\n", __func__, ret);
4676 return ret;
4677}
4678
4679static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4680{
4681 struct nfs_server *server = NFS_SERVER(state->inode);
4682 struct nfs4_exception exception = {
4683 .inode = state->inode,
4684 };
4685 int err;
4686
4687 do {
4688 /* Cache the lock if possible... */
4689 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4690 return 0;
4691 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4692 if (err != -NFS4ERR_DELAY)
4693 break;
4694 nfs4_handle_exception(server, err, &exception);
4695 } while (exception.retry);
4696 return err;
4697}
4698
4699static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4700{
4701 struct nfs_server *server = NFS_SERVER(state->inode);
4702 struct nfs4_exception exception = {
4703 .inode = state->inode,
4704 };
4705 int err;
4706
4707 err = nfs4_set_lock_state(state, request);
4708 if (err != 0)
4709 return err;
4710 do {
4711 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4712 return 0;
4713 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4714 switch (err) {
4715 default:
4716 goto out;
4717 case -NFS4ERR_GRACE:
4718 case -NFS4ERR_DELAY:
4719 nfs4_handle_exception(server, err, &exception);
4720 err = 0;
4721 }
4722 } while (exception.retry);
4723out:
4724 return err;
4725}
4726
4727#if defined(CONFIG_NFS_V4_1)
4728/**
4729 * nfs41_check_expired_locks - possibly free a lock stateid
4730 *
4731 * @state: NFSv4 state for an inode
4732 *
4733 * Returns NFS_OK if recovery for this stateid is now finished.
4734 * Otherwise a negative NFS4ERR value is returned.
4735 */
4736static int nfs41_check_expired_locks(struct nfs4_state *state)
4737{
4738 int status, ret = -NFS4ERR_BAD_STATEID;
4739 struct nfs4_lock_state *lsp;
4740 struct nfs_server *server = NFS_SERVER(state->inode);
4741
4742 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4743 if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
4744 status = nfs41_test_stateid(server, &lsp->ls_stateid);
4745 if (status != NFS_OK) {
4746 /* Free the stateid unless the server
4747 * informs us the stateid is unrecognized. */
4748 if (status != -NFS4ERR_BAD_STATEID)
4749 nfs41_free_stateid(server,
4750 &lsp->ls_stateid);
4751 lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
4752 ret = status;
4753 }
4754 }
4755 }
4756
4757 return ret;
4758}
4759
4760static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4761{
4762 int status = NFS_OK;
4763
4764 if (test_bit(LK_STATE_IN_USE, &state->flags))
4765 status = nfs41_check_expired_locks(state);
4766 if (status != NFS_OK)
4767 status = nfs4_lock_expired(state, request);
4768 return status;
4769}
4770#endif
4771
4772static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4773{
4774 struct nfs_inode *nfsi = NFS_I(state->inode);
4775 unsigned char fl_flags = request->fl_flags;
4776 int status = -ENOLCK;
4777
4778 if ((fl_flags & FL_POSIX) &&
4779 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4780 goto out;
4781 /* Is this a delegated open? */
4782 status = nfs4_set_lock_state(state, request);
4783 if (status != 0)
4784 goto out;
4785 request->fl_flags |= FL_ACCESS;
4786 status = do_vfs_lock(request->fl_file, request);
4787 if (status < 0)
4788 goto out;
4789 down_read(&nfsi->rwsem);
4790 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4791 /* Yes: cache locks! */
4792 /* ...but avoid races with delegation recall... */
4793 request->fl_flags = fl_flags & ~FL_SLEEP;
4794 status = do_vfs_lock(request->fl_file, request);
4795 goto out_unlock;
4796 }
4797 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4798 if (status != 0)
4799 goto out_unlock;
4800 /* Note: we always want to sleep here! */
4801 request->fl_flags = fl_flags | FL_SLEEP;
4802 if (do_vfs_lock(request->fl_file, request) < 0)
4803 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
4804 "manager!\n", __func__);
4805out_unlock:
4806 up_read(&nfsi->rwsem);
4807out:
4808 request->fl_flags = fl_flags;
4809 return status;
4810}
4811
4812static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4813{
4814 struct nfs4_exception exception = {
4815 .state = state,
4816 .inode = state->inode,
4817 };
4818 int err;
4819
4820 do {
4821 err = _nfs4_proc_setlk(state, cmd, request);
4822 if (err == -NFS4ERR_DENIED)
4823 err = -EAGAIN;
4824 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4825 err, &exception);
4826 } while (exception.retry);
4827 return err;
4828}
4829
4830static int
4831nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4832{
4833 struct nfs_open_context *ctx;
4834 struct nfs4_state *state;
4835 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4836 int status;
4837
4838 /* verify open state */
4839 ctx = nfs_file_open_context(filp);
4840 state = ctx->state;
4841
4842 if (request->fl_start < 0 || request->fl_end < 0)
4843 return -EINVAL;
4844
4845 if (IS_GETLK(cmd)) {
4846 if (state != NULL)
4847 return nfs4_proc_getlk(state, F_GETLK, request);
4848 return 0;
4849 }
4850
4851 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4852 return -EINVAL;
4853
4854 if (request->fl_type == F_UNLCK) {
4855 if (state != NULL)
4856 return nfs4_proc_unlck(state, cmd, request);
4857 return 0;
4858 }
4859
4860 if (state == NULL)
4861 return -ENOLCK;
4862 /*
4863 * Don't rely on the VFS having checked the file open mode,
4864 * since it won't do this for flock() locks.
4865 */
4866 switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
4867 case F_RDLCK:
4868 if (!(filp->f_mode & FMODE_READ))
4869 return -EBADF;
4870 break;
4871 case F_WRLCK:
4872 if (!(filp->f_mode & FMODE_WRITE))
4873 return -EBADF;
4874 }
4875
4876 do {
4877 status = nfs4_proc_setlk(state, cmd, request);
4878 if ((status != -EAGAIN) || IS_SETLK(cmd))
4879 break;
4880 timeout = nfs4_set_lock_task_retry(timeout);
4881 status = -ERESTARTSYS;
4882 if (signalled())
4883 break;
4884 } while (status < 0);
4885 return status;
4886}
4887
4888int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4889{
4890 struct nfs_server *server = NFS_SERVER(state->inode);
4891 struct nfs4_exception exception = { };
4892 int err;
4893
4894 err = nfs4_set_lock_state(state, fl);
4895 if (err != 0)
4896 goto out;
4897 do {
4898 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4899 switch (err) {
4900 default:
4901 printk(KERN_ERR "NFS: %s: unhandled error "
4902 "%d.\n", __func__, err);
4903 case 0:
4904 case -ESTALE:
4905 goto out;
4906 case -NFS4ERR_EXPIRED:
4907 nfs4_schedule_stateid_recovery(server, state);
4908 case -NFS4ERR_STALE_CLIENTID:
4909 case -NFS4ERR_STALE_STATEID:
4910 nfs4_schedule_lease_recovery(server->nfs_client);
4911 goto out;
4912 case -NFS4ERR_BADSESSION:
4913 case -NFS4ERR_BADSLOT:
4914 case -NFS4ERR_BAD_HIGH_SLOT:
4915 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4916 case -NFS4ERR_DEADSESSION:
4917 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
4918 goto out;
4919 case -ERESTARTSYS:
4920 /*
4921 * The show must go on: exit, but mark the
4922 * stateid as needing recovery.
4923 */
4924 case -NFS4ERR_DELEG_REVOKED:
4925 case -NFS4ERR_ADMIN_REVOKED:
4926 case -NFS4ERR_BAD_STATEID:
4927 case -NFS4ERR_OPENMODE:
4928 nfs4_schedule_stateid_recovery(server, state);
4929 err = 0;
4930 goto out;
4931 case -EKEYEXPIRED:
4932 /*
4933 * User RPCSEC_GSS context has expired.
4934 * We cannot recover this stateid now, so
4935 * skip it and allow recovery thread to
4936 * proceed.
4937 */
4938 err = 0;
4939 goto out;
4940 case -ENOMEM:
4941 case -NFS4ERR_DENIED:
4942 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
4943 err = 0;
4944 goto out;
4945 case -NFS4ERR_DELAY:
4946 break;
4947 }
4948 err = nfs4_handle_exception(server, err, &exception);
4949 } while (exception.retry);
4950out:
4951 return err;
4952}
4953
4954struct nfs_release_lockowner_data {
4955 struct nfs4_lock_state *lsp;
4956 struct nfs_server *server;
4957 struct nfs_release_lockowner_args args;
4958};
4959
4960static void nfs4_release_lockowner_release(void *calldata)
4961{
4962 struct nfs_release_lockowner_data *data = calldata;
4963 nfs4_free_lock_state(data->server, data->lsp);
4964 kfree(calldata);
4965}
4966
4967static const struct rpc_call_ops nfs4_release_lockowner_ops = {
4968 .rpc_release = nfs4_release_lockowner_release,
4969};
4970
4971int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
4972{
4973 struct nfs_server *server = lsp->ls_state->owner->so_server;
4974 struct nfs_release_lockowner_data *data;
4975 struct rpc_message msg = {
4976 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4977 };
4978
4979 if (server->nfs_client->cl_mvops->minor_version != 0)
4980 return -EINVAL;
4981 data = kmalloc(sizeof(*data), GFP_NOFS);
4982 if (!data)
4983 return -ENOMEM;
4984 data->lsp = lsp;
4985 data->server = server;
4986 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
4987 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
4988 data->args.lock_owner.s_dev = server->s_dev;
4989 msg.rpc_argp = &data->args;
4990 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
4991 return 0;
4992}
4993
4994#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
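/* The xattr name matched by the get/set/list handlers below; this is the
 * name user space passes to getxattr(2)/setxattr(2) to read or write the
 * NFSv4 ACL.
 */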
4995
4996static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4997 const void *buf, size_t buflen,
4998 int flags, int type)
4999{
5000 if (strcmp(key, "") != 0)
5001 return -EINVAL;
5002
5003 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
5004}
5005
5006static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
5007 void *buf, size_t buflen, int type)
5008{
5009 if (strcmp(key, "") != 0)
5010 return -EINVAL;
5011
5012 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
5013}
5014
5015static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
5016 size_t list_len, const char *name,
5017 size_t name_len, int type)
5018{
5019 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
5020
5021 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
5022 return 0;
5023
5024 if (list && len <= list_len)
5025 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
5026 return len;
5027}
5028
5029/*
5030 * nfs_fhget will use either the mounted_on_fileid or the fileid
5031 */
5032static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
5033{
5034 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
5035 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
5036 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
5037 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
5038 return;
5039
5040 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
5041 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
5042 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
5043 fattr->nlink = 2;
5044}
5045
5046static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5047 const struct qstr *name,
5048 struct nfs4_fs_locations *fs_locations,
5049 struct page *page)
5050{
5051 struct nfs_server *server = NFS_SERVER(dir);
5052 u32 bitmask[2] = {
5053 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
5054 };
5055 struct nfs4_fs_locations_arg args = {
5056 .dir_fh = NFS_FH(dir),
5057 .name = name,
5058 .page = page,
5059 .bitmask = bitmask,
5060 };
5061 struct nfs4_fs_locations_res res = {
5062 .fs_locations = fs_locations,
5063 };
5064 struct rpc_message msg = {
5065 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
5066 .rpc_argp = &args,
5067 .rpc_resp = &res,
5068 };
5069 int status;
5070
5071 dprintk("%s: start\n", __func__);
5072
5073 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5074 * is not supported */
5075 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5076 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5077 else
5078 bitmask[0] |= FATTR4_WORD0_FILEID;
5079
5080 nfs_fattr_init(&fs_locations->fattr);
5081 fs_locations->server = server;
5082 fs_locations->nlocations = 0;
5083 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5084 dprintk("%s: returned status = %d\n", __func__, status);
5085 return status;
5086}
5087
5088int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5089 const struct qstr *name,
5090 struct nfs4_fs_locations *fs_locations,
5091 struct page *page)
5092{
5093 struct nfs4_exception exception = { };
5094 int err;
5095 do {
5096 err = nfs4_handle_exception(NFS_SERVER(dir),
5097 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5098 &exception);
5099 } while (exception.retry);
5100 return err;
5101}
5102
5103static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5104{
5105 int status;
5106 struct nfs4_secinfo_arg args = {
5107 .dir_fh = NFS_FH(dir),
5108 .name = name,
5109 };
5110 struct nfs4_secinfo_res res = {
5111 .flavors = flavors,
5112 };
5113 struct rpc_message msg = {
5114 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5115 .rpc_argp = &args,
5116 .rpc_resp = &res,
5117 };
5118
5119 dprintk("NFS call secinfo %s\n", name->name);
5120 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5121 dprintk("NFS reply secinfo: %d\n", status);
5122 return status;
5123}
5124
5125int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5126 struct nfs4_secinfo_flavors *flavors)
5127{
5128 struct nfs4_exception exception = { };
5129 int err;
5130 do {
5131 err = nfs4_handle_exception(NFS_SERVER(dir),
5132 _nfs4_proc_secinfo(dir, name, flavors),
5133 &exception);
5134 } while (exception.retry);
5135 return err;
5136}
5137
5138#ifdef CONFIG_NFS_V4_1
5139/*
5140 * Check the exchange flags returned by the server for invalid conditions:
5141 * unknown flag bits, both the PNFS and NON_PNFS flags set, or none of the
5142 * NON_PNFS, PNFS, or DS flags set.
5143 */
5144static int nfs4_check_cl_exchange_flags(u32 flags)
5145{
5146 if (flags & ~EXCHGID4_FLAG_MASK_R)
5147 goto out_inval;
5148 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5149 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5150 goto out_inval;
5151 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5152 goto out_inval;
5153 return NFS_OK;
5154out_inval:
5155 return -NFS4ERR_INVAL;
5156}
5157
5158static bool
5159nfs41_same_server_scope(struct nfs41_server_scope *a,
5160 struct nfs41_server_scope *b)
5161{
5162 if (a->server_scope_sz == b->server_scope_sz &&
5163 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5164 return true;
5165
5166 return false;
5167}
5168
5169/*
5170 * nfs4_proc_bind_conn_to_session()
5171 *
5172 * The 4.1 client currently uses the same TCP connection for the
5173 * fore and backchannel.
5174 */
5175int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
5176{
5177 int status;
5178 struct nfs41_bind_conn_to_session_res res;
5179 struct rpc_message msg = {
5180 .rpc_proc =
5181 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
5182 .rpc_argp = clp,
5183 .rpc_resp = &res,
5184 .rpc_cred = cred,
5185 };
5186
5187 dprintk("--> %s\n", __func__);
5188 BUG_ON(clp == NULL);
5189
5190 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5191 if (unlikely(res.session == NULL)) {
5192 status = -ENOMEM;
5193 goto out;
5194 }
5195
5196 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5197 if (status == 0) {
5198 if (memcmp(res.session->sess_id.data,
5199 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
5200 dprintk("NFS: %s: Session ID mismatch\n", __func__);
5201 status = -EIO;
5202 goto out_session;
5203 }
5204 if (res.dir != NFS4_CDFS4_BOTH) {
5205 dprintk("NFS: %s: Unexpected direction from server\n",
5206 __func__);
5207 status = -EIO;
5208 goto out_session;
5209 }
5210 if (res.use_conn_in_rdma_mode) {
5211 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5212 __func__);
5213 status = -EIO;
5214 goto out_session;
5215 }
5216 }
5217out_session:
5218 kfree(res.session);
5219out:
5220 dprintk("<-- %s status= %d\n", __func__, status);
5221 return status;
5222}
5223
5224/*
5225 * nfs4_proc_exchange_id()
5226 *
5227 * Since the clientid has expired, all compounds using sessions
5228 * associated with the stale clientid will be returning
5229 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5230 * be in some phase of session reset.
5231 */
5232int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5233{
5234 nfs4_verifier verifier;
5235 struct nfs41_exchange_id_args args = {
5236 .verifier = &verifier,
5237 .client = clp,
5238 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5239 };
5240 struct nfs41_exchange_id_res res = {
5241 0
5242 };
5243 int status;
5244 struct rpc_message msg = {
5245 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5246 .rpc_argp = &args,
5247 .rpc_resp = &res,
5248 .rpc_cred = cred,
5249 };
5250
5251 dprintk("--> %s\n", __func__);
5252 BUG_ON(clp == NULL);
5253
5254 nfs4_init_boot_verifier(clp, &verifier);
5255
5256 args.id_len = scnprintf(args.id, sizeof(args.id),
5257 "%s/%s/%u",
5258 clp->cl_ipaddr,
5259 clp->cl_rpcclient->cl_nodename,
5260 clp->cl_rpcclient->cl_auth->au_flavor);
5261
5262 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
5263 GFP_NOFS);
5264 if (unlikely(res.server_owner == NULL)) {
5265 status = -ENOMEM;
5266 goto out;
5267 }
5268
5269 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
5270 GFP_NOFS);
5271 if (unlikely(res.server_scope == NULL)) {
5272 status = -ENOMEM;
5273 goto out_server_owner;
5274 }
5275
5276 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
5277 if (unlikely(res.impl_id == NULL)) {
5278 status = -ENOMEM;
5279 goto out_server_scope;
5280 }
5281
5282 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5283 if (status == 0)
5284 status = nfs4_check_cl_exchange_flags(res.flags);
5285
5286 if (status == 0) {
5287 clp->cl_clientid = res.clientid;
5288 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
5289 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
5290 clp->cl_seqid = res.seqid;
5291
5292 kfree(clp->cl_serverowner);
5293 clp->cl_serverowner = res.server_owner;
5294 res.server_owner = NULL;
5295
5296 /* use the most recent implementation id */
5297 kfree(clp->cl_implid);
5298 clp->cl_implid = res.impl_id;
5299
5300 if (clp->cl_serverscope != NULL &&
5301 !nfs41_same_server_scope(clp->cl_serverscope,
5302 res.server_scope)) {
5303 dprintk("%s: server_scope mismatch detected\n",
5304 __func__);
5305 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5306 kfree(clp->cl_serverscope);
5307 clp->cl_serverscope = NULL;
5308 }
5309
5310 if (clp->cl_serverscope == NULL) {
5311 clp->cl_serverscope = res.server_scope;
5312 goto out;
5313 }
5314 } else
5315 kfree(res.impl_id);
5316
5317out_server_owner:
5318 kfree(res.server_owner);
5319out_server_scope:
5320 kfree(res.server_scope);
5321out:
5322 if (clp->cl_implid != NULL)
5323 dprintk("%s: Server Implementation ID: "
5324 "domain: %s, name: %s, date: %llu,%u\n",
5325 __func__, clp->cl_implid->domain, clp->cl_implid->name,
5326 clp->cl_implid->date.seconds,
5327 clp->cl_implid->date.nseconds);
5328 dprintk("<-- %s status= %d\n", __func__, status);
5329 return status;
5330}
5331
5332static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5333 struct rpc_cred *cred)
5334{
5335 struct rpc_message msg = {
5336 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
5337 .rpc_argp = clp,
5338 .rpc_cred = cred,
5339 };
5340 int status;
5341
5342 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5343 if (status)
5344 dprintk("NFS: Got error %d from the server %s on "
5345 "DESTROY_CLIENTID.", status, clp->cl_hostname);
5346 return status;
5347}
5348
5349static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
5350 struct rpc_cred *cred)
5351{
5352 unsigned int loop;
5353 int ret;
5354
5355 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
5356 ret = _nfs4_proc_destroy_clientid(clp, cred);
5357 switch (ret) {
5358 case -NFS4ERR_DELAY:
5359 case -NFS4ERR_CLIENTID_BUSY:
5360 ssleep(1);
5361 break;
5362 default:
5363 return ret;
5364 }
5365 }
5366 return 0;
5367}
5368
5369int nfs4_destroy_clientid(struct nfs_client *clp)
5370{
5371 struct rpc_cred *cred;
5372 int ret = 0;
5373
5374 if (clp->cl_mvops->minor_version < 1)
5375 goto out;
5376 if (clp->cl_exchange_flags == 0)
5377 goto out;
5378 cred = nfs4_get_exchange_id_cred(clp);
5379 ret = nfs4_proc_destroy_clientid(clp, cred);
5380 if (cred)
5381 put_rpccred(cred);
5382 switch (ret) {
5383 case 0:
5384 case -NFS4ERR_STALE_CLIENTID:
5385 clp->cl_exchange_flags = 0;
5386 }
5387out:
5388 return ret;
5389}
5390
5391struct nfs4_get_lease_time_data {
5392 struct nfs4_get_lease_time_args *args;
5393 struct nfs4_get_lease_time_res *res;
5394 struct nfs_client *clp;
5395};
5396
5397static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5398 void *calldata)
5399{
5400 int ret;
5401 struct nfs4_get_lease_time_data *data =
5402 (struct nfs4_get_lease_time_data *)calldata;
5403
5404 dprintk("--> %s\n", __func__);
5405 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5406 /* just setup sequence, do not trigger session recovery
5407 since we're invoked within one */
5408 ret = nfs41_setup_sequence(data->clp->cl_session,
5409 &data->args->la_seq_args,
5410 &data->res->lr_seq_res, task);
5411
5412 BUG_ON(ret == -EAGAIN);
5413 rpc_call_start(task);
5414 dprintk("<-- %s\n", __func__);
5415}
5416
5417/*
5418 * Called from nfs4_state_manager thread for session setup, so don't recover
5419 * from sequence operation or clientid errors.
5420 */
5421static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5422{
5423 struct nfs4_get_lease_time_data *data =
5424 (struct nfs4_get_lease_time_data *)calldata;
5425
5426 dprintk("--> %s\n", __func__);
5427 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5428 return;
5429 switch (task->tk_status) {
5430 case -NFS4ERR_DELAY:
5431 case -NFS4ERR_GRACE:
5432 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5433 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5434 task->tk_status = 0;
5435 /* fall through */
5436 case -NFS4ERR_RETRY_UNCACHED_REP:
5437 rpc_restart_call_prepare(task);
5438 return;
5439 }
5440 dprintk("<-- %s\n", __func__);
5441}
5442
5443static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5444 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5445 .rpc_call_done = nfs4_get_lease_time_done,
5446};
5447
5448int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5449{
5450 struct rpc_task *task;
5451 struct nfs4_get_lease_time_args args;
5452 struct nfs4_get_lease_time_res res = {
5453 .lr_fsinfo = fsinfo,
5454 };
5455 struct nfs4_get_lease_time_data data = {
5456 .args = &args,
5457 .res = &res,
5458 .clp = clp,
5459 };
5460 struct rpc_message msg = {
5461 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5462 .rpc_argp = &args,
5463 .rpc_resp = &res,
5464 };
5465 struct rpc_task_setup task_setup = {
5466 .rpc_client = clp->cl_rpcclient,
5467 .rpc_message = &msg,
5468 .callback_ops = &nfs4_get_lease_time_ops,
5469 .callback_data = &data,
5470 .flags = RPC_TASK_TIMEOUT,
5471 };
5472 int status;
5473
5474 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5475 dprintk("--> %s\n", __func__);
5476 task = rpc_run_task(&task_setup);
5477
5478 if (IS_ERR(task))
5479 status = PTR_ERR(task);
5480 else {
5481 status = task->tk_status;
5482 rpc_put_task(task);
5483 }
5484 dprintk("<-- %s return %d\n", __func__, status);
5485
5486 return status;
5487}
5488
5489static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
5490{
5491 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
5492}
5493
5494static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
5495 struct nfs4_slot *new,
5496 u32 max_slots,
5497 u32 ivalue)
5498{
5499 struct nfs4_slot *old = NULL;
5500 u32 i;
5501
5502 spin_lock(&tbl->slot_tbl_lock);
5503 if (new) {
5504 old = tbl->slots;
5505 tbl->slots = new;
5506 tbl->max_slots = max_slots;
5507 }
5508 tbl->highest_used_slotid = -1; /* no slot is currently used */
5509 for (i = 0; i < tbl->max_slots; i++)
5510 tbl->slots[i].seq_nr = ivalue;
5511 spin_unlock(&tbl->slot_tbl_lock);
5512 kfree(old);
5513}
5514
5515/*
5516 * (re)Initialise a slot table
5517 */
5518static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
5519 u32 ivalue)
5520{
5521 struct nfs4_slot *new = NULL;
5522 int ret = -ENOMEM;
5523
5524 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5525 max_reqs, tbl->max_slots);
5526
5527 /* Does the newly negotiated max_reqs match the existing slot table? */
5528 if (max_reqs != tbl->max_slots) {
5529 new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
5530 if (!new)
5531 goto out;
5532 }
5533 ret = 0;
5534
5535 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
5536 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5537 tbl, tbl->slots, tbl->max_slots);
5538out:
5539 dprintk("<-- %s: return %d\n", __func__, ret);
5540 return ret;
5541}
5542
5543/* Destroy the slot table */
5544static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5545{
5546 if (session->fc_slot_table.slots != NULL) {
5547 kfree(session->fc_slot_table.slots);
5548 session->fc_slot_table.slots = NULL;
5549 }
5550 if (session->bc_slot_table.slots != NULL) {
5551 kfree(session->bc_slot_table.slots);
5552 session->bc_slot_table.slots = NULL;
5553 }
5554 return;
5555}
5556
5557/*
5558 * Initialize or reset the forechannel and backchannel tables
5559 */
5560static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
5561{
5562 struct nfs4_slot_table *tbl;
5563 int status;
5564
5565 dprintk("--> %s\n", __func__);
5566 /* Fore channel */
5567 tbl = &ses->fc_slot_table;
5568 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
5569 if (status) /* -ENOMEM */
5570 return status;
5571 /* Back channel */
5572 tbl = &ses->bc_slot_table;
5573 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
5574 if (status && tbl->slots == NULL)
5575 /* Fore and back channel share a connection so get
5576 * both slot tables or neither */
5577 nfs4_destroy_slot_tables(ses);
5578 return status;
5579}
5580
5581struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5582{
5583 struct nfs4_session *session;
5584 struct nfs4_slot_table *tbl;
5585
5586 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5587 if (!session)
5588 return NULL;
5589
5590 tbl = &session->fc_slot_table;
5591 tbl->highest_used_slotid = NFS4_NO_SLOT;
5592 spin_lock_init(&tbl->slot_tbl_lock);
5593 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5594 init_completion(&tbl->complete);
5595
5596 tbl = &session->bc_slot_table;
5597 tbl->highest_used_slotid = NFS4_NO_SLOT;
5598 spin_lock_init(&tbl->slot_tbl_lock);
5599 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5600 init_completion(&tbl->complete);
5601
5602 session->session_state = 1<<NFS4_SESSION_INITING;
5603
5604 session->clp = clp;
5605 return session;
5606}
5607
5608void nfs4_destroy_session(struct nfs4_session *session)
5609{
5610 struct rpc_xprt *xprt;
5611 struct rpc_cred *cred;
5612
5613 cred = nfs4_get_exchange_id_cred(session->clp);
5614 nfs4_proc_destroy_session(session, cred);
5615 if (cred)
5616 put_rpccred(cred);
5617
5618 rcu_read_lock();
5619 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
5620 rcu_read_unlock();
5621 dprintk("%s Destroy backchannel for xprt %p\n",
5622 __func__, xprt);
5623 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
5624 nfs4_destroy_slot_tables(session);
5625 kfree(session);
5626}
5627
5628/*
5629 * Initialize the values to be used by the client in CREATE_SESSION
5630 * If nfs4_init_session has already set the fore channel request and
5631 * response sizes, use them.
5632 *
5633 * Set the back channel max_resp_sz_cached to zero to force the client to
5634 * always set csa_cachethis to FALSE because the current implementation
5635 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5636 */
5637static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5638{
5639 struct nfs4_session *session = args->client->cl_session;
5640 unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
5641 mxresp_sz = session->fc_attrs.max_resp_sz;
5642
5643 if (mxrqst_sz == 0)
5644 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5645 if (mxresp_sz == 0)
5646 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5647 /* Fore channel attributes */
5648 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5649 args->fc_attrs.max_resp_sz = mxresp_sz;
5650 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5651 args->fc_attrs.max_reqs = max_session_slots;
5652
5653 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5654 "max_ops=%u max_reqs=%u\n",
5655 __func__,
5656 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5657 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5658
5659 /* Back channel attributes */
5660 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5661 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5662 args->bc_attrs.max_resp_sz_cached = 0;
5663 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5664 args->bc_attrs.max_reqs = 1;
5665
5666 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5667 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5668 __func__,
5669 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5670 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5671 args->bc_attrs.max_reqs);
5672}
5673
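/*
 * Sanity check the fore channel attributes that the server returned
 * against the values requested in CREATE_SESSION, and clamp max_reqs
 * to the client's slot table limit.
 */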
5674static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5675{
5676 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5677 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5678
5679 if (rcvd->max_resp_sz > sent->max_resp_sz)
5680 return -EINVAL;
5681 /*
5682 * Our requested max_ops is the minimum we need; we're not
5683 * prepared to break up compounds into smaller pieces than that.
5684 * So, no point even trying to continue if the server won't
5685 * cooperate:
5686 */
5687 if (rcvd->max_ops < sent->max_ops)
5688 return -EINVAL;
5689 if (rcvd->max_reqs == 0)
5690 return -EINVAL;
5691 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5692 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5693 return 0;
5694}
5695
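/*
 * Sanity check the back channel attributes returned by the server;
 * values that differ from what was requested would render the
 * backchannel unusable.
 */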
5696static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5697{
5698 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5699 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5700
5701 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5702 return -EINVAL;
5703 if (rcvd->max_resp_sz < sent->max_resp_sz)
5704 return -EINVAL;
5705 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5706 return -EINVAL;
5707 /* These would render the backchannel useless: */
5708 if (rcvd->max_ops != sent->max_ops)
5709 return -EINVAL;
5710 if (rcvd->max_reqs != sent->max_reqs)
5711 return -EINVAL;
5712 return 0;
5713}
5714
5715static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5716 struct nfs4_session *session)
5717{
5718 int ret;
5719
5720 ret = nfs4_verify_fore_channel_attrs(args, session);
5721 if (ret)
5722 return ret;
5723 return nfs4_verify_back_channel_attrs(args, session);
5724}
5725
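/*
 * Send a CREATE_SESSION operation, verify the negotiated channel
 * attributes, and bump the clientid sequence id on success.
 */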
5726static int _nfs4_proc_create_session(struct nfs_client *clp,
5727 struct rpc_cred *cred)
5728{
5729 struct nfs4_session *session = clp->cl_session;
5730 struct nfs41_create_session_args args = {
5731 .client = clp,
5732 .cb_program = NFS4_CALLBACK,
5733 };
5734 struct nfs41_create_session_res res = {
5735 .client = clp,
5736 };
5737 struct rpc_message msg = {
5738 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5739 .rpc_argp = &args,
5740 .rpc_resp = &res,
5741 .rpc_cred = cred,
5742 };
5743 int status;
5744
5745 nfs4_init_channel_attrs(&args);
5746 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5747
5748 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5749
5750 if (!status)
5751 /* Verify the session's negotiated channel_attrs values */
5752 status = nfs4_verify_channel_attrs(&args, session);
5753 if (!status) {
5754 /* Increment the clientid slot sequence id */
5755 clp->cl_seqid++;
5756 }
5757
5758 return status;
5759}
5760
5761/*
5762 * Issues a CREATE_SESSION operation to the server.
5763 * It is the responsibility of the caller to verify the session is
5764 * expired before calling this routine.
5765 */
5766int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
5767{
5768 int status;
5769 unsigned *ptr;
5770 struct nfs4_session *session = clp->cl_session;
5771
5772 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5773
5774 status = _nfs4_proc_create_session(clp, cred);
5775 if (status)
5776 goto out;
5777
5778 /* Init or reset the session slot tables */
5779 status = nfs4_setup_session_slot_tables(session);
5780 dprintk("slot table setup returned %d\n", status);
5781 if (status)
5782 goto out;
5783
5784 ptr = (unsigned *)&session->sess_id.data[0];
5785	dprintk("%s client->seqid %d sessionid %u:%u:%u:%u\n", __func__,
5786 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5787out:
5788 dprintk("<-- %s\n", __func__);
5789 return status;
5790}
5791
5792/*
5793 * Issue the over-the-wire RPC DESTROY_SESSION.
5794 * The caller must serialize access to this routine.
5795 */
5796int nfs4_proc_destroy_session(struct nfs4_session *session,
5797 struct rpc_cred *cred)
5798{
5799 struct rpc_message msg = {
5800 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
5801 .rpc_argp = session,
5802 .rpc_cred = cred,
5803 };
5804 int status = 0;
5805
5806 dprintk("--> nfs4_proc_destroy_session\n");
5807
5808 /* session is still being setup */
5809 if (session->clp->cl_cons_state != NFS_CS_READY)
5810 return status;
5811
5812 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5813
5814 if (status)
5815 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
5816 "Session has been destroyed regardless...\n", status);
5817
5818 dprintk("<-- nfs4_proc_destroy_session\n");
5819 return status;
5820}
5821
5822/*
5823 * With sessions, the client is not marked ready until after a
5824 * successful EXCHANGE_ID and CREATE_SESSION.
5825 *
5826 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
5827 * other versions of NFS can be tried.
5828 */
5829static int nfs41_check_session_ready(struct nfs_client *clp)
5830{
5831 int ret;
5832
5833 if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
5834 ret = nfs4_client_recover_expired_lease(clp);
5835 if (ret)
5836 return ret;
5837 }
5838 if (clp->cl_cons_state < NFS_CS_READY)
5839 return -EPROTONOSUPPORT;
5840 smp_rmb();
5841 return 0;
5842}
5843
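/*
 * Size the fore channel request and response limits from the server's
 * wsize/rsize (plus the NFSv4.1 compound overhead) the first time the
 * session is initialised, then check that the session is ready for use.
 */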
5844int nfs4_init_session(struct nfs_server *server)
5845{
5846 struct nfs_client *clp = server->nfs_client;
5847 struct nfs4_session *session;
5848 unsigned int rsize, wsize;
5849
5850 if (!nfs4_has_session(clp))
5851 return 0;
5852
5853 session = clp->cl_session;
5854 spin_lock(&clp->cl_lock);
5855 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
5856
5857 rsize = server->rsize;
5858 if (rsize == 0)
5859 rsize = NFS_MAX_FILE_IO_SIZE;
5860 wsize = server->wsize;
5861 if (wsize == 0)
5862 wsize = NFS_MAX_FILE_IO_SIZE;
5863
5864 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5865 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5866 }
5867 spin_unlock(&clp->cl_lock);
5868
5869 return nfs41_check_session_ready(clp);
5870}
5871
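/*
 * Initialise a session to a pNFS data server: inherit the MDS lease
 * time rather than probing it, check that the session is ready, and
 * verify that the server actually advertises the DS role.
 */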
5872int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
5873{
5874 struct nfs4_session *session = clp->cl_session;
5875 int ret;
5876
5877 spin_lock(&clp->cl_lock);
5878 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
5879 /*
5880		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
5881 * DS lease to be equal to the MDS lease.
5882 */
5883 clp->cl_lease_time = lease_time;
5884 clp->cl_last_renewal = jiffies;
5885 }
5886 spin_unlock(&clp->cl_lock);
5887
5888 ret = nfs41_check_session_ready(clp);
5889 if (ret)
5890 return ret;
5891 /* Test for the DS role */
5892 if (!is_ds_client(clp))
5893 return -ENODEV;
5894 return 0;
5895}
5896EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5897
5898
5899/*
5900 * Renew the cl_session lease.
5901 */
5902struct nfs4_sequence_data {
5903 struct nfs_client *clp;
5904 struct nfs4_sequence_args args;
5905 struct nfs4_sequence_res res;
5906};
5907
5908static void nfs41_sequence_release(void *data)
5909{
5910 struct nfs4_sequence_data *calldata = data;
5911 struct nfs_client *clp = calldata->clp;
5912
5913 if (atomic_read(&clp->cl_count) > 1)
5914 nfs4_schedule_state_renewal(clp);
5915 nfs_put_client(clp);
5916 kfree(calldata);
5917}
5918
5919static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5920{
5921 switch(task->tk_status) {
5922 case -NFS4ERR_DELAY:
5923 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5924 return -EAGAIN;
5925 default:
5926 nfs4_schedule_lease_recovery(clp);
5927 }
5928 return 0;
5929}
5930
5931static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5932{
5933 struct nfs4_sequence_data *calldata = data;
5934 struct nfs_client *clp = calldata->clp;
5935
5936 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5937 return;
5938
5939 if (task->tk_status < 0) {
5940 dprintk("%s ERROR %d\n", __func__, task->tk_status);
5941 if (atomic_read(&clp->cl_count) == 1)
5942 goto out;
5943
5944 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5945 rpc_restart_call_prepare(task);
5946 return;
5947 }
5948 }
5949 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5950out:
5951 dprintk("<-- %s\n", __func__);
5952}
5953
5954static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5955{
5956 struct nfs4_sequence_data *calldata = data;
5957 struct nfs_client *clp = calldata->clp;
5958 struct nfs4_sequence_args *args;
5959 struct nfs4_sequence_res *res;
5960
5961 args = task->tk_msg.rpc_argp;
5962 res = task->tk_msg.rpc_resp;
5963
5964 if (nfs41_setup_sequence(clp->cl_session, args, res, task))
5965 return;
5966 rpc_call_start(task);
5967}
5968
5969static const struct rpc_call_ops nfs41_sequence_ops = {
5970 .rpc_call_done = nfs41_sequence_call_done,
5971 .rpc_call_prepare = nfs41_sequence_prepare,
5972 .rpc_release = nfs41_sequence_release,
5973};
5974
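/*
 * Set up an asynchronous SEQUENCE call used to renew the lease. Takes a
 * reference on the nfs_client, which is dropped in the rpc_release
 * callback.
 */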
5975static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5976{
5977 struct nfs4_sequence_data *calldata;
5978 struct rpc_message msg = {
5979 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5980 .rpc_cred = cred,
5981 };
5982 struct rpc_task_setup task_setup_data = {
5983 .rpc_client = clp->cl_rpcclient,
5984 .rpc_message = &msg,
5985 .callback_ops = &nfs41_sequence_ops,
5986 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5987 };
5988
5989 if (!atomic_inc_not_zero(&clp->cl_count))
5990 return ERR_PTR(-EIO);
5991 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5992 if (calldata == NULL) {
5993 nfs_put_client(clp);
5994 return ERR_PTR(-ENOMEM);
5995 }
5996 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
5997 msg.rpc_argp = &calldata->args;
5998 msg.rpc_resp = &calldata->res;
5999 calldata->clp = clp;
6000 task_setup_data.callback_data = calldata;
6001
6002 return rpc_run_task(&task_setup_data);
6003}
6004
6005static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
6006{
6007 struct rpc_task *task;
6008 int ret = 0;
6009
6010 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
6011 return 0;
6012 task = _nfs41_proc_sequence(clp, cred);
6013 if (IS_ERR(task))
6014 ret = PTR_ERR(task);
6015 else
6016 rpc_put_task_async(task);
6017 dprintk("<-- %s status=%d\n", __func__, ret);
6018 return ret;
6019}
6020
6021static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
6022{
6023 struct rpc_task *task;
6024 int ret;
6025
6026 task = _nfs41_proc_sequence(clp, cred);
6027 if (IS_ERR(task)) {
6028 ret = PTR_ERR(task);
6029 goto out;
6030 }
6031 ret = rpc_wait_for_completion_task(task);
6032 if (!ret) {
6033 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
6034
6035 if (task->tk_status == 0)
6036 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
6037 ret = task->tk_status;
6038 }
6039 rpc_put_task(task);
6040out:
6041 dprintk("<-- %s status=%d\n", __func__, ret);
6042 return ret;
6043}
6044
6045struct nfs4_reclaim_complete_data {
6046 struct nfs_client *clp;
6047 struct nfs41_reclaim_complete_args arg;
6048 struct nfs41_reclaim_complete_res res;
6049};
6050
6051static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
6052{
6053 struct nfs4_reclaim_complete_data *calldata = data;
6054
6055 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
6056 if (nfs41_setup_sequence(calldata->clp->cl_session,
6057 &calldata->arg.seq_args,
6058 &calldata->res.seq_res, task))
6059 return;
6060
6061 rpc_call_start(task);
6062}
6063
6064static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
6065{
6066 switch(task->tk_status) {
6067 case 0:
6068 case -NFS4ERR_COMPLETE_ALREADY:
6069 case -NFS4ERR_WRONG_CRED: /* What to do here? */
6070 break;
6071 case -NFS4ERR_DELAY:
6072 rpc_delay(task, NFS4_POLL_RETRY_MAX);
6073 /* fall through */
6074 case -NFS4ERR_RETRY_UNCACHED_REP:
6075 return -EAGAIN;
6076 default:
6077 nfs4_schedule_lease_recovery(clp);
6078 }
6079 return 0;
6080}
6081
6082static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
6083{
6084 struct nfs4_reclaim_complete_data *calldata = data;
6085 struct nfs_client *clp = calldata->clp;
6086 struct nfs4_sequence_res *res = &calldata->res.seq_res;
6087
6088 dprintk("--> %s\n", __func__);
6089 if (!nfs41_sequence_done(task, res))
6090 return;
6091
6092 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
6093 rpc_restart_call_prepare(task);
6094 return;
6095 }
6096 dprintk("<-- %s\n", __func__);
6097}
6098
6099static void nfs4_free_reclaim_complete_data(void *data)
6100{
6101 struct nfs4_reclaim_complete_data *calldata = data;
6102
6103 kfree(calldata);
6104}
6105
6106static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
6107 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
6108 .rpc_call_done = nfs4_reclaim_complete_done,
6109 .rpc_release = nfs4_free_reclaim_complete_data,
6110};
6111
6112/*
6113 * Issue a global reclaim complete.
6114 */
6115static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
6116{
6117 struct nfs4_reclaim_complete_data *calldata;
6118 struct rpc_task *task;
6119 struct rpc_message msg = {
6120 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
6121 };
6122 struct rpc_task_setup task_setup_data = {
6123 .rpc_client = clp->cl_rpcclient,
6124 .rpc_message = &msg,
6125 .callback_ops = &nfs4_reclaim_complete_call_ops,
6126 .flags = RPC_TASK_ASYNC,
6127 };
6128 int status = -ENOMEM;
6129
6130 dprintk("--> %s\n", __func__);
6131 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6132 if (calldata == NULL)
6133 goto out;
6134 calldata->clp = clp;
6135 calldata->arg.one_fs = 0;
6136
6137 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
6138 msg.rpc_argp = &calldata->arg;
6139 msg.rpc_resp = &calldata->res;
6140 task_setup_data.callback_data = calldata;
6141 task = rpc_run_task(&task_setup_data);
6142 if (IS_ERR(task)) {
6143 status = PTR_ERR(task);
6144 goto out;
6145 }
6146 status = nfs4_wait_for_completion_rpc_task(task);
6147 if (status == 0)
6148 status = task->tk_status;
6149 rpc_put_task(task);
6150 return 0;
6151out:
6152 dprintk("<-- %s status=%d\n", __func__, status);
6153 return status;
6154}
6155
6156static void
6157nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
6158{
6159 struct nfs4_layoutget *lgp = calldata;
6160 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6161
6162 dprintk("--> %s\n", __func__);
6163	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
6164 * right now covering the LAYOUTGET we are about to send.
6165 * However, that is not so catastrophic, and there seems
6166 * to be no way to prevent it completely.
6167 */
6168 if (nfs4_setup_sequence(server, &lgp->args.seq_args,
6169 &lgp->res.seq_res, task))
6170 return;
6171 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
6172 NFS_I(lgp->args.inode)->layout,
6173 lgp->args.ctx->state)) {
6174 rpc_exit(task, NFS4_OK);
6175 return;
6176 }
6177 rpc_call_start(task);
6178}
6179
6180static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6181{
6182 struct nfs4_layoutget *lgp = calldata;
6183 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6184
6185 dprintk("--> %s\n", __func__);
6186
6187 if (!nfs4_sequence_done(task, &lgp->res.seq_res))
6188 return;
6189
6190 switch (task->tk_status) {
6191 case 0:
6192 break;
6193 case -NFS4ERR_LAYOUTTRYLATER:
6194 case -NFS4ERR_RECALLCONFLICT:
6195 task->tk_status = -NFS4ERR_DELAY;
6196 /* Fall through */
6197 default:
6198 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6199 rpc_restart_call_prepare(task);
6200 return;
6201 }
6202 }
6203 dprintk("<-- %s\n", __func__);
6204}
6205
6206static void nfs4_layoutget_release(void *calldata)
6207{
6208 struct nfs4_layoutget *lgp = calldata;
6209
6210 dprintk("--> %s\n", __func__);
6211 put_nfs_open_context(lgp->args.ctx);
6212 kfree(calldata);
6213 dprintk("<-- %s\n", __func__);
6214}
6215
6216static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6217 .rpc_call_prepare = nfs4_layoutget_prepare,
6218 .rpc_call_done = nfs4_layoutget_done,
6219 .rpc_release = nfs4_layoutget_release,
6220};
6221
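/*
 * Send LAYOUTGET to the MDS and, on success, hand the returned layout
 * to pnfs_layout_process() for decoding.
 */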
6222int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
6223{
6224 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6225 struct rpc_task *task;
6226 struct rpc_message msg = {
6227 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
6228 .rpc_argp = &lgp->args,
6229 .rpc_resp = &lgp->res,
6230 };
6231 struct rpc_task_setup task_setup_data = {
6232 .rpc_client = server->client,
6233 .rpc_message = &msg,
6234 .callback_ops = &nfs4_layoutget_call_ops,
6235 .callback_data = lgp,
6236 .flags = RPC_TASK_ASYNC,
6237 };
6238 int status = 0;
6239
6240 dprintk("--> %s\n", __func__);
6241
6242 lgp->res.layoutp = &lgp->args.layout;
6243 lgp->res.seq_res.sr_slot = NULL;
6244 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6245 task = rpc_run_task(&task_setup_data);
6246 if (IS_ERR(task))
6247 return PTR_ERR(task);
6248 status = nfs4_wait_for_completion_rpc_task(task);
6249 if (status == 0)
6250 status = task->tk_status;
6251 if (status == 0)
6252 status = pnfs_layout_process(lgp);
6253 rpc_put_task(task);
6254 dprintk("<-- %s status=%d\n", __func__, status);
6255 return status;
6256}
6257
6258static void
6259nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6260{
6261 struct nfs4_layoutreturn *lrp = calldata;
6262
6263 dprintk("--> %s\n", __func__);
6264 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
6265 &lrp->res.seq_res, task))
6266 return;
6267 rpc_call_start(task);
6268}
6269
6270static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6271{
6272 struct nfs4_layoutreturn *lrp = calldata;
6273 struct nfs_server *server;
6274 struct pnfs_layout_hdr *lo = lrp->args.layout;
6275
6276 dprintk("--> %s\n", __func__);
6277
6278 if (!nfs4_sequence_done(task, &lrp->res.seq_res))
6279 return;
6280
6281 server = NFS_SERVER(lrp->args.inode);
6282 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6283 rpc_restart_call_prepare(task);
6284 return;
6285 }
6286 spin_lock(&lo->plh_inode->i_lock);
6287 if (task->tk_status == 0) {
6288 if (lrp->res.lrs_present) {
6289 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6290 } else
6291 BUG_ON(!list_empty(&lo->plh_segs));
6292 }
6293 lo->plh_block_lgets--;
6294 spin_unlock(&lo->plh_inode->i_lock);
6295 dprintk("<-- %s\n", __func__);
6296}
6297
6298static void nfs4_layoutreturn_release(void *calldata)
6299{
6300 struct nfs4_layoutreturn *lrp = calldata;
6301
6302 dprintk("--> %s\n", __func__);
6303 put_layout_hdr(lrp->args.layout);
6304 kfree(calldata);
6305 dprintk("<-- %s\n", __func__);
6306}
6307
6308static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6309 .rpc_call_prepare = nfs4_layoutreturn_prepare,
6310 .rpc_call_done = nfs4_layoutreturn_done,
6311 .rpc_release = nfs4_layoutreturn_release,
6312};
6313
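/*
 * Send LAYOUTRETURN. The caller holds a reference on the layout header,
 * which is released in nfs4_layoutreturn_release().
 */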
6314int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6315{
6316 struct rpc_task *task;
6317 struct rpc_message msg = {
6318 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6319 .rpc_argp = &lrp->args,
6320 .rpc_resp = &lrp->res,
6321 };
6322 struct rpc_task_setup task_setup_data = {
6323 .rpc_client = lrp->clp->cl_rpcclient,
6324 .rpc_message = &msg,
6325 .callback_ops = &nfs4_layoutreturn_call_ops,
6326 .callback_data = lrp,
6327 };
6328 int status;
6329
6330 dprintk("--> %s\n", __func__);
6331 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6332 task = rpc_run_task(&task_setup_data);
6333 if (IS_ERR(task))
6334 return PTR_ERR(task);
6335 status = task->tk_status;
6336 dprintk("<-- %s status=%d\n", __func__, status);
6337 rpc_put_task(task);
6338 return status;
6339}
6340
6341/*
6342 * Retrieve the list of Data Server devices from the MDS.
6343 */
6344static int _nfs4_getdevicelist(struct nfs_server *server,
6345 const struct nfs_fh *fh,
6346 struct pnfs_devicelist *devlist)
6347{
6348 struct nfs4_getdevicelist_args args = {
6349 .fh = fh,
6350 .layoutclass = server->pnfs_curr_ld->id,
6351 };
6352 struct nfs4_getdevicelist_res res = {
6353 .devlist = devlist,
6354 };
6355 struct rpc_message msg = {
6356 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6357 .rpc_argp = &args,
6358 .rpc_resp = &res,
6359 };
6360 int status;
6361
6362 dprintk("--> %s\n", __func__);
6363 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6364 &res.seq_res, 0);
6365 dprintk("<-- %s status=%d\n", __func__, status);
6366 return status;
6367}
6368
6369int nfs4_proc_getdevicelist(struct nfs_server *server,
6370 const struct nfs_fh *fh,
6371 struct pnfs_devicelist *devlist)
6372{
6373 struct nfs4_exception exception = { };
6374 int err;
6375
6376 do {
6377 err = nfs4_handle_exception(server,
6378 _nfs4_getdevicelist(server, fh, devlist),
6379 &exception);
6380 } while (exception.retry);
6381
6382 dprintk("%s: err=%d, num_devs=%u\n", __func__,
6383 err, devlist->num_devs);
6384
6385 return err;
6386}
6387EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
6388
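/*
 * Retrieve a single pNFS device description (GETDEVICEINFO) from the MDS.
 */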
6389static int
6390_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6391{
6392 struct nfs4_getdeviceinfo_args args = {
6393 .pdev = pdev,
6394 };
6395 struct nfs4_getdeviceinfo_res res = {
6396 .pdev = pdev,
6397 };
6398 struct rpc_message msg = {
6399 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6400 .rpc_argp = &args,
6401 .rpc_resp = &res,
6402 };
6403 int status;
6404
6405 dprintk("--> %s\n", __func__);
6406 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6407 dprintk("<-- %s status=%d\n", __func__, status);
6408
6409 return status;
6410}
6411
6412int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6413{
6414 struct nfs4_exception exception = { };
6415 int err;
6416
6417 do {
6418 err = nfs4_handle_exception(server,
6419 _nfs4_proc_getdeviceinfo(server, pdev),
6420 &exception);
6421 } while (exception.retry);
6422 return err;
6423}
6424EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6425
6426static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6427{
6428 struct nfs4_layoutcommit_data *data = calldata;
6429 struct nfs_server *server = NFS_SERVER(data->args.inode);
6430
6431 if (nfs4_setup_sequence(server, &data->args.seq_args,
6432 &data->res.seq_res, task))
6433 return;
6434 rpc_call_start(task);
6435}
6436
6437static void
6438nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6439{
6440 struct nfs4_layoutcommit_data *data = calldata;
6441 struct nfs_server *server = NFS_SERVER(data->args.inode);
6442
6443 if (!nfs4_sequence_done(task, &data->res.seq_res))
6444 return;
6445
6446 switch (task->tk_status) { /* Just ignore these failures */
6447 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6448 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
6449 case -NFS4ERR_BADLAYOUT: /* no layout */
6450	case -NFS4ERR_GRACE:		/* loca_reclaim always false */
6451 task->tk_status = 0;
6452 break;
6453 case 0:
6454 nfs_post_op_update_inode_force_wcc(data->args.inode,
6455 data->res.fattr);
6456 break;
6457 default:
6458 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6459 rpc_restart_call_prepare(task);
6460 return;
6461 }
6462 }
6463}
6464
6465static void nfs4_layoutcommit_release(void *calldata)
6466{
6467 struct nfs4_layoutcommit_data *data = calldata;
6468 struct pnfs_layout_segment *lseg, *tmp;
6469 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6470
6471 pnfs_cleanup_layoutcommit(data);
6472 /* Matched by references in pnfs_set_layoutcommit */
6473 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6474 list_del_init(&lseg->pls_lc_list);
6475 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6476 &lseg->pls_flags))
6477 put_lseg(lseg);
6478 }
6479
6480 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6481 smp_mb__after_clear_bit();
6482 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6483
6484 put_rpccred(data->cred);
6485 kfree(data);
6486}
6487
6488static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6489 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6490 .rpc_call_done = nfs4_layoutcommit_done,
6491 .rpc_release = nfs4_layoutcommit_release,
6492};
6493
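/*
 * Send LAYOUTCOMMIT to the MDS. When sync is true, wait for the RPC to
 * complete and return its status; otherwise fire and forget.
 */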
6494int
6495nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6496{
6497 struct rpc_message msg = {
6498 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6499 .rpc_argp = &data->args,
6500 .rpc_resp = &data->res,
6501 .rpc_cred = data->cred,
6502 };
6503 struct rpc_task_setup task_setup_data = {
6504 .task = &data->task,
6505 .rpc_client = NFS_CLIENT(data->args.inode),
6506 .rpc_message = &msg,
6507 .callback_ops = &nfs4_layoutcommit_ops,
6508 .callback_data = data,
6509 .flags = RPC_TASK_ASYNC,
6510 };
6511 struct rpc_task *task;
6512 int status = 0;
6513
6514 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6515 "lbw: %llu inode %lu\n",
6516 data->task.tk_pid, sync,
6517 data->args.lastbytewritten,
6518 data->args.inode->i_ino);
6519
6520 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6521 task = rpc_run_task(&task_setup_data);
6522 if (IS_ERR(task))
6523 return PTR_ERR(task);
6524 if (sync == false)
6525 goto out;
6526 status = nfs4_wait_for_completion_rpc_task(task);
6527 if (status != 0)
6528 goto out;
6529 status = task->tk_status;
6530out:
6531 dprintk("%s: status %d\n", __func__, status);
6532 rpc_put_task(task);
6533 return status;
6534}
6535
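/*
 * Use SECINFO_NO_NAME to ask the server which security flavors apply to
 * the current filehandle.
 */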
6536static int
6537_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6538 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6539{
6540 struct nfs41_secinfo_no_name_args args = {
6541 .style = SECINFO_STYLE_CURRENT_FH,
6542 };
6543 struct nfs4_secinfo_res res = {
6544 .flavors = flavors,
6545 };
6546 struct rpc_message msg = {
6547 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6548 .rpc_argp = &args,
6549 .rpc_resp = &res,
6550 };
6551 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6552}
6553
6554static int
6555nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6556 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6557{
6558 struct nfs4_exception exception = { };
6559 int err;
6560 do {
6561 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6562 switch (err) {
6563 case 0:
6564 case -NFS4ERR_WRONGSEC:
6565 case -NFS4ERR_NOTSUPP:
6566 goto out;
6567 default:
6568 err = nfs4_handle_exception(server, err, &exception);
6569 }
6570 } while (exception.retry);
6571out:
6572 return err;
6573}
6574
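/*
 * Determine a usable security flavor for the root filehandle, preferring
 * SECINFO_NO_NAME and falling back to the NFSv4.0 "guess and check"
 * probe when the server does not support it.
 */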
6575static int
6576nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6577 struct nfs_fsinfo *info)
6578{
6579 int err;
6580 struct page *page;
6581 rpc_authflavor_t flavor;
6582 struct nfs4_secinfo_flavors *flavors;
6583
6584 page = alloc_page(GFP_KERNEL);
6585 if (!page) {
6586 err = -ENOMEM;
6587 goto out;
6588 }
6589
6590 flavors = page_address(page);
6591 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6592
6593 /*
6594 * Fall back on "guess and check" method if
6595 * the server doesn't support SECINFO_NO_NAME
6596 */
6597 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6598 err = nfs4_find_root_sec(server, fhandle, info);
6599 goto out_freepage;
6600 }
6601 if (err)
6602 goto out_freepage;
6603
6604 flavor = nfs_find_best_sec(flavors);
6605 if (err == 0)
6606 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6607
6608out_freepage:
6609 put_page(page);
6610 if (err == -EACCES)
6611 return -EPERM;
6612out:
6613 return err;
6614}
6615
6616static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6617{
6618 int status;
6619 struct nfs41_test_stateid_args args = {
6620 .stateid = stateid,
6621 };
6622 struct nfs41_test_stateid_res res;
6623 struct rpc_message msg = {
6624 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6625 .rpc_argp = &args,
6626 .rpc_resp = &res,
6627 };
6628
6629 dprintk("NFS call test_stateid %p\n", stateid);
6630 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6631 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6632 if (status != NFS_OK) {
6633 dprintk("NFS reply test_stateid: failed, %d\n", status);
6634 return status;
6635 }
6636 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
6637 return -res.status;
6638}
6639
6640/**
6641 * nfs41_test_stateid - perform a TEST_STATEID operation
6642 *
6643 * @server: server / transport on which to perform the operation
6644 * @stateid: state ID to test
6645 *
6646 * Returns NFS_OK if the server recognizes that "stateid" is valid.
6647 * Otherwise a negative NFS4ERR value is returned if the operation
6648 * failed or the state ID is not currently valid.
6649 */
6650static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6651{
6652 struct nfs4_exception exception = { };
6653 int err;
6654 do {
6655 err = _nfs41_test_stateid(server, stateid);
6656 if (err != -NFS4ERR_DELAY)
6657 break;
6658 nfs4_handle_exception(server, err, &exception);
6659 } while (exception.retry);
6660 return err;
6661}
6662
6663static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6664{
6665 struct nfs41_free_stateid_args args = {
6666 .stateid = stateid,
6667 };
6668 struct nfs41_free_stateid_res res;
6669 struct rpc_message msg = {
6670 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6671 .rpc_argp = &args,
6672 .rpc_resp = &res,
6673 };
6674 int status;
6675
6676 dprintk("NFS call free_stateid %p\n", stateid);
6677 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6678 status = nfs4_call_sync_sequence(server->client, server, &msg,
6679 &args.seq_args, &res.seq_res, 1);
6680 dprintk("NFS reply free_stateid: %d\n", status);
6681 return status;
6682}
6683
6684/**
6685 * nfs41_free_stateid - perform a FREE_STATEID operation
6686 *
6687 * @server: server / transport on which to perform the operation
6688 * @stateid: state ID to release
6689 *
6690 * Returns NFS_OK if the server freed "stateid". Otherwise a
6691 * negative NFS4ERR value is returned.
6692 */
6693static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6694{
6695 struct nfs4_exception exception = { };
6696 int err;
6697 do {
6698 err = _nfs4_free_stateid(server, stateid);
6699 if (err != -NFS4ERR_DELAY)
6700 break;
6701 nfs4_handle_exception(server, err, &exception);
6702 } while (exception.retry);
6703 return err;
6704}
6705
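/*
 * NFSv4.1 stateid comparison: the "other" fields must match, and a
 * seqid of zero on either side acts as a wildcard.
 */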
6706static bool nfs41_match_stateid(const nfs4_stateid *s1,
6707 const nfs4_stateid *s2)
6708{
6709 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6710 return false;
6711
6712 if (s1->seqid == s2->seqid)
6713 return true;
6714 if (s1->seqid == 0 || s2->seqid == 0)
6715 return true;
6716
6717 return false;
6718}
6719
6720#endif /* CONFIG_NFS_V4_1 */
6721
6722static bool nfs4_match_stateid(const nfs4_stateid *s1,
6723 const nfs4_stateid *s2)
6724{
6725 return nfs4_stateid_match(s1, s2);
6726}
6727
6728
6729static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6730 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6731 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6732 .recover_open = nfs4_open_reclaim,
6733 .recover_lock = nfs4_lock_reclaim,
6734 .establish_clid = nfs4_init_clientid,
6735 .get_clid_cred = nfs4_get_setclientid_cred,
6736};
6737
6738#if defined(CONFIG_NFS_V4_1)
6739static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6740 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6741 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6742 .recover_open = nfs4_open_reclaim,
6743 .recover_lock = nfs4_lock_reclaim,
6744 .establish_clid = nfs41_init_clientid,
6745 .get_clid_cred = nfs4_get_exchange_id_cred,
6746 .reclaim_complete = nfs41_proc_reclaim_complete,
6747};
6748#endif /* CONFIG_NFS_V4_1 */
6749
6750static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
6751 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6752 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6753 .recover_open = nfs4_open_expired,
6754 .recover_lock = nfs4_lock_expired,
6755 .establish_clid = nfs4_init_clientid,
6756 .get_clid_cred = nfs4_get_setclientid_cred,
6757};
6758
6759#if defined(CONFIG_NFS_V4_1)
6760static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
6761 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6762 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6763 .recover_open = nfs41_open_expired,
6764 .recover_lock = nfs41_lock_expired,
6765 .establish_clid = nfs41_init_clientid,
6766 .get_clid_cred = nfs4_get_exchange_id_cred,
6767};
6768#endif /* CONFIG_NFS_V4_1 */
6769
6770static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
6771 .sched_state_renewal = nfs4_proc_async_renew,
6772 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
6773 .renew_lease = nfs4_proc_renew,
6774};
6775
6776#if defined(CONFIG_NFS_V4_1)
6777static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
6778 .sched_state_renewal = nfs41_proc_async_sequence,
6779 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
6780 .renew_lease = nfs4_proc_sequence,
6781};
6782#endif
6783
6784static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
6785 .minor_version = 0,
6786 .call_sync = _nfs4_call_sync,
6787 .match_stateid = nfs4_match_stateid,
6788 .find_root_sec = nfs4_find_root_sec,
6789 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
6790 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
6791 .state_renewal_ops = &nfs40_state_renewal_ops,
6792};
6793
6794#if defined(CONFIG_NFS_V4_1)
6795static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
6796 .minor_version = 1,
6797 .call_sync = _nfs4_call_sync_session,
6798 .match_stateid = nfs41_match_stateid,
6799 .find_root_sec = nfs41_find_root_sec,
6800 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
6801 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
6802 .state_renewal_ops = &nfs41_state_renewal_ops,
6803};
6804#endif
6805
6806const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
6807 [0] = &nfs_v4_0_minor_ops,
6808#if defined(CONFIG_NFS_V4_1)
6809 [1] = &nfs_v4_1_minor_ops,
6810#endif
6811};
6812
6813static const struct inode_operations nfs4_file_inode_operations = {
6814 .permission = nfs_permission,
6815 .getattr = nfs_getattr,
6816 .setattr = nfs_setattr,
6817 .getxattr = generic_getxattr,
6818 .setxattr = generic_setxattr,
6819 .listxattr = generic_listxattr,
6820 .removexattr = generic_removexattr,
6821};
6822
6823const struct nfs_rpc_ops nfs_v4_clientops = {
6824 .version = 4, /* protocol version */
6825 .dentry_ops = &nfs4_dentry_operations,
6826 .dir_inode_ops = &nfs4_dir_inode_operations,
6827 .file_inode_ops = &nfs4_file_inode_operations,
6828 .file_ops = &nfs4_file_operations,
6829 .getroot = nfs4_proc_get_root,
6830 .submount = nfs4_submount,
6831 .getattr = nfs4_proc_getattr,
6832 .setattr = nfs4_proc_setattr,
6833 .lookup = nfs4_proc_lookup,
6834 .access = nfs4_proc_access,
6835 .readlink = nfs4_proc_readlink,
6836 .create = nfs4_proc_create,
6837 .remove = nfs4_proc_remove,
6838 .unlink_setup = nfs4_proc_unlink_setup,
6839 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
6840 .unlink_done = nfs4_proc_unlink_done,
6841 .rename = nfs4_proc_rename,
6842 .rename_setup = nfs4_proc_rename_setup,
6843 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
6844 .rename_done = nfs4_proc_rename_done,
6845 .link = nfs4_proc_link,
6846 .symlink = nfs4_proc_symlink,
6847 .mkdir = nfs4_proc_mkdir,
6848 .rmdir = nfs4_proc_remove,
6849 .readdir = nfs4_proc_readdir,
6850 .mknod = nfs4_proc_mknod,
6851 .statfs = nfs4_proc_statfs,
6852 .fsinfo = nfs4_proc_fsinfo,
6853 .pathconf = nfs4_proc_pathconf,
6854 .set_capabilities = nfs4_server_capabilities,
6855 .decode_dirent = nfs4_decode_dirent,
6856 .read_setup = nfs4_proc_read_setup,
6857 .read_pageio_init = pnfs_pageio_init_read,
6858 .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
6859 .read_done = nfs4_read_done,
6860 .write_setup = nfs4_proc_write_setup,
6861 .write_pageio_init = pnfs_pageio_init_write,
6862 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
6863 .write_done = nfs4_write_done,
6864 .commit_setup = nfs4_proc_commit_setup,
6865 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
6866 .commit_done = nfs4_commit_done,
6867 .lock = nfs4_proc_lock,
6868 .clear_acl_cache = nfs4_zap_acl_attr,
6869 .close_context = nfs4_close_context,
6870 .open_context = nfs4_atomic_open,
6871 .have_delegation = nfs4_have_delegation,
6872 .return_delegation = nfs4_inode_return_delegation,
6873 .alloc_client = nfs4_alloc_client,
6874 .init_client = nfs4_init_client,
6875 .free_client = nfs4_free_client,
6876};
6877
6878static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
6879 .prefix = XATTR_NAME_NFSV4_ACL,
6880 .list = nfs4_xattr_list_nfs4_acl,
6881 .get = nfs4_xattr_get_nfs4_acl,
6882 .set = nfs4_xattr_set_nfs4_acl,
6883};
6884
6885const struct xattr_handler *nfs4_xattr_handlers[] = {
6886 &nfs4_xattr_nfs4_acl_handler,
6887 NULL
6888};
6889
6890module_param(max_session_slots, ushort, 0644);
6891MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
6892 "requests the client will negotiate");
6893
6894/*
6895 * Local variables:
6896 * c-basic-offset: 8
6897 * End:
6898 */