NFSv4: Rename nfs4_copy_stateid()
[linux-2.6-block.git] / fs / nfs / nfs4proc.c
1/*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/mm.h>
39#include <linux/delay.h>
40#include <linux/errno.h>
41#include <linux/string.h>
42#include <linux/ratelimit.h>
43#include <linux/printk.h>
44#include <linux/slab.h>
45#include <linux/sunrpc/clnt.h>
46#include <linux/sunrpc/gss_api.h>
47#include <linux/nfs.h>
48#include <linux/nfs4.h>
49#include <linux/nfs_fs.h>
50#include <linux/nfs_page.h>
51#include <linux/nfs_mount.h>
52#include <linux/namei.h>
53#include <linux/mount.h>
54#include <linux/module.h>
55#include <linux/nfs_idmap.h>
56#include <linux/sunrpc/bc_xprt.h>
57#include <linux/xattr.h>
58#include <linux/utsname.h>
59#include <linux/freezer.h>
60
61#include "nfs4_fs.h"
62#include "delegation.h"
63#include "internal.h"
64#include "iostat.h"
65#include "callback.h"
66#include "pnfs.h"
67
68#define NFSDBG_FACILITY NFSDBG_PROC
69
70#define NFS4_POLL_RETRY_MIN (HZ/10)
71#define NFS4_POLL_RETRY_MAX (15*HZ)
72
73#define NFS4_MAX_LOOP_ON_RECOVER (10)
74
75static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
76
77struct nfs4_opendata;
78static int _nfs4_proc_open(struct nfs4_opendata *data);
79static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
82static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
83static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
84static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 struct nfs_fattr *fattr, struct iattr *sattr,
86 struct nfs4_state *state);
87#ifdef CONFIG_NFS_V4_1
88static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
89static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
90#endif
91/* Prevent leaks of NFSv4 errors into userland */
92static int nfs4_map_errors(int err)
93{
94 if (err >= -1000)
95 return err;
96 switch (err) {
97 case -NFS4ERR_RESOURCE:
98 return -EREMOTEIO;
99 case -NFS4ERR_WRONGSEC:
100 return -EPERM;
101 case -NFS4ERR_BADOWNER:
102 case -NFS4ERR_BADNAME:
103 return -EINVAL;
104 default:
105 dprintk("%s could not handle NFSv4 error %d\n",
106 __func__, -err);
107 break;
108 }
109 return -EIO;
110}
111
112/*
113 * This is our standard bitmap for GETATTR requests.
114 */
115const u32 nfs4_fattr_bitmap[2] = {
116 FATTR4_WORD0_TYPE
117 | FATTR4_WORD0_CHANGE
118 | FATTR4_WORD0_SIZE
119 | FATTR4_WORD0_FSID
120 | FATTR4_WORD0_FILEID,
121 FATTR4_WORD1_MODE
122 | FATTR4_WORD1_NUMLINKS
123 | FATTR4_WORD1_OWNER
124 | FATTR4_WORD1_OWNER_GROUP
125 | FATTR4_WORD1_RAWDEV
126 | FATTR4_WORD1_SPACE_USED
127 | FATTR4_WORD1_TIME_ACCESS
128 | FATTR4_WORD1_TIME_METADATA
129 | FATTR4_WORD1_TIME_MODIFY
130};
131
132const u32 nfs4_statfs_bitmap[2] = {
133 FATTR4_WORD0_FILES_AVAIL
134 | FATTR4_WORD0_FILES_FREE
135 | FATTR4_WORD0_FILES_TOTAL,
136 FATTR4_WORD1_SPACE_AVAIL
137 | FATTR4_WORD1_SPACE_FREE
138 | FATTR4_WORD1_SPACE_TOTAL
139};
140
141const u32 nfs4_pathconf_bitmap[2] = {
142 FATTR4_WORD0_MAXLINK
143 | FATTR4_WORD0_MAXNAME,
144 0
145};
146
147const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
148 | FATTR4_WORD0_MAXREAD
149 | FATTR4_WORD0_MAXWRITE
150 | FATTR4_WORD0_LEASE_TIME,
151 FATTR4_WORD1_TIME_DELTA
152 | FATTR4_WORD1_FS_LAYOUT_TYPES,
153 FATTR4_WORD2_LAYOUT_BLKSIZE
154};
155
156const u32 nfs4_fs_locations_bitmap[2] = {
157 FATTR4_WORD0_TYPE
158 | FATTR4_WORD0_CHANGE
159 | FATTR4_WORD0_SIZE
160 | FATTR4_WORD0_FSID
161 | FATTR4_WORD0_FILEID
162 | FATTR4_WORD0_FS_LOCATIONS,
163 FATTR4_WORD1_MODE
164 | FATTR4_WORD1_NUMLINKS
165 | FATTR4_WORD1_OWNER
166 | FATTR4_WORD1_OWNER_GROUP
167 | FATTR4_WORD1_RAWDEV
168 | FATTR4_WORD1_SPACE_USED
169 | FATTR4_WORD1_TIME_ACCESS
170 | FATTR4_WORD1_TIME_METADATA
171 | FATTR4_WORD1_TIME_MODIFY
172 | FATTR4_WORD1_MOUNTED_ON_FILEID
173};
174
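/*
 * Set up the arguments for an NFSv4 READDIR. For cookies 0 and 1 the
 * '.' and '..' entries are synthesized locally (see the comment below),
 * since NFSv4 servers do not return them.
 */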
175static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
176 struct nfs4_readdir_arg *readdir)
177{
178 __be32 *start, *p;
179
180 BUG_ON(readdir->count < 80);
181 if (cookie > 2) {
182 readdir->cookie = cookie;
183 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
184 return;
185 }
186
187 readdir->cookie = 0;
188 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
189 if (cookie == 2)
190 return;
191
192 /*
 193	 * NFSv4 servers do not return entries for '.' and '..'.
194 * Therefore, we fake these entries here. We let '.'
195 * have cookie 0 and '..' have cookie 1. Note that
196 * when talking to the server, we always send cookie 0
197 * instead of 1 or 2.
198 */
199 start = p = kmap_atomic(*readdir->pages, KM_USER0);
200
201 if (cookie == 0) {
202 *p++ = xdr_one; /* next */
203 *p++ = xdr_zero; /* cookie, first word */
204 *p++ = xdr_one; /* cookie, second word */
205 *p++ = xdr_one; /* entry len */
206 memcpy(p, ".\0\0\0", 4); /* entry */
207 p++;
208 *p++ = xdr_one; /* bitmap length */
209 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
210 *p++ = htonl(8); /* attribute buffer length */
211 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
212 }
213
214 *p++ = xdr_one; /* next */
215 *p++ = xdr_zero; /* cookie, first word */
216 *p++ = xdr_two; /* cookie, second word */
217 *p++ = xdr_two; /* entry len */
218 memcpy(p, "..\0\0", 4); /* entry */
219 p++;
220 *p++ = xdr_one; /* bitmap length */
221 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
222 *p++ = htonl(8); /* attribute buffer length */
223 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
224
225 readdir->pgbase = (char *)p - (char *)start;
226 readdir->count -= readdir->pgbase;
227 kunmap_atomic(start, KM_USER0);
228}
229
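/*
 * Wait (killably) for the state manager to finish running, i.e. for the
 * NFS4CLNT_MANAGER_RUNNING bit to clear on this nfs_client.
 */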
230static int nfs4_wait_clnt_recover(struct nfs_client *clp)
231{
232 int res;
233
234 might_sleep();
235
236 res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
237 nfs_wait_bit_killable, TASK_KILLABLE);
238 return res;
239}
240
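/*
 * Sleep before retrying an NFSv4 request. The delay starts at
 * NFS4_POLL_RETRY_MIN, doubles on each call, and is capped at
 * NFS4_POLL_RETRY_MAX; a fatal signal aborts the wait with -ERESTARTSYS.
 */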
241static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
242{
243 int res = 0;
244
245 might_sleep();
246
247 if (*timeout <= 0)
248 *timeout = NFS4_POLL_RETRY_MIN;
249 if (*timeout > NFS4_POLL_RETRY_MAX)
250 *timeout = NFS4_POLL_RETRY_MAX;
251 freezable_schedule_timeout_killable(*timeout);
252 if (fatal_signal_pending(current))
253 res = -ERESTARTSYS;
254 *timeout <<= 1;
255 return res;
256}
257
258/* This is the error handling routine for processes that are allowed
259 * to sleep.
260 */
261static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
262{
263 struct nfs_client *clp = server->nfs_client;
264 struct nfs4_state *state = exception->state;
265 int ret = errorcode;
266
267 exception->retry = 0;
268 switch(errorcode) {
269 case 0:
270 return 0;
271 case -NFS4ERR_DELEG_REVOKED:
272 case -NFS4ERR_ADMIN_REVOKED:
273 case -NFS4ERR_BAD_STATEID:
274 if (state != NULL)
275 nfs_remove_bad_delegation(state->inode);
276 case -NFS4ERR_OPENMODE:
277 if (state == NULL)
278 break;
279 nfs4_schedule_stateid_recovery(server, state);
280 goto wait_on_recovery;
281 case -NFS4ERR_EXPIRED:
282 if (state != NULL)
283 nfs4_schedule_stateid_recovery(server, state);
284 case -NFS4ERR_STALE_STATEID:
285 case -NFS4ERR_STALE_CLIENTID:
286 nfs4_schedule_lease_recovery(clp);
287 goto wait_on_recovery;
288#if defined(CONFIG_NFS_V4_1)
289 case -NFS4ERR_BADSESSION:
290 case -NFS4ERR_BADSLOT:
291 case -NFS4ERR_BAD_HIGH_SLOT:
292 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
293 case -NFS4ERR_DEADSESSION:
294 case -NFS4ERR_SEQ_FALSE_RETRY:
295 case -NFS4ERR_SEQ_MISORDERED:
296 dprintk("%s ERROR: %d Reset session\n", __func__,
297 errorcode);
298 nfs4_schedule_session_recovery(clp->cl_session);
299 exception->retry = 1;
300 break;
301#endif /* defined(CONFIG_NFS_V4_1) */
302 case -NFS4ERR_FILE_OPEN:
303 if (exception->timeout > HZ) {
304 /* We have retried a decent amount, time to
305 * fail
306 */
307 ret = -EBUSY;
308 break;
309 }
310 case -NFS4ERR_GRACE:
311 case -NFS4ERR_DELAY:
312 case -EKEYEXPIRED:
313 ret = nfs4_delay(server->client, &exception->timeout);
314 if (ret != 0)
315 break;
316 case -NFS4ERR_RETRY_UNCACHED_REP:
317 case -NFS4ERR_OLD_STATEID:
318 exception->retry = 1;
319 break;
320 case -NFS4ERR_BADOWNER:
321 /* The following works around a Linux server bug! */
322 case -NFS4ERR_BADNAME:
323 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
324 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
325 exception->retry = 1;
326 printk(KERN_WARNING "NFS: v4 server %s "
327 "does not accept raw "
328 "uid/gids. "
329 "Reenabling the idmapper.\n",
330 server->nfs_client->cl_hostname);
331 }
332 }
333 /* We failed to handle the error */
334 return nfs4_map_errors(ret);
335wait_on_recovery:
336 ret = nfs4_wait_clnt_recover(clp);
337 if (ret == 0)
338 exception->retry = 1;
339 return ret;
340}
341
342
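/* Record the time of the most recent lease-renewing operation */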
343static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
344{
345 spin_lock(&clp->cl_lock);
346 if (time_before(clp->cl_last_renewal,timestamp))
347 clp->cl_last_renewal = timestamp;
348 spin_unlock(&clp->cl_lock);
349}
350
351static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
352{
353 do_renew_lease(server->nfs_client, timestamp);
354}
355
356#if defined(CONFIG_NFS_V4_1)
357
358/*
359 * nfs4_free_slot - free a slot and efficiently update slot table.
360 *
361 * freeing a slot is trivially done by clearing its respective bit
362 * in the bitmap.
363 * If the freed slotid equals highest_used_slotid we want to update it
364 * so that the server would be able to size down the slot table if needed,
365 * otherwise we know that the highest_used_slotid is still in use.
366 * When updating highest_used_slotid there may be "holes" in the bitmap
367 * so we need to scan down from highest_used_slotid to 0 looking for the now
368 * highest slotid in use.
369 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
370 *
371 * Must be called while holding tbl->slot_tbl_lock
372 */
373static void
374nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
375{
376 BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
377 /* clear used bit in bitmap */
378 __clear_bit(slotid, tbl->used_slots);
379
380 /* update highest_used_slotid when it is freed */
381 if (slotid == tbl->highest_used_slotid) {
382 slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
383 if (slotid < tbl->max_slots)
384 tbl->highest_used_slotid = slotid;
385 else
386 tbl->highest_used_slotid = NFS4_NO_SLOT;
387 }
388 dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
389 slotid, tbl->highest_used_slotid);
390}
391
392bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
393{
394 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
395 return true;
396}
397
398/*
399 * Signal state manager thread if session fore channel is drained
400 */
401static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
402{
403 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
404 rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
405 nfs4_set_task_privileged, NULL);
406 return;
407 }
408
409 if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
410 return;
411
412 dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
413 complete(&ses->fc_slot_table.complete);
414}
415
416/*
417 * Signal state manager thread if session back channel is drained
418 */
419void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
420{
421 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
422 ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
423 return;
424 dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
425 complete(&ses->bc_slot_table.complete);
426}
427
428static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
429{
430 struct nfs4_slot_table *tbl;
431
432 tbl = &res->sr_session->fc_slot_table;
433 if (!res->sr_slot) {
434 /* just wake up the next guy waiting since
 435	 * we may not have consumed a slot after all */
436 dprintk("%s: No slot\n", __func__);
437 return;
438 }
439
440 spin_lock(&tbl->slot_tbl_lock);
441 nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
442 nfs4_check_drain_fc_complete(res->sr_session);
443 spin_unlock(&tbl->slot_tbl_lock);
444 res->sr_slot = NULL;
445}
446
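/*
 * Handle the result of the SEQUENCE operation: on success, bump the slot
 * sequence number, renew the lease and check the status flags for needed
 * recovery; on NFS4ERR_DELAY, restart the RPC after a delay while keeping
 * the slot; otherwise just bump the sequence number and free the slot.
 */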
447static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
448{
449 unsigned long timestamp;
450 struct nfs_client *clp;
451
452 /*
453 * sr_status remains 1 if an RPC level error occurred. The server
 454	 * may or may not have processed the sequence operation.
455 * Proceed as if the server received and processed the sequence
456 * operation.
457 */
458 if (res->sr_status == 1)
459 res->sr_status = NFS_OK;
460
461 /* don't increment the sequence number if the task wasn't sent */
462 if (!RPC_WAS_SENT(task))
463 goto out;
464
465 /* Check the SEQUENCE operation status */
466 switch (res->sr_status) {
467 case 0:
468 /* Update the slot's sequence and clientid lease timer */
469 ++res->sr_slot->seq_nr;
470 timestamp = res->sr_renewal_time;
471 clp = res->sr_session->clp;
472 do_renew_lease(clp, timestamp);
473 /* Check sequence flags */
474 if (res->sr_status_flags != 0)
475 nfs4_schedule_lease_recovery(clp);
476 break;
477 case -NFS4ERR_DELAY:
478 /* The server detected a resend of the RPC call and
479 * returned NFS4ERR_DELAY as per Section 2.10.6.2
480 * of RFC5661.
481 */
482 dprintk("%s: slot=%td seq=%d: Operation in progress\n",
483 __func__,
484 res->sr_slot - res->sr_session->fc_slot_table.slots,
485 res->sr_slot->seq_nr);
486 goto out_retry;
487 default:
488 /* Just update the slot sequence no. */
489 ++res->sr_slot->seq_nr;
490 }
491out:
492 /* The session may be reset by one of the error handlers. */
493 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
494 nfs41_sequence_free_slot(res);
495 return 1;
496out_retry:
497 if (!rpc_restart_call(task))
498 goto out;
499 rpc_delay(task, NFS4_POLL_RETRY_MAX);
500 return 0;
501}
502
503static int nfs4_sequence_done(struct rpc_task *task,
504 struct nfs4_sequence_res *res)
505{
506 if (res->sr_session == NULL)
507 return 1;
508 return nfs41_sequence_done(task, res);
509}
510
511/*
512 * nfs4_find_slot - efficiently look for a free slot
513 *
514 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
515 * If found, we mark the slot as used, update the highest_used_slotid,
516 * and respectively set up the sequence operation args.
517 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
518 *
 519	 * Note: must be called while holding the slot_tbl_lock.
520 */
521static u32
522nfs4_find_slot(struct nfs4_slot_table *tbl)
523{
524 u32 slotid;
525 u32 ret_id = NFS4_NO_SLOT;
526
527 dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
528 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
529 tbl->max_slots);
530 slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
531 if (slotid >= tbl->max_slots)
532 goto out;
533 __set_bit(slotid, tbl->used_slots);
534 if (slotid > tbl->highest_used_slotid ||
535 tbl->highest_used_slotid == NFS4_NO_SLOT)
536 tbl->highest_used_slotid = slotid;
537 ret_id = slotid;
538out:
539 dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
540 __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
541 return ret_id;
542}
543
544static void nfs41_init_sequence(struct nfs4_sequence_args *args,
545 struct nfs4_sequence_res *res, int cache_reply)
546{
547 args->sa_session = NULL;
548 args->sa_cache_this = 0;
549 if (cache_reply)
550 args->sa_cache_this = 1;
551 res->sr_session = NULL;
552 res->sr_slot = NULL;
553}
554
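/*
 * Reserve a session slot for this request. If the session is draining,
 * other (non-privileged) requests are already queued, or no slot is free,
 * put the task to sleep on the slot table waitqueue and return -EAGAIN.
 */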
555int nfs41_setup_sequence(struct nfs4_session *session,
556 struct nfs4_sequence_args *args,
557 struct nfs4_sequence_res *res,
558 struct rpc_task *task)
559{
560 struct nfs4_slot *slot;
561 struct nfs4_slot_table *tbl;
562 u32 slotid;
563
564 dprintk("--> %s\n", __func__);
565 /* slot already allocated? */
566 if (res->sr_slot != NULL)
567 return 0;
568
569 tbl = &session->fc_slot_table;
570
571 spin_lock(&tbl->slot_tbl_lock);
572 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
573 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
574 /* The state manager will wait until the slot table is empty */
575 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
576 spin_unlock(&tbl->slot_tbl_lock);
577 dprintk("%s session is draining\n", __func__);
578 return -EAGAIN;
579 }
580
581 if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
582 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
583 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
584 spin_unlock(&tbl->slot_tbl_lock);
585 dprintk("%s enforce FIFO order\n", __func__);
586 return -EAGAIN;
587 }
588
589 slotid = nfs4_find_slot(tbl);
590 if (slotid == NFS4_NO_SLOT) {
591 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
592 spin_unlock(&tbl->slot_tbl_lock);
593 dprintk("<-- %s: no free slots\n", __func__);
594 return -EAGAIN;
595 }
596 spin_unlock(&tbl->slot_tbl_lock);
597
598 rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
599 slot = tbl->slots + slotid;
600 args->sa_session = session;
601 args->sa_slotid = slotid;
602
603 dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
604
605 res->sr_session = session;
606 res->sr_slot = slot;
607 res->sr_renewal_time = jiffies;
608 res->sr_status_flags = 0;
609 /*
610 * sr_status is only set in decode_sequence, and so will remain
611 * set to 1 if an rpc level failure occurs.
612 */
613 res->sr_status = 1;
614 return 0;
615}
616EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
617
618int nfs4_setup_sequence(const struct nfs_server *server,
619 struct nfs4_sequence_args *args,
620 struct nfs4_sequence_res *res,
621 struct rpc_task *task)
622{
623 struct nfs4_session *session = nfs4_get_session(server);
624 int ret = 0;
625
626 if (session == NULL)
627 goto out;
628
629 dprintk("--> %s clp %p session %p sr_slot %td\n",
630 __func__, session->clp, session, res->sr_slot ?
631 res->sr_slot - session->fc_slot_table.slots : -1);
632
633 ret = nfs41_setup_sequence(session, args, res, task);
634out:
635 dprintk("<-- %s status=%d\n", __func__, ret);
636 return ret;
637}
638
639struct nfs41_call_sync_data {
640 const struct nfs_server *seq_server;
641 struct nfs4_sequence_args *seq_args;
642 struct nfs4_sequence_res *seq_res;
643};
644
645static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
646{
647 struct nfs41_call_sync_data *data = calldata;
648
649 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
650
651 if (nfs4_setup_sequence(data->seq_server, data->seq_args,
652 data->seq_res, task))
653 return;
654 rpc_call_start(task);
655}
656
657static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
658{
659 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
660 nfs41_call_sync_prepare(task, calldata);
661}
662
663static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
664{
665 struct nfs41_call_sync_data *data = calldata;
666
667 nfs41_sequence_done(task, data->seq_res);
668}
669
670struct rpc_call_ops nfs41_call_sync_ops = {
671 .rpc_call_prepare = nfs41_call_sync_prepare,
672 .rpc_call_done = nfs41_call_sync_done,
673};
674
675struct rpc_call_ops nfs41_call_priv_sync_ops = {
676 .rpc_call_prepare = nfs41_call_priv_sync_prepare,
677 .rpc_call_done = nfs41_call_sync_done,
678};
679
680static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
681 struct nfs_server *server,
682 struct rpc_message *msg,
683 struct nfs4_sequence_args *args,
684 struct nfs4_sequence_res *res,
685 int privileged)
686{
687 int ret;
688 struct rpc_task *task;
689 struct nfs41_call_sync_data data = {
690 .seq_server = server,
691 .seq_args = args,
692 .seq_res = res,
693 };
694 struct rpc_task_setup task_setup = {
695 .rpc_client = clnt,
696 .rpc_message = msg,
697 .callback_ops = &nfs41_call_sync_ops,
698 .callback_data = &data
699 };
700
701 if (privileged)
702 task_setup.callback_ops = &nfs41_call_priv_sync_ops;
703 task = rpc_run_task(&task_setup);
704 if (IS_ERR(task))
705 ret = PTR_ERR(task);
706 else {
707 ret = task->tk_status;
708 rpc_put_task(task);
709 }
710 return ret;
711}
712
713int _nfs4_call_sync_session(struct rpc_clnt *clnt,
714 struct nfs_server *server,
715 struct rpc_message *msg,
716 struct nfs4_sequence_args *args,
717 struct nfs4_sequence_res *res,
718 int cache_reply)
719{
720 nfs41_init_sequence(args, res, cache_reply);
721 return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
722}
723
724#else
725static inline
726void nfs41_init_sequence(struct nfs4_sequence_args *args,
727 struct nfs4_sequence_res *res, int cache_reply)
728{
729}
730
731static int nfs4_sequence_done(struct rpc_task *task,
732 struct nfs4_sequence_res *res)
733{
734 return 1;
735}
736#endif /* CONFIG_NFS_V4_1 */
737
738int _nfs4_call_sync(struct rpc_clnt *clnt,
739 struct nfs_server *server,
740 struct rpc_message *msg,
741 struct nfs4_sequence_args *args,
742 struct nfs4_sequence_res *res,
743 int cache_reply)
744{
745 nfs41_init_sequence(args, res, cache_reply);
746 return rpc_call_sync(clnt, msg, 0);
747}
748
749static inline
750int nfs4_call_sync(struct rpc_clnt *clnt,
751 struct nfs_server *server,
752 struct rpc_message *msg,
753 struct nfs4_sequence_args *args,
754 struct nfs4_sequence_res *res,
755 int cache_reply)
756{
757 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
758 args, res, cache_reply);
759}
760
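/*
 * Apply the change_info4 returned by a directory-modifying operation.
 * If the update was not atomic, or the pre-operation change attribute
 * does not match our cached i_version, force a lookup revalidation.
 */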
761static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
762{
763 struct nfs_inode *nfsi = NFS_I(dir);
764
765 spin_lock(&dir->i_lock);
766 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
767 if (!cinfo->atomic || cinfo->before != dir->i_version)
768 nfs_force_lookup_revalidate(dir);
769 dir->i_version = cinfo->after;
770 spin_unlock(&dir->i_lock);
771}
772
773struct nfs4_opendata {
774 struct kref kref;
775 struct nfs_openargs o_arg;
776 struct nfs_openres o_res;
777 struct nfs_open_confirmargs c_arg;
778 struct nfs_open_confirmres c_res;
779 struct nfs4_string owner_name;
780 struct nfs4_string group_name;
781 struct nfs_fattr f_attr;
782 struct nfs_fattr dir_attr;
783 struct dentry *dir;
784 struct dentry *dentry;
785 struct nfs4_state_owner *owner;
786 struct nfs4_state *state;
787 struct iattr attrs;
788 unsigned long timestamp;
789 unsigned int rpc_done : 1;
790 int rpc_status;
791 int cancelled;
792};
793
794
795static void nfs4_init_opendata_res(struct nfs4_opendata *p)
796{
797 p->o_res.f_attr = &p->f_attr;
798 p->o_res.dir_attr = &p->dir_attr;
799 p->o_res.seqid = p->o_arg.seqid;
800 p->c_res.seqid = p->c_arg.seqid;
801 p->o_res.server = p->o_arg.server;
802 nfs_fattr_init(&p->f_attr);
803 nfs_fattr_init(&p->dir_attr);
804 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
805}
806
807static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
808 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
809 const struct iattr *attrs,
810 gfp_t gfp_mask)
811{
812 struct dentry *parent = dget_parent(dentry);
813 struct inode *dir = parent->d_inode;
814 struct nfs_server *server = NFS_SERVER(dir);
815 struct nfs4_opendata *p;
816
817 p = kzalloc(sizeof(*p), gfp_mask);
818 if (p == NULL)
819 goto err;
820 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
821 if (p->o_arg.seqid == NULL)
822 goto err_free;
823 nfs_sb_active(dentry->d_sb);
824 p->dentry = dget(dentry);
825 p->dir = parent;
826 p->owner = sp;
827 atomic_inc(&sp->so_count);
828 p->o_arg.fh = NFS_FH(dir);
829 p->o_arg.open_flags = flags;
830 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
831 p->o_arg.clientid = server->nfs_client->cl_clientid;
832 p->o_arg.id = sp->so_seqid.owner_id;
833 p->o_arg.name = &dentry->d_name;
834 p->o_arg.server = server;
835 p->o_arg.bitmask = server->attr_bitmask;
836 p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
837 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
838 if (attrs != NULL && attrs->ia_valid != 0) {
839 u32 *s;
840
841 p->o_arg.u.attrs = &p->attrs;
842 memcpy(&p->attrs, attrs, sizeof(p->attrs));
843 s = (u32 *) p->o_arg.u.verifier.data;
844 s[0] = jiffies;
845 s[1] = current->pid;
846 }
847 p->c_arg.fh = &p->o_res.fh;
848 p->c_arg.stateid = &p->o_res.stateid;
849 p->c_arg.seqid = p->o_arg.seqid;
850 nfs4_init_opendata_res(p);
851 kref_init(&p->kref);
852 return p;
853err_free:
854 kfree(p);
855err:
856 dput(parent);
857 return NULL;
858}
859
860static void nfs4_opendata_free(struct kref *kref)
861{
862 struct nfs4_opendata *p = container_of(kref,
863 struct nfs4_opendata, kref);
864 struct super_block *sb = p->dentry->d_sb;
865
866 nfs_free_seqid(p->o_arg.seqid);
867 if (p->state != NULL)
868 nfs4_put_open_state(p->state);
869 nfs4_put_state_owner(p->owner);
870 dput(p->dir);
871 dput(p->dentry);
872 nfs_sb_deactive(sb);
873 nfs_fattr_free_names(&p->f_attr);
874 kfree(p);
875}
876
877static void nfs4_opendata_put(struct nfs4_opendata *p)
878{
879 if (p != NULL)
880 kref_put(&p->kref, nfs4_opendata_free);
881}
882
883static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
884{
885 int ret;
886
887 ret = rpc_wait_for_completion_task(task);
888 return ret;
889}
890
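/*
 * Can this open be satisfied from state we already hold? Requires that
 * the requested mode is already open (n_rdonly/n_wronly/n_rdwr) and that
 * the caller is not asking for O_EXCL or O_TRUNC semantics.
 */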
891static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
892{
893 int ret = 0;
894
895 if (open_mode & (O_EXCL|O_TRUNC))
896 goto out;
897 switch (mode & (FMODE_READ|FMODE_WRITE)) {
898 case FMODE_READ:
899 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
900 && state->n_rdonly != 0;
901 break;
902 case FMODE_WRITE:
903 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
904 && state->n_wronly != 0;
905 break;
906 case FMODE_READ|FMODE_WRITE:
907 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
908 && state->n_rdwr != 0;
909 }
910out:
911 return ret;
912}
913
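/*
 * A delegation can satisfy the open if it covers the requested open mode
 * and is not currently being reclaimed after a server reboot.
 */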
914static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
915{
916 if (delegation == NULL)
917 return 0;
918 if ((delegation->type & fmode) != fmode)
919 return 0;
920 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
921 return 0;
922 nfs_mark_delegation_referenced(delegation);
923 return 1;
924}
925
926static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
927{
928 switch (fmode) {
929 case FMODE_WRITE:
930 state->n_wronly++;
931 break;
932 case FMODE_READ:
933 state->n_rdonly++;
934 break;
935 case FMODE_READ|FMODE_WRITE:
936 state->n_rdwr++;
937 }
938 nfs4_state_set_mode_locked(state, state->state | fmode);
939}
940
941static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
942{
943 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
944 memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
945 memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
946 switch (fmode) {
947 case FMODE_READ:
948 set_bit(NFS_O_RDONLY_STATE, &state->flags);
949 break;
950 case FMODE_WRITE:
951 set_bit(NFS_O_WRONLY_STATE, &state->flags);
952 break;
953 case FMODE_READ|FMODE_WRITE:
954 set_bit(NFS_O_RDWR_STATE, &state->flags);
955 }
956}
957
958static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
959{
960 write_seqlock(&state->seqlock);
961 nfs_set_open_stateid_locked(state, stateid, fmode);
962 write_sequnlock(&state->seqlock);
963}
964
965static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
966{
967 /*
968 * Protect the call to nfs4_state_set_mode_locked and
969 * serialise the stateid update
970 */
971 write_seqlock(&state->seqlock);
972 if (deleg_stateid != NULL) {
973 memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data));
974 set_bit(NFS_DELEGATED_STATE, &state->flags);
975 }
976 if (open_stateid != NULL)
977 nfs_set_open_stateid_locked(state, open_stateid, fmode);
978 write_sequnlock(&state->seqlock);
979 spin_lock(&state->owner->so_lock);
980 update_open_stateflags(state, fmode);
981 spin_unlock(&state->owner->so_lock);
982}
983
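/*
 * Update the state's open and delegation stateids after a successful OPEN.
 * If a matching delegation is still cached and covers this open mode, its
 * stateid is recorded as well. Returns 1 if any stateid was updated.
 */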
984static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
985{
986 struct nfs_inode *nfsi = NFS_I(state->inode);
987 struct nfs_delegation *deleg_cur;
988 int ret = 0;
989
990 fmode &= (FMODE_READ|FMODE_WRITE);
991
992 rcu_read_lock();
993 deleg_cur = rcu_dereference(nfsi->delegation);
994 if (deleg_cur == NULL)
995 goto no_delegation;
996
997 spin_lock(&deleg_cur->lock);
998 if (nfsi->delegation != deleg_cur ||
999 (deleg_cur->type & fmode) != fmode)
1000 goto no_delegation_unlock;
1001
1002 if (delegation == NULL)
1003 delegation = &deleg_cur->stateid;
1004 else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
1005 goto no_delegation_unlock;
1006
1007 nfs_mark_delegation_referenced(deleg_cur);
1008 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1009 ret = 1;
1010no_delegation_unlock:
1011 spin_unlock(&deleg_cur->lock);
1012no_delegation:
1013 rcu_read_unlock();
1014
1015 if (!ret && open_stateid != NULL) {
1016 __update_open_stateid(state, open_stateid, NULL, fmode);
1017 ret = 1;
1018 }
1019
1020 return ret;
1021}
1022
1023
1024static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1025{
1026 struct nfs_delegation *delegation;
1027
1028 rcu_read_lock();
1029 delegation = rcu_dereference(NFS_I(inode)->delegation);
1030 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1031 rcu_read_unlock();
1032 return;
1033 }
1034 rcu_read_unlock();
1035 nfs_inode_return_delegation(inode);
1036}
1037
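/*
 * Try to satisfy an OPEN without going to the server, using either
 * already-open state or a cached delegation. Returns a referenced
 * nfs4_state on success, or an ERR_PTR() (typically -EAGAIN) if a real
 * OPEN call is required.
 */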
1038static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1039{
1040 struct nfs4_state *state = opendata->state;
1041 struct nfs_inode *nfsi = NFS_I(state->inode);
1042 struct nfs_delegation *delegation;
1043 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
1044 fmode_t fmode = opendata->o_arg.fmode;
1045 nfs4_stateid stateid;
1046 int ret = -EAGAIN;
1047
1048 for (;;) {
1049 if (can_open_cached(state, fmode, open_mode)) {
1050 spin_lock(&state->owner->so_lock);
1051 if (can_open_cached(state, fmode, open_mode)) {
1052 update_open_stateflags(state, fmode);
1053 spin_unlock(&state->owner->so_lock);
1054 goto out_return_state;
1055 }
1056 spin_unlock(&state->owner->so_lock);
1057 }
1058 rcu_read_lock();
1059 delegation = rcu_dereference(nfsi->delegation);
1060 if (!can_open_delegated(delegation, fmode)) {
1061 rcu_read_unlock();
1062 break;
1063 }
1064 /* Save the delegation */
1065 memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
1066 rcu_read_unlock();
1067 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1068 if (ret != 0)
1069 goto out;
1070 ret = -EAGAIN;
1071
1072 /* Try to update the stateid using the delegation */
1073 if (update_open_stateid(state, NULL, &stateid, fmode))
1074 goto out_return_state;
1075 }
1076out:
1077 return ERR_PTR(ret);
1078out_return_state:
1079 atomic_inc(&state->count);
1080 return state;
1081}
1082
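/*
 * Turn the results of an OPEN call into a referenced nfs4_state:
 * instantiate the inode from the returned attributes, record any
 * delegation the server handed out, and update the open stateid.
 */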
1083static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1084{
1085 struct inode *inode;
1086 struct nfs4_state *state = NULL;
1087 struct nfs_delegation *delegation;
1088 int ret;
1089
1090 if (!data->rpc_done) {
1091 state = nfs4_try_open_cached(data);
1092 goto out;
1093 }
1094
1095 ret = -EAGAIN;
1096 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1097 goto err;
1098 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1099 ret = PTR_ERR(inode);
1100 if (IS_ERR(inode))
1101 goto err;
1102 ret = -ENOMEM;
1103 state = nfs4_get_open_state(inode, data->owner);
1104 if (state == NULL)
1105 goto err_put_inode;
1106 if (data->o_res.delegation_type != 0) {
1107 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
1108 int delegation_flags = 0;
1109
1110 rcu_read_lock();
1111 delegation = rcu_dereference(NFS_I(inode)->delegation);
1112 if (delegation)
1113 delegation_flags = delegation->flags;
1114 rcu_read_unlock();
1115 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1116 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1117 "returning a delegation for "
1118 "OPEN(CLAIM_DELEGATE_CUR)\n",
1119 clp->cl_hostname);
1120 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1121 nfs_inode_set_delegation(state->inode,
1122 data->owner->so_cred,
1123 &data->o_res);
1124 else
1125 nfs_inode_reclaim_delegation(state->inode,
1126 data->owner->so_cred,
1127 &data->o_res);
1128 }
1129
1130 update_open_stateid(state, &data->o_res.stateid, NULL,
1131 data->o_arg.fmode);
1132 iput(inode);
1133out:
1134 return state;
1135err_put_inode:
1136 iput(inode);
1137err:
1138 return ERR_PTR(ret);
1139}
1140
1141static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1142{
1143 struct nfs_inode *nfsi = NFS_I(state->inode);
1144 struct nfs_open_context *ctx;
1145
1146 spin_lock(&state->inode->i_lock);
1147 list_for_each_entry(ctx, &nfsi->open_files, list) {
1148 if (ctx->state != state)
1149 continue;
1150 get_nfs_open_context(ctx);
1151 spin_unlock(&state->inode->i_lock);
1152 return ctx;
1153 }
1154 spin_unlock(&state->inode->i_lock);
1155 return ERR_PTR(-ENOENT);
1156}
1157
1158static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1159{
1160 struct nfs4_opendata *opendata;
1161
1162 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1163 if (opendata == NULL)
1164 return ERR_PTR(-ENOMEM);
1165 opendata->state = state;
1166 atomic_inc(&state->count);
1167 return opendata;
1168}
1169
1170static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1171{
1172 struct nfs4_state *newstate;
1173 int ret;
1174
1175 opendata->o_arg.open_flags = 0;
1176 opendata->o_arg.fmode = fmode;
1177 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1178 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1179 nfs4_init_opendata_res(opendata);
1180 ret = _nfs4_recover_proc_open(opendata);
1181 if (ret != 0)
1182 return ret;
1183 newstate = nfs4_opendata_to_nfs4_state(opendata);
1184 if (IS_ERR(newstate))
1185 return PTR_ERR(newstate);
1186 nfs4_close_state(newstate, fmode);
1187 *res = newstate;
1188 return 0;
1189}
1190
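/*
 * Re-establish every open mode currently held on this state (read/write,
 * write-only, read-only) by replaying OPEN requests, then resynchronize
 * the primary stateid with the recovered open stateid.
 */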
1191static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1192{
1193 struct nfs4_state *newstate;
1194 int ret;
1195
1196 /* memory barrier prior to reading state->n_* */
1197 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1198 smp_rmb();
1199 if (state->n_rdwr != 0) {
1200 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1201 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1202 if (ret != 0)
1203 return ret;
1204 if (newstate != state)
1205 return -ESTALE;
1206 }
1207 if (state->n_wronly != 0) {
1208 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1209 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1210 if (ret != 0)
1211 return ret;
1212 if (newstate != state)
1213 return -ESTALE;
1214 }
1215 if (state->n_rdonly != 0) {
1216 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1217 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1218 if (ret != 0)
1219 return ret;
1220 if (newstate != state)
1221 return -ESTALE;
1222 }
1223 /*
1224 * We may have performed cached opens for all three recoveries.
1225 * Check if we need to update the current stateid.
1226 */
1227 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1228 memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
1229 write_seqlock(&state->seqlock);
1230 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1231 memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
1232 write_sequnlock(&state->seqlock);
1233 }
1234 return 0;
1235}
1236
1237/*
1238 * OPEN_RECLAIM:
1239 * reclaim state on the server after a reboot.
1240 */
1241static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1242{
1243 struct nfs_delegation *delegation;
1244 struct nfs4_opendata *opendata;
1245 fmode_t delegation_type = 0;
1246 int status;
1247
1248 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1249 if (IS_ERR(opendata))
1250 return PTR_ERR(opendata);
1251 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1252 opendata->o_arg.fh = NFS_FH(state->inode);
1253 rcu_read_lock();
1254 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1255 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1256 delegation_type = delegation->type;
1257 rcu_read_unlock();
1258 opendata->o_arg.u.delegation_type = delegation_type;
1259 status = nfs4_open_recover(opendata, state);
1260 nfs4_opendata_put(opendata);
1261 return status;
1262}
1263
1264static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1265{
1266 struct nfs_server *server = NFS_SERVER(state->inode);
1267 struct nfs4_exception exception = { };
1268 int err;
1269 do {
1270 err = _nfs4_do_open_reclaim(ctx, state);
1271 if (err != -NFS4ERR_DELAY)
1272 break;
1273 nfs4_handle_exception(server, err, &exception);
1274 } while (exception.retry);
1275 return err;
1276}
1277
1278static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1279{
1280 struct nfs_open_context *ctx;
1281 int ret;
1282
1283 ctx = nfs4_state_find_open_context(state);
1284 if (IS_ERR(ctx))
1285 return PTR_ERR(ctx);
1286 ret = nfs4_do_open_reclaim(ctx, state);
1287 put_nfs_open_context(ctx);
1288 return ret;
1289}
1290
1291static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1292{
1293 struct nfs4_opendata *opendata;
1294 int ret;
1295
1296 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1297 if (IS_ERR(opendata))
1298 return PTR_ERR(opendata);
1299 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1300 memcpy(opendata->o_arg.u.delegation.data, stateid->data,
1301 sizeof(opendata->o_arg.u.delegation.data));
1302 ret = nfs4_open_recover(opendata, state);
1303 nfs4_opendata_put(opendata);
1304 return ret;
1305}
1306
1307int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1308{
1309 struct nfs4_exception exception = { };
1310 struct nfs_server *server = NFS_SERVER(state->inode);
1311 int err;
1312 do {
1313 err = _nfs4_open_delegation_recall(ctx, state, stateid);
1314 switch (err) {
1315 case 0:
1316 case -ENOENT:
1317 case -ESTALE:
1318 goto out;
1319 case -NFS4ERR_BADSESSION:
1320 case -NFS4ERR_BADSLOT:
1321 case -NFS4ERR_BAD_HIGH_SLOT:
1322 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1323 case -NFS4ERR_DEADSESSION:
1324 nfs4_schedule_session_recovery(server->nfs_client->cl_session);
1325 goto out;
1326 case -NFS4ERR_STALE_CLIENTID:
1327 case -NFS4ERR_STALE_STATEID:
1328 case -NFS4ERR_EXPIRED:
1329 /* Don't recall a delegation if it was lost */
1330 nfs4_schedule_lease_recovery(server->nfs_client);
1331 goto out;
1332 case -ERESTARTSYS:
1333 /*
1334 * The show must go on: exit, but mark the
1335 * stateid as needing recovery.
1336 */
1337 case -NFS4ERR_DELEG_REVOKED:
1338 case -NFS4ERR_ADMIN_REVOKED:
1339 case -NFS4ERR_BAD_STATEID:
1340 nfs_inode_find_state_and_recover(state->inode,
1341 stateid);
1342 nfs4_schedule_stateid_recovery(server, state);
1343 case -EKEYEXPIRED:
1344 /*
1345 * User RPCSEC_GSS context has expired.
1346 * We cannot recover this stateid now, so
1347 * skip it and allow recovery thread to
1348 * proceed.
1349 */
1350 case -ENOMEM:
1351 err = 0;
1352 goto out;
1353 }
1354 err = nfs4_handle_exception(server, err, &exception);
1355 } while (exception.retry);
1356out:
1357 return err;
1358}
1359
1360static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1361{
1362 struct nfs4_opendata *data = calldata;
1363
1364 data->rpc_status = task->tk_status;
1365 if (data->rpc_status == 0) {
1366 memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
1367 sizeof(data->o_res.stateid.data));
1368 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1369 renew_lease(data->o_res.server, data->timestamp);
1370 data->rpc_done = 1;
1371 }
1372}
1373
1374static void nfs4_open_confirm_release(void *calldata)
1375{
1376 struct nfs4_opendata *data = calldata;
1377 struct nfs4_state *state = NULL;
1378
1379 /* If this request hasn't been cancelled, do nothing */
1380 if (data->cancelled == 0)
1381 goto out_free;
1382 /* In case of error, no cleanup! */
1383 if (!data->rpc_done)
1384 goto out_free;
1385 state = nfs4_opendata_to_nfs4_state(data);
1386 if (!IS_ERR(state))
1387 nfs4_close_state(state, data->o_arg.fmode);
1388out_free:
1389 nfs4_opendata_put(data);
1390}
1391
1392static const struct rpc_call_ops nfs4_open_confirm_ops = {
1393 .rpc_call_done = nfs4_open_confirm_done,
1394 .rpc_release = nfs4_open_confirm_release,
1395};
1396
1397/*
1398 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1399 */
1400static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1401{
1402 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1403 struct rpc_task *task;
1404 struct rpc_message msg = {
1405 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1406 .rpc_argp = &data->c_arg,
1407 .rpc_resp = &data->c_res,
1408 .rpc_cred = data->owner->so_cred,
1409 };
1410 struct rpc_task_setup task_setup_data = {
1411 .rpc_client = server->client,
1412 .rpc_message = &msg,
1413 .callback_ops = &nfs4_open_confirm_ops,
1414 .callback_data = data,
1415 .workqueue = nfsiod_workqueue,
1416 .flags = RPC_TASK_ASYNC,
1417 };
1418 int status;
1419
1420 kref_get(&data->kref);
1421 data->rpc_done = 0;
1422 data->rpc_status = 0;
1423 data->timestamp = jiffies;
1424 task = rpc_run_task(&task_setup_data);
1425 if (IS_ERR(task))
1426 return PTR_ERR(task);
1427 status = nfs4_wait_for_completion_rpc_task(task);
1428 if (status != 0) {
1429 data->cancelled = 1;
1430 smp_wmb();
1431 } else
1432 status = data->rpc_status;
1433 rpc_put_task(task);
1434 return status;
1435}
1436
1437static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1438{
1439 struct nfs4_opendata *data = calldata;
1440 struct nfs4_state_owner *sp = data->owner;
1441
1442 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1443 return;
1444 /*
1445 * Check if we still need to send an OPEN call, or if we can use
1446 * a delegation instead.
1447 */
1448 if (data->state != NULL) {
1449 struct nfs_delegation *delegation;
1450
1451 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1452 goto out_no_action;
1453 rcu_read_lock();
1454 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1455 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1456 can_open_delegated(delegation, data->o_arg.fmode))
1457 goto unlock_no_action;
1458 rcu_read_unlock();
1459 }
1460 /* Update sequence id. */
1461 data->o_arg.id = sp->so_seqid.owner_id;
1462 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1463 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1464 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1465 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1466 }
1467 data->timestamp = jiffies;
1468 if (nfs4_setup_sequence(data->o_arg.server,
1469 &data->o_arg.seq_args,
1470 &data->o_res.seq_res, task))
1471 return;
1472 rpc_call_start(task);
1473 return;
1474unlock_no_action:
1475 rcu_read_unlock();
1476out_no_action:
1477 task->tk_action = NULL;
1478
1479}
1480
1481static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1482{
1483 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1484 nfs4_open_prepare(task, calldata);
1485}
1486
1487static void nfs4_open_done(struct rpc_task *task, void *calldata)
1488{
1489 struct nfs4_opendata *data = calldata;
1490
1491 data->rpc_status = task->tk_status;
1492
1493 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1494 return;
1495
1496 if (task->tk_status == 0) {
1497 switch (data->o_res.f_attr->mode & S_IFMT) {
1498 case S_IFREG:
1499 break;
1500 case S_IFLNK:
1501 data->rpc_status = -ELOOP;
1502 break;
1503 case S_IFDIR:
1504 data->rpc_status = -EISDIR;
1505 break;
1506 default:
1507 data->rpc_status = -ENOTDIR;
1508 }
1509 renew_lease(data->o_res.server, data->timestamp);
1510 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1511 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1512 }
1513 data->rpc_done = 1;
1514}
1515
1516static void nfs4_open_release(void *calldata)
1517{
1518 struct nfs4_opendata *data = calldata;
1519 struct nfs4_state *state = NULL;
1520
1521 /* If this request hasn't been cancelled, do nothing */
1522 if (data->cancelled == 0)
1523 goto out_free;
1524 /* In case of error, no cleanup! */
1525 if (data->rpc_status != 0 || !data->rpc_done)
1526 goto out_free;
1527 /* In case we need an open_confirm, no cleanup! */
1528 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1529 goto out_free;
1530 state = nfs4_opendata_to_nfs4_state(data);
1531 if (!IS_ERR(state))
1532 nfs4_close_state(state, data->o_arg.fmode);
1533out_free:
1534 nfs4_opendata_put(data);
1535}
1536
1537static const struct rpc_call_ops nfs4_open_ops = {
1538 .rpc_call_prepare = nfs4_open_prepare,
1539 .rpc_call_done = nfs4_open_done,
1540 .rpc_release = nfs4_open_release,
1541};
1542
1543static const struct rpc_call_ops nfs4_recover_open_ops = {
1544 .rpc_call_prepare = nfs4_recover_open_prepare,
1545 .rpc_call_done = nfs4_open_done,
1546 .rpc_release = nfs4_open_release,
1547};
1548
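/*
 * Issue the OPEN compound as an asynchronous RPC and wait for it to
 * complete. If the wait is interrupted, mark the request as cancelled so
 * that the rpc_release callback cleans up any state the server created.
 */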
1549static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1550{
1551 struct inode *dir = data->dir->d_inode;
1552 struct nfs_server *server = NFS_SERVER(dir);
1553 struct nfs_openargs *o_arg = &data->o_arg;
1554 struct nfs_openres *o_res = &data->o_res;
1555 struct rpc_task *task;
1556 struct rpc_message msg = {
1557 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1558 .rpc_argp = o_arg,
1559 .rpc_resp = o_res,
1560 .rpc_cred = data->owner->so_cred,
1561 };
1562 struct rpc_task_setup task_setup_data = {
1563 .rpc_client = server->client,
1564 .rpc_message = &msg,
1565 .callback_ops = &nfs4_open_ops,
1566 .callback_data = data,
1567 .workqueue = nfsiod_workqueue,
1568 .flags = RPC_TASK_ASYNC,
1569 };
1570 int status;
1571
1572 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1573 kref_get(&data->kref);
1574 data->rpc_done = 0;
1575 data->rpc_status = 0;
1576 data->cancelled = 0;
1577 if (isrecover)
1578 task_setup_data.callback_ops = &nfs4_recover_open_ops;
1579 task = rpc_run_task(&task_setup_data);
1580 if (IS_ERR(task))
1581 return PTR_ERR(task);
1582 status = nfs4_wait_for_completion_rpc_task(task);
1583 if (status != 0) {
1584 data->cancelled = 1;
1585 smp_wmb();
1586 } else
1587 status = data->rpc_status;
1588 rpc_put_task(task);
1589
1590 return status;
1591}
1592
1593static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1594{
1595 struct inode *dir = data->dir->d_inode;
1596 struct nfs_openres *o_res = &data->o_res;
1597 int status;
1598
1599 status = nfs4_run_open_task(data, 1);
1600 if (status != 0 || !data->rpc_done)
1601 return status;
1602
1603 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1604
1605 nfs_refresh_inode(dir, o_res->dir_attr);
1606
1607 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1608 status = _nfs4_proc_open_confirm(data);
1609 if (status != 0)
1610 return status;
1611 }
1612
1613 return status;
1614}
1615
1616/*
1617 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1618 */
1619static int _nfs4_proc_open(struct nfs4_opendata *data)
1620{
1621 struct inode *dir = data->dir->d_inode;
1622 struct nfs_server *server = NFS_SERVER(dir);
1623 struct nfs_openargs *o_arg = &data->o_arg;
1624 struct nfs_openres *o_res = &data->o_res;
1625 int status;
1626
1627 status = nfs4_run_open_task(data, 0);
1628 if (!data->rpc_done)
1629 return status;
1630 if (status != 0) {
1631 if (status == -NFS4ERR_BADNAME &&
1632 !(o_arg->open_flags & O_CREAT))
1633 return -ENOENT;
1634 return status;
1635 }
1636
1637 nfs_fattr_map_and_free_names(server, &data->f_attr);
1638
1639 if (o_arg->open_flags & O_CREAT) {
1640 update_changeattr(dir, &o_res->cinfo);
1641 nfs_post_op_update_inode(dir, o_res->dir_attr);
1642 } else
1643 nfs_refresh_inode(dir, o_res->dir_attr);
1644 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1645 server->caps &= ~NFS_CAP_POSIX_LOCK;
1646 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1647 status = _nfs4_proc_open_confirm(data);
1648 if (status != 0)
1649 return status;
1650 }
1651 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1652 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1653 return 0;
1654}
1655
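/*
 * Wait for an expired or invalid lease to be recovered, kicking the state
 * manager as needed. Gives up with -EIO after NFS4_MAX_LOOP_ON_RECOVER
 * attempts.
 */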
1656static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1657{
1658 unsigned int loop;
1659 int ret;
1660
1661 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1662 ret = nfs4_wait_clnt_recover(clp);
1663 if (ret != 0)
1664 break;
1665 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1666 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1667 break;
1668 nfs4_schedule_state_manager(clp);
1669 ret = -EIO;
1670 }
1671 return ret;
1672}
1673
1674static int nfs4_recover_expired_lease(struct nfs_server *server)
1675{
1676 return nfs4_client_recover_expired_lease(server->nfs_client);
1677}
1678
1679/*
1680 * OPEN_EXPIRED:
1681 * reclaim state on the server after a network partition.
1682 * Assumes caller holds the appropriate lock
1683 */
1684static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1685{
1686 struct nfs4_opendata *opendata;
1687 int ret;
1688
1689 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1690 if (IS_ERR(opendata))
1691 return PTR_ERR(opendata);
1692 ret = nfs4_open_recover(opendata, state);
1693 if (ret == -ESTALE)
1694 d_drop(ctx->dentry);
1695 nfs4_opendata_put(opendata);
1696 return ret;
1697}
1698
1699static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1700{
1701 struct nfs_server *server = NFS_SERVER(state->inode);
1702 struct nfs4_exception exception = { };
1703 int err;
1704
1705 do {
1706 err = _nfs4_open_expired(ctx, state);
1707 switch (err) {
1708 default:
1709 goto out;
1710 case -NFS4ERR_GRACE:
1711 case -NFS4ERR_DELAY:
1712 nfs4_handle_exception(server, err, &exception);
1713 err = 0;
1714 }
1715 } while (exception.retry);
1716out:
1717 return err;
1718}
1719
1720static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1721{
1722 struct nfs_open_context *ctx;
1723 int ret;
1724
1725 ctx = nfs4_state_find_open_context(state);
1726 if (IS_ERR(ctx))
1727 return PTR_ERR(ctx);
1728 ret = nfs4_do_open_expired(ctx, state);
1729 put_nfs_open_context(ctx);
1730 return ret;
1731}
1732
1733#if defined(CONFIG_NFS_V4_1)
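/*
 * If the state flags claim we hold this stateid, ask the server via
 * TEST_STATEID whether it is still valid; if not, FREE_STATEID it and
 * clear the corresponding flags so the open can be re-established.
 */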
1734static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
1735{
1736 int status = NFS_OK;
1737 struct nfs_server *server = NFS_SERVER(state->inode);
1738
1739 if (state->flags & flags) {
1740 status = nfs41_test_stateid(server, stateid);
1741 if (status != NFS_OK) {
1742 nfs41_free_stateid(server, stateid);
1743 state->flags &= ~flags;
1744 }
1745 }
1746 return status;
1747}
1748
1749static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1750{
1751 int deleg_status, open_status;
1752 int deleg_flags = 1 << NFS_DELEGATED_STATE;
1753 int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);
1754
1755 deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
1756 open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags);
1757
1758 if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
1759 return NFS_OK;
1760 return nfs4_open_expired(sp, state);
1761}
1762#endif
1763
1764/*
 1765	 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
 1766	 * fields corresponding to attributes that were used to store the verifier.
 1767	 * Make sure we clobber those fields in the later setattr call.
1768 */
1769static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1770{
1771 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1772 !(sattr->ia_valid & ATTR_ATIME_SET))
1773 sattr->ia_valid |= ATTR_ATIME;
1774
1775 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1776 !(sattr->ia_valid & ATTR_MTIME_SET))
1777 sattr->ia_valid |= ATTR_MTIME;
1778}
1779
1780/*
1781 * Returns a referenced nfs4_state
1782 */
1783static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
1784{
1785 struct nfs4_state_owner *sp;
1786 struct nfs4_state *state = NULL;
1787 struct nfs_server *server = NFS_SERVER(dir);
1788 struct nfs4_opendata *opendata;
1789 int status;
1790
1791 /* Protect against reboot recovery conflicts */
1792 status = -ENOMEM;
1793 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1794 if (sp == NULL) {
1795 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1796 goto out_err;
1797 }
1798 status = nfs4_recover_expired_lease(server);
1799 if (status != 0)
1800 goto err_put_state_owner;
1801 if (dentry->d_inode != NULL)
1802 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
1803 status = -ENOMEM;
1804 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
1805 if (opendata == NULL)
1806 goto err_put_state_owner;
1807
1808 if (dentry->d_inode != NULL)
1809 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1810
1811 status = _nfs4_proc_open(opendata);
1812 if (status != 0)
1813 goto err_opendata_put;
1814
1815 state = nfs4_opendata_to_nfs4_state(opendata);
1816 status = PTR_ERR(state);
1817 if (IS_ERR(state))
1818 goto err_opendata_put;
1819 if (server->caps & NFS_CAP_POSIX_LOCK)
1820 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1821
1822 if (opendata->o_arg.open_flags & O_EXCL) {
1823 nfs4_exclusive_attrset(opendata, sattr);
1824
1825 nfs_fattr_init(opendata->o_res.f_attr);
1826 status = nfs4_do_setattr(state->inode, cred,
1827 opendata->o_res.f_attr, sattr,
1828 state);
1829 if (status == 0)
1830 nfs_setattr_update_inode(state->inode, sattr);
1831 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1832 }
1833 nfs4_opendata_put(opendata);
1834 nfs4_put_state_owner(sp);
1835 *res = state;
1836 return 0;
1837err_opendata_put:
1838 nfs4_opendata_put(opendata);
1839err_put_state_owner:
1840 nfs4_put_state_owner(sp);
1841out_err:
1842 *res = NULL;
1843 return status;
1844}
1845
1846
1847static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
1848{
1849 struct nfs4_exception exception = { };
1850 struct nfs4_state *res;
1851 int status;
1852
1853 do {
1854 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
1855 if (status == 0)
1856 break;
1857 /* NOTE: BAD_SEQID means the server and client disagree about the
1858 * book-keeping w.r.t. state-changing operations
1859 * (OPEN/CLOSE/LOCK/LOCKU...)
1860 * It is actually a sign of a bug on the client or on the server.
1861 *
1862 * If we receive a BAD_SEQID error in the particular case of
1863 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1864 * have unhashed the old state_owner for us, and that we can
1865 * therefore safely retry using a new one. We should still warn
1866 * the user though...
1867 */
1868 if (status == -NFS4ERR_BAD_SEQID) {
1869			printk(KERN_WARNING "NFS: v4 server %s "
1870			       "returned a bad sequence-id error!\n",
1871					NFS_SERVER(dir)->nfs_client->cl_hostname);
1872 exception.retry = 1;
1873 continue;
1874 }
1875 /*
1876 * BAD_STATEID on OPEN means that the server cancelled our
1877 * state before it received the OPEN_CONFIRM.
1878 * Recover by retrying the request as per the discussion
1879 * on Page 181 of RFC3530.
1880 */
1881 if (status == -NFS4ERR_BAD_STATEID) {
1882 exception.retry = 1;
1883 continue;
1884 }
1885 if (status == -EAGAIN) {
1886 /* We must have found a delegation */
1887 exception.retry = 1;
1888 continue;
1889 }
1890 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1891 status, &exception));
1892 } while (exception.retry);
1893 return res;
1894}
1895
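/*
 * Issue a single SETATTR to the server. The stateid accompanying the call
 * is chosen in order of preference: a delegation stateid if one is held for
 * the inode, otherwise the stateid selected by nfs4_select_rw_stateid() for
 * the supplied open state, otherwise the anonymous (zero) stateid. On
 * success, the lease is renewed whenever an open state was supplied.
 */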
1896static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1897 struct nfs_fattr *fattr, struct iattr *sattr,
1898 struct nfs4_state *state)
1899{
1900 struct nfs_server *server = NFS_SERVER(inode);
1901 struct nfs_setattrargs arg = {
1902 .fh = NFS_FH(inode),
1903 .iap = sattr,
1904 .server = server,
1905 .bitmask = server->attr_bitmask,
1906 };
1907 struct nfs_setattrres res = {
1908 .fattr = fattr,
1909 .server = server,
1910 };
1911 struct rpc_message msg = {
1912 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1913 .rpc_argp = &arg,
1914 .rpc_resp = &res,
1915 .rpc_cred = cred,
1916 };
1917 unsigned long timestamp = jiffies;
1918 int status;
1919
1920 nfs_fattr_init(fattr);
1921
1922 if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
1923 /* Use that stateid */
1924 } else if (state != NULL) {
1925 nfs4_select_rw_stateid(&arg.stateid, state, current->files, current->tgid);
1926 } else
1927 memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
1928
1929 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
1930 if (status == 0 && state != NULL)
1931 renew_lease(server, timestamp);
1932 return status;
1933}
1934
1935static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1936 struct nfs_fattr *fattr, struct iattr *sattr,
1937 struct nfs4_state *state)
1938{
1939 struct nfs_server *server = NFS_SERVER(inode);
1940 struct nfs4_exception exception = {
1941 .state = state,
1942 };
1943 int err;
1944 do {
1945 err = nfs4_handle_exception(server,
1946 _nfs4_do_setattr(inode, cred, fattr, sattr, state),
1947 &exception);
1948 } while (exception.retry);
1949 return err;
1950}
1951
1952struct nfs4_closedata {
1953 struct inode *inode;
1954 struct nfs4_state *state;
1955 struct nfs_closeargs arg;
1956 struct nfs_closeres res;
1957 struct nfs_fattr fattr;
1958 unsigned long timestamp;
1959 bool roc;
1960 u32 roc_barrier;
1961};
1962
1963static void nfs4_free_closedata(void *data)
1964{
1965 struct nfs4_closedata *calldata = data;
1966 struct nfs4_state_owner *sp = calldata->state->owner;
1967 struct super_block *sb = calldata->state->inode->i_sb;
1968
1969 if (calldata->roc)
1970 pnfs_roc_release(calldata->state->inode);
1971 nfs4_put_open_state(calldata->state);
1972 nfs_free_seqid(calldata->arg.seqid);
1973 nfs4_put_state_owner(sp);
1974 nfs_sb_deactive(sb);
1975 kfree(calldata);
1976}
1977
1978static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
1979 fmode_t fmode)
1980{
1981 spin_lock(&state->owner->so_lock);
1982 if (!(fmode & FMODE_READ))
1983 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1984 if (!(fmode & FMODE_WRITE))
1985 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1986 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1987 spin_unlock(&state->owner->so_lock);
1988}
1989
1990static void nfs4_close_done(struct rpc_task *task, void *data)
1991{
1992 struct nfs4_closedata *calldata = data;
1993 struct nfs4_state *state = calldata->state;
1994 struct nfs_server *server = NFS_SERVER(calldata->inode);
1995
1996 dprintk("%s: begin!\n", __func__);
1997 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
1998 return;
1999	/* Note: we are done with the inode, and in the process of freeing
2000	 * the state_owner. We keep this around to process errors
2001	 */
2002 switch (task->tk_status) {
2003 case 0:
2004 if (calldata->roc)
2005 pnfs_roc_set_barrier(state->inode,
2006 calldata->roc_barrier);
2007 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2008 renew_lease(server, calldata->timestamp);
2009 nfs4_close_clear_stateid_flags(state,
2010 calldata->arg.fmode);
2011 break;
2012 case -NFS4ERR_STALE_STATEID:
2013 case -NFS4ERR_OLD_STATEID:
2014 case -NFS4ERR_BAD_STATEID:
2015 case -NFS4ERR_EXPIRED:
2016 if (calldata->arg.fmode == 0)
2017 break;
2018 default:
2019 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2020 rpc_restart_call_prepare(task);
2021 }
2022 nfs_release_seqid(calldata->arg.seqid);
2023 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2024 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2025}
2026
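/*
 * Decide what kind of CLOSE, if any, is needed. The remaining open mode is
 * computed under the state owner's so_lock: if no readers or writers are
 * left, the stateid is closed outright; if only one kind remains, the open
 * is downgraded with OPEN_DOWNGRADE; and if nothing needs to change, the
 * RPC is skipped altogether.
 */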
2027static void nfs4_close_prepare(struct rpc_task *task, void *data)
2028{
2029 struct nfs4_closedata *calldata = data;
2030 struct nfs4_state *state = calldata->state;
2031 int call_close = 0;
2032
2033 dprintk("%s: begin!\n", __func__);
2034 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2035 return;
2036
2037 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2038 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2039 spin_lock(&state->owner->so_lock);
2040 /* Calculate the change in open mode */
2041 if (state->n_rdwr == 0) {
2042 if (state->n_rdonly == 0) {
2043 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2044 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2045 calldata->arg.fmode &= ~FMODE_READ;
2046 }
2047 if (state->n_wronly == 0) {
2048 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2049 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2050 calldata->arg.fmode &= ~FMODE_WRITE;
2051 }
2052 }
2053 spin_unlock(&state->owner->so_lock);
2054
2055 if (!call_close) {
2056 /* Note: exit _without_ calling nfs4_close_done */
2057 task->tk_action = NULL;
2058 goto out;
2059 }
2060
2061 if (calldata->arg.fmode == 0) {
2062 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2063 if (calldata->roc &&
2064 pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
2065 rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
2066 task, NULL);
2067 goto out;
2068 }
2069 }
2070
2071 nfs_fattr_init(calldata->res.fattr);
2072 calldata->timestamp = jiffies;
2073 if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
2074 &calldata->arg.seq_args,
2075 &calldata->res.seq_res,
2076 task))
2077 goto out;
2078 rpc_call_start(task);
2079out:
2080 dprintk("%s: done!\n", __func__);
2081}
2082
2083static const struct rpc_call_ops nfs4_close_ops = {
2084 .rpc_call_prepare = nfs4_close_prepare,
2085 .rpc_call_done = nfs4_close_done,
2086 .rpc_release = nfs4_free_closedata,
2087};
2088
2089/*
2090 * It is possible for data to be read from or written to a memory-mapped file
2091 * after the sys_close call (which hits the VFS layer as a flush).
2092 * This means that we can't safely call NFSv4 CLOSE on a file until
2093 * the inode is cleared. This in turn means that we are not good
2094 * NFSv4 citizens - we do not indicate to the server that it should update
2095 * the file's share state even when we are done with one of the three share
2096 * stateids in the inode.
2097 *
2098 * NOTE: Caller must be holding the sp->so_owner semaphore!
2099 */
2100int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
2101{
2102 struct nfs_server *server = NFS_SERVER(state->inode);
2103 struct nfs4_closedata *calldata;
2104 struct nfs4_state_owner *sp = state->owner;
2105 struct rpc_task *task;
2106 struct rpc_message msg = {
2107 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2108 .rpc_cred = state->owner->so_cred,
2109 };
2110 struct rpc_task_setup task_setup_data = {
2111 .rpc_client = server->client,
2112 .rpc_message = &msg,
2113 .callback_ops = &nfs4_close_ops,
2114 .workqueue = nfsiod_workqueue,
2115 .flags = RPC_TASK_ASYNC,
2116 };
2117 int status = -ENOMEM;
2118
2119 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2120 if (calldata == NULL)
2121 goto out;
2122 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2123 calldata->inode = state->inode;
2124 calldata->state = state;
2125 calldata->arg.fh = NFS_FH(state->inode);
2126 calldata->arg.stateid = &state->open_stateid;
2127 /* Serialization for the sequence id */
2128 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2129 if (calldata->arg.seqid == NULL)
2130 goto out_free_calldata;
2131 calldata->arg.fmode = 0;
2132 calldata->arg.bitmask = server->cache_consistency_bitmask;
2133 calldata->res.fattr = &calldata->fattr;
2134 calldata->res.seqid = calldata->arg.seqid;
2135 calldata->res.server = server;
2136 calldata->roc = roc;
2137 nfs_sb_active(calldata->inode->i_sb);
2138
2139 msg.rpc_argp = &calldata->arg;
2140 msg.rpc_resp = &calldata->res;
2141 task_setup_data.callback_data = calldata;
2142 task = rpc_run_task(&task_setup_data);
2143 if (IS_ERR(task))
2144 return PTR_ERR(task);
2145 status = 0;
2146 if (wait)
2147 status = rpc_wait_for_completion_task(task);
2148 rpc_put_task(task);
2149 return status;
2150out_free_calldata:
2151 kfree(calldata);
2152out:
2153 if (roc)
2154 pnfs_roc_release(state->inode);
2155 nfs4_put_open_state(state);
2156 nfs4_put_state_owner(sp);
2157 return status;
2158}
2159
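/*
 * Open the file named by the open context and attach the resulting
 * nfs4_state to it. Returns a new reference to the opened inode.
 */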
2160static struct inode *
2161nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2162{
2163 struct nfs4_state *state;
2164
2165 /* Protect against concurrent sillydeletes */
2166 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
2167 if (IS_ERR(state))
2168 return ERR_CAST(state);
2169 ctx->state = state;
2170 return igrab(state->inode);
2171}
2172
2173static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2174{
2175 if (ctx->state == NULL)
2176 return;
2177 if (is_sync)
2178 nfs4_close_sync(ctx->state, ctx->mode);
2179 else
2180 nfs4_close_state(ctx->state, ctx->mode);
2181}
2182
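/*
 * Probe the attributes supported by the server and translate the result
 * into NFS_CAP_* flags and the cache consistency bitmask for this
 * nfs_server. The server's ACL support bitmask and filehandle expiry
 * type are recorded as well.
 */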
2183static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2184{
2185 struct nfs4_server_caps_arg args = {
2186 .fhandle = fhandle,
2187 };
2188 struct nfs4_server_caps_res res = {};
2189 struct rpc_message msg = {
2190 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2191 .rpc_argp = &args,
2192 .rpc_resp = &res,
2193 };
2194 int status;
2195
2196 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2197 if (status == 0) {
2198 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2199 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2200 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2201 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2202 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2203 NFS_CAP_CTIME|NFS_CAP_MTIME);
2204 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2205 server->caps |= NFS_CAP_ACLS;
2206 if (res.has_links != 0)
2207 server->caps |= NFS_CAP_HARDLINKS;
2208 if (res.has_symlinks != 0)
2209 server->caps |= NFS_CAP_SYMLINKS;
2210 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2211 server->caps |= NFS_CAP_FILEID;
2212 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2213 server->caps |= NFS_CAP_MODE;
2214 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2215 server->caps |= NFS_CAP_NLINK;
2216 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2217 server->caps |= NFS_CAP_OWNER;
2218 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2219 server->caps |= NFS_CAP_OWNER_GROUP;
2220 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2221 server->caps |= NFS_CAP_ATIME;
2222 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2223 server->caps |= NFS_CAP_CTIME;
2224 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2225 server->caps |= NFS_CAP_MTIME;
2226
2227 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2228 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2229 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2230 server->acl_bitmask = res.acl_bitmask;
2231 server->fh_expire_type = res.fh_expire_type;
2232 }
2233
2234 return status;
2235}
2236
2237int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2238{
2239 struct nfs4_exception exception = { };
2240 int err;
2241 do {
2242 err = nfs4_handle_exception(server,
2243 _nfs4_server_capabilities(server, fhandle),
2244 &exception);
2245 } while (exception.retry);
2246 return err;
2247}
2248
2249static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2250 struct nfs_fsinfo *info)
2251{
2252 struct nfs4_lookup_root_arg args = {
2253 .bitmask = nfs4_fattr_bitmap,
2254 };
2255 struct nfs4_lookup_res res = {
2256 .server = server,
2257 .fattr = info->fattr,
2258 .fh = fhandle,
2259 };
2260 struct rpc_message msg = {
2261 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2262 .rpc_argp = &args,
2263 .rpc_resp = &res,
2264 };
2265
2266 nfs_fattr_init(info->fattr);
2267 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2268}
2269
2270static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2271 struct nfs_fsinfo *info)
2272{
2273 struct nfs4_exception exception = { };
2274 int err;
2275 do {
2276 err = _nfs4_lookup_root(server, fhandle, info);
2277 switch (err) {
2278 case 0:
2279 case -NFS4ERR_WRONGSEC:
2280 break;
2281 default:
2282 err = nfs4_handle_exception(server, err, &exception);
2283 }
2284 } while (exception.retry);
2285 return err;
2286}
2287
2288static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2289 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2290{
2291 struct rpc_auth *auth;
2292 int ret;
2293
2294 auth = rpcauth_create(flavor, server->client);
2295 if (!auth) {
2296 ret = -EIO;
2297 goto out;
2298 }
2299 ret = nfs4_lookup_root(server, fhandle, info);
2300out:
2301 return ret;
2302}
2303
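/*
 * Called when looking up the root filehandle failed with NFS4ERR_WRONGSEC:
 * probe for a security flavor the server will accept by trying each
 * registered GSS pseudoflavor and finally AUTH_NULL, stopping at the first
 * LOOKUP that does not fail with NFS4ERR_WRONGSEC or -EACCES.
 */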
2304static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2305 struct nfs_fsinfo *info)
2306{
2307 int i, len, status = 0;
2308 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2309
2310 len = gss_mech_list_pseudoflavors(&flav_array[0]);
2311 flav_array[len] = RPC_AUTH_NULL;
2312 len += 1;
2313
2314 for (i = 0; i < len; i++) {
2315 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2316 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2317 continue;
2318 break;
2319 }
2320 /*
2321	 * -EACCES could mean that the user doesn't have correct permissions
2322 * to access the mount. It could also mean that we tried to mount
2323 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2324 * existing mount programs don't handle -EACCES very well so it should
2325 * be mapped to -EPERM instead.
2326 */
2327 if (status == -EACCES)
2328 status = -EPERM;
2329 return status;
2330}
2331
2332/*
2333 * get the file handle for the "/" directory on the server
2334 */
2335static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2336 struct nfs_fsinfo *info)
2337{
2338 int minor_version = server->nfs_client->cl_minorversion;
2339 int status = nfs4_lookup_root(server, fhandle, info);
2340 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2341 /*
2342 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2343 * by nfs4_map_errors() as this function exits.
2344 */
2345 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2346 if (status == 0)
2347 status = nfs4_server_capabilities(server, fhandle);
2348 if (status == 0)
2349 status = nfs4_do_fsinfo(server, fhandle, info);
2350 return nfs4_map_errors(status);
2351}
2352
2353/*
2354 * Get locations and (maybe) other attributes of a referral.
2355 * Note that we'll actually follow the referral later, when
2356 * we detect an fsid mismatch during inode revalidation.
2357 */
2358static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
2359 struct nfs_fattr *fattr, struct nfs_fh *fhandle)
2360{
2361 int status = -ENOMEM;
2362 struct page *page = NULL;
2363 struct nfs4_fs_locations *locations = NULL;
2364
2365 page = alloc_page(GFP_KERNEL);
2366 if (page == NULL)
2367 goto out;
2368 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2369 if (locations == NULL)
2370 goto out;
2371
2372 status = nfs4_proc_fs_locations(dir, name, locations, page);
2373 if (status != 0)
2374 goto out;
2375 /* Make sure server returned a different fsid for the referral */
2376 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2377 dprintk("%s: server did not return a different fsid for"
2378 " a referral at %s\n", __func__, name->name);
2379 status = -EIO;
2380 goto out;
2381 }
2382 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2383 nfs_fixup_referral_attributes(&locations->fattr);
2384
2385 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2386 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2387 memset(fhandle, 0, sizeof(struct nfs_fh));
2388out:
2389 if (page)
2390 __free_page(page);
2391 kfree(locations);
2392 return status;
2393}
2394
2395static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2396{
2397 struct nfs4_getattr_arg args = {
2398 .fh = fhandle,
2399 .bitmask = server->attr_bitmask,
2400 };
2401 struct nfs4_getattr_res res = {
2402 .fattr = fattr,
2403 .server = server,
2404 };
2405 struct rpc_message msg = {
2406 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2407 .rpc_argp = &args,
2408 .rpc_resp = &res,
2409 };
2410
2411 nfs_fattr_init(fattr);
2412 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2413}
2414
2415static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2416{
2417 struct nfs4_exception exception = { };
2418 int err;
2419 do {
2420 err = nfs4_handle_exception(server,
2421 _nfs4_proc_getattr(server, fhandle, fattr),
2422 &exception);
2423 } while (exception.retry);
2424 return err;
2425}
2426
2427/*
2428 * The file is not closed if it is opened due to a request to change
2429 * the size of the file. The open call will not be needed once the
2430 * VFS layer lookup-intents are implemented.
2431 *
2432 * Close is called when the inode is destroyed.
2433 * If we haven't opened the file for O_WRONLY, we
2434 * need to do so in the size_change case in order to obtain a stateid.
2435 *
2436 * Got race?
2437 * Because OPEN is always done by name in NFSv4, it is
2438 * possible that we opened a different file by the same
2439 * name. We can recognize this race condition, but we
2440 * can't do anything about it besides returning an error.
2441 *
2442 * This will be fixed with VFS changes (lookup-intent).
2443 */
2444static int
2445nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2446 struct iattr *sattr)
2447{
2448 struct inode *inode = dentry->d_inode;
2449 struct rpc_cred *cred = NULL;
2450 struct nfs4_state *state = NULL;
2451 int status;
2452
2453 if (pnfs_ld_layoutret_on_setattr(inode))
2454 pnfs_return_layout(inode);
2455
2456 nfs_fattr_init(fattr);
2457
2458 /* Search for an existing open(O_WRITE) file */
2459 if (sattr->ia_valid & ATTR_FILE) {
2460 struct nfs_open_context *ctx;
2461
2462 ctx = nfs_file_open_context(sattr->ia_file);
2463 if (ctx) {
2464 cred = ctx->cred;
2465 state = ctx->state;
2466 }
2467 }
2468
2469 /* Deal with open(O_TRUNC) */
2470 if (sattr->ia_valid & ATTR_OPEN)
2471 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2472
2473 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2474 if (status == 0)
2475 nfs_setattr_update_inode(inode, sattr);
2476 return status;
2477}
2478
2479static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2480 const struct qstr *name, struct nfs_fh *fhandle,
2481 struct nfs_fattr *fattr)
2482{
2483 struct nfs_server *server = NFS_SERVER(dir);
2484 int status;
2485 struct nfs4_lookup_arg args = {
2486 .bitmask = server->attr_bitmask,
2487 .dir_fh = NFS_FH(dir),
2488 .name = name,
2489 };
2490 struct nfs4_lookup_res res = {
2491 .server = server,
2492 .fattr = fattr,
2493 .fh = fhandle,
2494 };
2495 struct rpc_message msg = {
2496 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2497 .rpc_argp = &args,
2498 .rpc_resp = &res,
2499 };
2500
2501 nfs_fattr_init(fattr);
2502
2503 dprintk("NFS call lookup %s\n", name->name);
2504 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2505 dprintk("NFS reply lookup: %d\n", status);
2506 return status;
2507}
2508
2509void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh)
2510{
2511 memset(fh, 0, sizeof(struct nfs_fh));
2512 fattr->fsid.major = 1;
2513 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2514 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT;
2515 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2516 fattr->nlink = 2;
2517}
2518
2519static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
2520 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2521{
2522 struct nfs4_exception exception = { };
2523 int err;
2524 do {
2525 int status;
2526
2527 status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr);
2528 switch (status) {
2529 case -NFS4ERR_BADNAME:
2530 return -ENOENT;
2531 case -NFS4ERR_MOVED:
2532 return nfs4_get_referral(dir, name, fattr, fhandle);
2533 case -NFS4ERR_WRONGSEC:
2534 nfs_fixup_secinfo_attributes(fattr, fhandle);
2535 }
2536 err = nfs4_handle_exception(NFS_SERVER(dir),
2537 status, &exception);
2538 } while (exception.retry);
2539 return err;
2540}
2541
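/*
 * Translate the generic MAY_* permission mask into the NFS4_ACCESS_* bits
 * for an ACCESS call (directories map MAY_WRITE and MAY_EXEC differently
 * from regular files), then convert the server's reply back into the
 * entry's mask. The post-op attributes are used to refresh the inode.
 */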
2542static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2543{
2544 struct nfs_server *server = NFS_SERVER(inode);
2545 struct nfs4_accessargs args = {
2546 .fh = NFS_FH(inode),
2547 .bitmask = server->cache_consistency_bitmask,
2548 };
2549 struct nfs4_accessres res = {
2550 .server = server,
2551 };
2552 struct rpc_message msg = {
2553 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2554 .rpc_argp = &args,
2555 .rpc_resp = &res,
2556 .rpc_cred = entry->cred,
2557 };
2558 int mode = entry->mask;
2559 int status;
2560
2561 /*
2562 * Determine which access bits we want to ask for...
2563 */
2564 if (mode & MAY_READ)
2565 args.access |= NFS4_ACCESS_READ;
2566 if (S_ISDIR(inode->i_mode)) {
2567 if (mode & MAY_WRITE)
2568 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2569 if (mode & MAY_EXEC)
2570 args.access |= NFS4_ACCESS_LOOKUP;
2571 } else {
2572 if (mode & MAY_WRITE)
2573 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2574 if (mode & MAY_EXEC)
2575 args.access |= NFS4_ACCESS_EXECUTE;
2576 }
2577
2578 res.fattr = nfs_alloc_fattr();
2579 if (res.fattr == NULL)
2580 return -ENOMEM;
2581
2582 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2583 if (!status) {
2584 entry->mask = 0;
2585 if (res.access & NFS4_ACCESS_READ)
2586 entry->mask |= MAY_READ;
2587 if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2588 entry->mask |= MAY_WRITE;
2589 if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2590 entry->mask |= MAY_EXEC;
2591 nfs_refresh_inode(inode, res.fattr);
2592 }
2593 nfs_free_fattr(res.fattr);
2594 return status;
2595}
2596
2597static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2598{
2599 struct nfs4_exception exception = { };
2600 int err;
2601 do {
2602 err = nfs4_handle_exception(NFS_SERVER(inode),
2603 _nfs4_proc_access(inode, entry),
2604 &exception);
2605 } while (exception.retry);
2606 return err;
2607}
2608
2609/*
2610 * TODO: For the time being, we don't try to get any attributes
2611 * along with any of the zero-copy operations READ, READDIR,
2612 * READLINK, WRITE.
2613 *
2614 * In the case of the first three, we want to put the GETATTR
2615 * after the read-type operation -- this is because it is hard
2616 * to predict the length of a GETATTR response in v4, and thus
2617 * align the READ data correctly. This means that the GETATTR
2618 * may end up partially falling into the page cache, and we should
2619 * shift it into the 'tail' of the xdr_buf before processing.
2620 * To do this efficiently, we need to know the total length
2621 * of data received, which doesn't seem to be available outside
2622 * of the RPC layer.
2623 *
2624 * In the case of WRITE, we also want to put the GETATTR after
2625 * the operation -- in this case because we want to make sure
2626 * we get the post-operation mtime and size. This means that
2627 * we can't use xdr_encode_pages() as written: we need a variant
2628 * of it which would leave room in the 'tail' iovec.
2629 *
2630 * Both of these changes to the XDR layer would in fact be quite
2631 * minor, but I decided to leave them for a subsequent patch.
2632 */
2633static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2634 unsigned int pgbase, unsigned int pglen)
2635{
2636 struct nfs4_readlink args = {
2637 .fh = NFS_FH(inode),
2638 .pgbase = pgbase,
2639 .pglen = pglen,
2640 .pages = &page,
2641 };
2642 struct nfs4_readlink_res res;
2643 struct rpc_message msg = {
2644 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2645 .rpc_argp = &args,
2646 .rpc_resp = &res,
2647 };
2648
2649 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2650}
2651
2652static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2653 unsigned int pgbase, unsigned int pglen)
2654{
2655 struct nfs4_exception exception = { };
2656 int err;
2657 do {
2658 err = nfs4_handle_exception(NFS_SERVER(inode),
2659 _nfs4_proc_readlink(inode, page, pgbase, pglen),
2660 &exception);
2661 } while (exception.retry);
2662 return err;
2663}
2664
2665/*
2666 * Got race?
2667 * We will need to arrange for the VFS layer to provide an atomic open.
2668 * Until then, this create/open method is prone to inefficiency and race
2669 * conditions due to the lookup, create, and open VFS calls from sys_open()
2670 * placed on the wire.
2671 *
2672 * Given the above sorry state of affairs, I'm simply sending an OPEN.
2673 * The file will be opened again in the subsequent VFS open call
2674 * (nfs4_proc_file_open).
2675 *
2676 * The open for read will just hang around to be used by any process that
2677 * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2678 */
2679
2680static int
2681nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2682 int flags, struct nfs_open_context *ctx)
2683{
2684 struct dentry *de = dentry;
2685 struct nfs4_state *state;
2686 struct rpc_cred *cred = NULL;
2687 fmode_t fmode = 0;
2688 int status = 0;
2689
2690 if (ctx != NULL) {
2691 cred = ctx->cred;
2692 de = ctx->dentry;
2693 fmode = ctx->mode;
2694 }
2695 sattr->ia_mode &= ~current_umask();
2696 state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
2697 d_drop(dentry);
2698 if (IS_ERR(state)) {
2699 status = PTR_ERR(state);
2700 goto out;
2701 }
2702 d_add(dentry, igrab(state->inode));
2703 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2704 if (ctx != NULL)
2705 ctx->state = state;
2706 else
2707 nfs4_close_sync(state, fmode);
2708out:
2709 return status;
2710}
2711
2712static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2713{
2714 struct nfs_server *server = NFS_SERVER(dir);
2715 struct nfs_removeargs args = {
2716 .fh = NFS_FH(dir),
2717 .name.len = name->len,
2718 .name.name = name->name,
2719 .bitmask = server->attr_bitmask,
2720 };
2721 struct nfs_removeres res = {
2722 .server = server,
2723 };
2724 struct rpc_message msg = {
2725 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2726 .rpc_argp = &args,
2727 .rpc_resp = &res,
2728 };
2729 int status = -ENOMEM;
2730
2731 res.dir_attr = nfs_alloc_fattr();
2732 if (res.dir_attr == NULL)
2733 goto out;
2734
2735 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2736 if (status == 0) {
2737 update_changeattr(dir, &res.cinfo);
2738 nfs_post_op_update_inode(dir, res.dir_attr);
2739 }
2740 nfs_free_fattr(res.dir_attr);
2741out:
2742 return status;
2743}
2744
2745static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2746{
2747 struct nfs4_exception exception = { };
2748 int err;
2749 do {
2750 err = nfs4_handle_exception(NFS_SERVER(dir),
2751 _nfs4_proc_remove(dir, name),
2752 &exception);
2753 } while (exception.retry);
2754 return err;
2755}
2756
2757static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2758{
2759 struct nfs_server *server = NFS_SERVER(dir);
2760 struct nfs_removeargs *args = msg->rpc_argp;
2761 struct nfs_removeres *res = msg->rpc_resp;
2762
2763 args->bitmask = server->cache_consistency_bitmask;
2764 res->server = server;
2765 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2766 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
2767}
2768
2769static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2770{
2771 struct nfs_removeres *res = task->tk_msg.rpc_resp;
2772
2773 if (!nfs4_sequence_done(task, &res->seq_res))
2774 return 0;
2775 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2776 return 0;
2777 update_changeattr(dir, &res->cinfo);
2778 nfs_post_op_update_inode(dir, res->dir_attr);
2779 return 1;
2780}
2781
2782static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2783{
2784 struct nfs_server *server = NFS_SERVER(dir);
2785 struct nfs_renameargs *arg = msg->rpc_argp;
2786 struct nfs_renameres *res = msg->rpc_resp;
2787
2788 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2789 arg->bitmask = server->attr_bitmask;
2790 res->server = server;
2791 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2792}
2793
2794static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2795 struct inode *new_dir)
2796{
2797 struct nfs_renameres *res = task->tk_msg.rpc_resp;
2798
2799 if (!nfs4_sequence_done(task, &res->seq_res))
2800 return 0;
2801 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2802 return 0;
2803
2804 update_changeattr(old_dir, &res->old_cinfo);
2805 nfs_post_op_update_inode(old_dir, res->old_fattr);
2806 update_changeattr(new_dir, &res->new_cinfo);
2807 nfs_post_op_update_inode(new_dir, res->new_fattr);
2808 return 1;
2809}
2810
2811static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2812 struct inode *new_dir, struct qstr *new_name)
2813{
2814 struct nfs_server *server = NFS_SERVER(old_dir);
2815 struct nfs_renameargs arg = {
2816 .old_dir = NFS_FH(old_dir),
2817 .new_dir = NFS_FH(new_dir),
2818 .old_name = old_name,
2819 .new_name = new_name,
2820 .bitmask = server->attr_bitmask,
2821 };
2822 struct nfs_renameres res = {
2823 .server = server,
2824 };
2825 struct rpc_message msg = {
2826 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2827 .rpc_argp = &arg,
2828 .rpc_resp = &res,
2829 };
2830 int status = -ENOMEM;
2831
2832 res.old_fattr = nfs_alloc_fattr();
2833 res.new_fattr = nfs_alloc_fattr();
2834 if (res.old_fattr == NULL || res.new_fattr == NULL)
2835 goto out;
2836
2837 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2838 if (!status) {
2839 update_changeattr(old_dir, &res.old_cinfo);
2840 nfs_post_op_update_inode(old_dir, res.old_fattr);
2841 update_changeattr(new_dir, &res.new_cinfo);
2842 nfs_post_op_update_inode(new_dir, res.new_fattr);
2843 }
2844out:
2845 nfs_free_fattr(res.new_fattr);
2846 nfs_free_fattr(res.old_fattr);
2847 return status;
2848}
2849
2850static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2851 struct inode *new_dir, struct qstr *new_name)
2852{
2853 struct nfs4_exception exception = { };
2854 int err;
2855 do {
2856 err = nfs4_handle_exception(NFS_SERVER(old_dir),
2857 _nfs4_proc_rename(old_dir, old_name,
2858 new_dir, new_name),
2859 &exception);
2860 } while (exception.retry);
2861 return err;
2862}
2863
2864static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2865{
2866 struct nfs_server *server = NFS_SERVER(inode);
2867 struct nfs4_link_arg arg = {
2868 .fh = NFS_FH(inode),
2869 .dir_fh = NFS_FH(dir),
2870 .name = name,
2871 .bitmask = server->attr_bitmask,
2872 };
2873 struct nfs4_link_res res = {
2874 .server = server,
2875 };
2876 struct rpc_message msg = {
2877 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
2878 .rpc_argp = &arg,
2879 .rpc_resp = &res,
2880 };
2881 int status = -ENOMEM;
2882
2883 res.fattr = nfs_alloc_fattr();
2884 res.dir_attr = nfs_alloc_fattr();
2885 if (res.fattr == NULL || res.dir_attr == NULL)
2886 goto out;
2887
2888 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2889 if (!status) {
2890 update_changeattr(dir, &res.cinfo);
2891 nfs_post_op_update_inode(dir, res.dir_attr);
2892 nfs_post_op_update_inode(inode, res.fattr);
2893 }
2894out:
2895 nfs_free_fattr(res.dir_attr);
2896 nfs_free_fattr(res.fattr);
2897 return status;
2898}
2899
2900static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2901{
2902 struct nfs4_exception exception = { };
2903 int err;
2904 do {
2905 err = nfs4_handle_exception(NFS_SERVER(inode),
2906 _nfs4_proc_link(inode, dir, name),
2907 &exception);
2908 } while (exception.retry);
2909 return err;
2910}
2911
2912struct nfs4_createdata {
2913 struct rpc_message msg;
2914 struct nfs4_create_arg arg;
2915 struct nfs4_create_res res;
2916 struct nfs_fh fh;
2917 struct nfs_fattr fattr;
2918 struct nfs_fattr dir_fattr;
2919};
2920
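/*
 * Allocate and pre-initialise the state shared by the CREATE-based
 * operations (symlink, mkdir, mknod): the RPC message, the CREATE
 * arguments and the result buffers all live in a single allocation.
 */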
2921static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
2922 struct qstr *name, struct iattr *sattr, u32 ftype)
2923{
2924 struct nfs4_createdata *data;
2925
2926 data = kzalloc(sizeof(*data), GFP_KERNEL);
2927 if (data != NULL) {
2928 struct nfs_server *server = NFS_SERVER(dir);
2929
2930 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
2931 data->msg.rpc_argp = &data->arg;
2932 data->msg.rpc_resp = &data->res;
2933 data->arg.dir_fh = NFS_FH(dir);
2934 data->arg.server = server;
2935 data->arg.name = name;
2936 data->arg.attrs = sattr;
2937 data->arg.ftype = ftype;
2938 data->arg.bitmask = server->attr_bitmask;
2939 data->res.server = server;
2940 data->res.fh = &data->fh;
2941 data->res.fattr = &data->fattr;
2942 data->res.dir_fattr = &data->dir_fattr;
2943 nfs_fattr_init(data->res.fattr);
2944 nfs_fattr_init(data->res.dir_fattr);
2945 }
2946 return data;
2947}
2948
2949static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
2950{
2951 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
2952 &data->arg.seq_args, &data->res.seq_res, 1);
2953 if (status == 0) {
2954 update_changeattr(dir, &data->res.dir_cinfo);
2955 nfs_post_op_update_inode(dir, data->res.dir_fattr);
2956 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
2957 }
2958 return status;
2959}
2960
2961static void nfs4_free_createdata(struct nfs4_createdata *data)
2962{
2963 kfree(data);
2964}
2965
2966static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
2967 struct page *page, unsigned int len, struct iattr *sattr)
2968{
2969 struct nfs4_createdata *data;
2970 int status = -ENAMETOOLONG;
2971
2972 if (len > NFS4_MAXPATHLEN)
2973 goto out;
2974
2975 status = -ENOMEM;
2976 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
2977 if (data == NULL)
2978 goto out;
2979
2980 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
2981 data->arg.u.symlink.pages = &page;
2982 data->arg.u.symlink.len = len;
2983
2984 status = nfs4_do_create(dir, dentry, data);
2985
2986 nfs4_free_createdata(data);
2987out:
2988 return status;
2989}
2990
2991static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
2992 struct page *page, unsigned int len, struct iattr *sattr)
2993{
2994 struct nfs4_exception exception = { };
2995 int err;
2996 do {
2997 err = nfs4_handle_exception(NFS_SERVER(dir),
2998 _nfs4_proc_symlink(dir, dentry, page,
2999 len, sattr),
3000 &exception);
3001 } while (exception.retry);
3002 return err;
3003}
3004
3005static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3006 struct iattr *sattr)
3007{
3008 struct nfs4_createdata *data;
3009 int status = -ENOMEM;
3010
3011 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3012 if (data == NULL)
3013 goto out;
3014
3015 status = nfs4_do_create(dir, dentry, data);
3016
3017 nfs4_free_createdata(data);
3018out:
3019 return status;
3020}
3021
3022static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3023 struct iattr *sattr)
3024{
3025 struct nfs4_exception exception = { };
3026 int err;
3027
3028 sattr->ia_mode &= ~current_umask();
3029 do {
3030 err = nfs4_handle_exception(NFS_SERVER(dir),
3031 _nfs4_proc_mkdir(dir, dentry, sattr),
3032 &exception);
3033 } while (exception.retry);
3034 return err;
3035}
3036
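/*
 * READDIR. The cookie verifier returned by the server is cached in the
 * directory inode so that subsequent calls can resume the listing, and
 * the directory's atime is invalidated since reading the directory
 * changes it on the server.
 */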
3037static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3038 u64 cookie, struct page **pages, unsigned int count, int plus)
3039{
3040 struct inode *dir = dentry->d_inode;
3041 struct nfs4_readdir_arg args = {
3042 .fh = NFS_FH(dir),
3043 .pages = pages,
3044 .pgbase = 0,
3045 .count = count,
3046 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3047 .plus = plus,
3048 };
3049 struct nfs4_readdir_res res;
3050 struct rpc_message msg = {
3051 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3052 .rpc_argp = &args,
3053 .rpc_resp = &res,
3054 .rpc_cred = cred,
3055 };
3056 int status;
3057
3058 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3059 dentry->d_parent->d_name.name,
3060 dentry->d_name.name,
3061 (unsigned long long)cookie);
3062 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
3063 res.pgbase = args.pgbase;
3064 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3065 if (status >= 0) {
3066 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
3067 status += args.pgbase;
3068 }
3069
3070 nfs_invalidate_atime(dir);
3071
3072 dprintk("%s: returns %d\n", __func__, status);
3073 return status;
3074}
3075
3076static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3077 u64 cookie, struct page **pages, unsigned int count, int plus)
3078{
3079 struct nfs4_exception exception = { };
3080 int err;
3081 do {
3082 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3083 _nfs4_proc_readdir(dentry, cred, cookie,
3084 pages, count, plus),
3085 &exception);
3086 } while (exception.retry);
3087 return err;
3088}
3089
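/*
 * MKNOD is implemented on top of CREATE: the S_IF* mode bits are mapped
 * to the corresponding NF4* file type, and block/character devices carry
 * their major/minor numbers in the specdata fields of the CREATE
 * arguments.
 */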
3090static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3091 struct iattr *sattr, dev_t rdev)
3092{
3093 struct nfs4_createdata *data;
3094 int mode = sattr->ia_mode;
3095 int status = -ENOMEM;
3096
3097 BUG_ON(!(sattr->ia_valid & ATTR_MODE));
3098 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
3099
3100 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3101 if (data == NULL)
3102 goto out;
3103
3104 if (S_ISFIFO(mode))
3105 data->arg.ftype = NF4FIFO;
3106 else if (S_ISBLK(mode)) {
3107 data->arg.ftype = NF4BLK;
3108 data->arg.u.device.specdata1 = MAJOR(rdev);
3109 data->arg.u.device.specdata2 = MINOR(rdev);
3110 }
3111 else if (S_ISCHR(mode)) {
3112 data->arg.ftype = NF4CHR;
3113 data->arg.u.device.specdata1 = MAJOR(rdev);
3114 data->arg.u.device.specdata2 = MINOR(rdev);
3115 }
3116
3117 status = nfs4_do_create(dir, dentry, data);
3118
3119 nfs4_free_createdata(data);
3120out:
3121 return status;
3122}
3123
3124static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3125 struct iattr *sattr, dev_t rdev)
3126{
3127 struct nfs4_exception exception = { };
3128 int err;
3129
3130 sattr->ia_mode &= ~current_umask();
3131 do {
3132 err = nfs4_handle_exception(NFS_SERVER(dir),
3133 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3134 &exception);
3135 } while (exception.retry);
3136 return err;
3137}
3138
3139static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3140 struct nfs_fsstat *fsstat)
3141{
3142 struct nfs4_statfs_arg args = {
3143 .fh = fhandle,
3144 .bitmask = server->attr_bitmask,
3145 };
3146 struct nfs4_statfs_res res = {
3147 .fsstat = fsstat,
3148 };
3149 struct rpc_message msg = {
3150 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3151 .rpc_argp = &args,
3152 .rpc_resp = &res,
3153 };
3154
3155 nfs_fattr_init(fsstat->fattr);
3156 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3157}
3158
3159static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3160{
3161 struct nfs4_exception exception = { };
3162 int err;
3163 do {
3164 err = nfs4_handle_exception(server,
3165 _nfs4_proc_statfs(server, fhandle, fsstat),
3166 &exception);
3167 } while (exception.retry);
3168 return err;
3169}
3170
3171static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3172 struct nfs_fsinfo *fsinfo)
3173{
3174 struct nfs4_fsinfo_arg args = {
3175 .fh = fhandle,
3176 .bitmask = server->attr_bitmask,
3177 };
3178 struct nfs4_fsinfo_res res = {
3179 .fsinfo = fsinfo,
3180 };
3181 struct rpc_message msg = {
3182 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3183 .rpc_argp = &args,
3184 .rpc_resp = &res,
3185 };
3186
3187 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3188}
3189
3190static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3191{
3192 struct nfs4_exception exception = { };
3193 int err;
3194
3195 do {
3196 err = nfs4_handle_exception(server,
3197 _nfs4_do_fsinfo(server, fhandle, fsinfo),
3198 &exception);
3199 } while (exception.retry);
3200 return err;
3201}
3202
3203static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3204{
3205 nfs_fattr_init(fsinfo->fattr);
3206 return nfs4_do_fsinfo(server, fhandle, fsinfo);
3207}
3208
3209static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3210 struct nfs_pathconf *pathconf)
3211{
3212 struct nfs4_pathconf_arg args = {
3213 .fh = fhandle,
3214 .bitmask = server->attr_bitmask,
3215 };
3216 struct nfs4_pathconf_res res = {
3217 .pathconf = pathconf,
3218 };
3219 struct rpc_message msg = {
3220 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3221 .rpc_argp = &args,
3222 .rpc_resp = &res,
3223 };
3224
3225 /* None of the pathconf attributes are mandatory to implement */
3226 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3227 memset(pathconf, 0, sizeof(*pathconf));
3228 return 0;
3229 }
3230
3231 nfs_fattr_init(pathconf->fattr);
3232 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3233}
3234
3235static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3236 struct nfs_pathconf *pathconf)
3237{
3238 struct nfs4_exception exception = { };
3239 int err;
3240
3241 do {
3242 err = nfs4_handle_exception(server,
3243 _nfs4_proc_pathconf(server, fhandle, pathconf),
3244 &exception);
3245 } while (exception.retry);
3246 return err;
3247}
3248
3249void __nfs4_read_done_cb(struct nfs_read_data *data)
3250{
3251 nfs_invalidate_atime(data->inode);
3252}
3253
3254static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3255{
3256 struct nfs_server *server = NFS_SERVER(data->inode);
3257
3258 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3259 rpc_restart_call_prepare(task);
3260 return -EAGAIN;
3261 }
3262
3263 __nfs4_read_done_cb(data);
3264 if (task->tk_status > 0)
3265 renew_lease(server, data->timestamp);
3266 return 0;
3267}
3268
3269static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3270{
3271
3272 dprintk("--> %s\n", __func__);
3273
3274 if (!nfs4_sequence_done(task, &data->res.seq_res))
3275 return -EAGAIN;
3276
3277 return data->read_done_cb ? data->read_done_cb(task, data) :
3278 nfs4_read_done_cb(task, data);
3279}
3280
3281static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3282{
3283 data->timestamp = jiffies;
3284 data->read_done_cb = nfs4_read_done_cb;
3285 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3286 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3287}
3288
3289/* Reset the nfs_read_data to send the read to the MDS. */
3290void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
3291{
3292	dprintk("%s Reset task for i/o through the MDS\n", __func__);
3293 put_lseg(data->lseg);
3294 data->lseg = NULL;
3295 /* offsets will differ in the dense stripe case */
3296 data->args.offset = data->mds_offset;
3297 data->ds_clp = NULL;
3298 data->args.fh = NFS_FH(data->inode);
3299 data->read_done_cb = nfs4_read_done_cb;
3300 task->tk_ops = data->mds_ops;
3301 rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3302}
3303EXPORT_SYMBOL_GPL(nfs4_reset_read);
3304
3305static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3306{
3307 struct inode *inode = data->inode;
3308
3309 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3310 rpc_restart_call_prepare(task);
3311 return -EAGAIN;
3312 }
3313 if (task->tk_status >= 0) {
3314 renew_lease(NFS_SERVER(inode), data->timestamp);
3315 nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
3316 }
3317 return 0;
3318}
3319
3320static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3321{
3322 if (!nfs4_sequence_done(task, &data->res.seq_res))
3323 return -EAGAIN;
3324 return data->write_done_cb ? data->write_done_cb(task, data) :
3325 nfs4_write_done_cb(task, data);
3326}
3327
3328/* Reset the nfs_write_data to send the write to the MDS. */
3329void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
3330{
3331	dprintk("%s Reset task for i/o through the MDS\n", __func__);
3332 put_lseg(data->lseg);
3333 data->lseg = NULL;
3334 data->ds_clp = NULL;
3335 data->write_done_cb = nfs4_write_done_cb;
3336 data->args.fh = NFS_FH(data->inode);
3337 data->args.bitmask = data->res.server->cache_consistency_bitmask;
3338 data->args.offset = data->mds_offset;
3339 data->res.fattr = &data->fattr;
3340 task->tk_ops = data->mds_ops;
3341 rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3342}
3343EXPORT_SYMBOL_GPL(nfs4_reset_write);
3344
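/*
 * Prepare the WRITE rpc message. Writes that go through a pNFS layout
 * segment skip the post-op attribute request (the fattr and bitmask are
 * cleared); writes sent to the MDS ask for the cache consistency
 * attributes so that the inode can be updated from the reply.
 */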
3345static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3346{
3347 struct nfs_server *server = NFS_SERVER(data->inode);
3348
3349 if (data->lseg) {
3350 data->args.bitmask = NULL;
3351 data->res.fattr = NULL;
3352 } else
3353 data->args.bitmask = server->cache_consistency_bitmask;
3354 if (!data->write_done_cb)
3355 data->write_done_cb = nfs4_write_done_cb;
3356 data->res.server = server;
3357 data->timestamp = jiffies;
3358
3359 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3360 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3361}
3362
3363static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3364{
3365 struct inode *inode = data->inode;
3366
3367 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3368 rpc_restart_call_prepare(task);
3369 return -EAGAIN;
3370 }
3371 nfs_refresh_inode(inode, data->res.fattr);
3372 return 0;
3373}
3374
3375static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
3376{
3377 if (!nfs4_sequence_done(task, &data->res.seq_res))
3378 return -EAGAIN;
3379 return data->write_done_cb(task, data);
3380}
3381
3382static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
3383{
3384 struct nfs_server *server = NFS_SERVER(data->inode);
3385
3386 if (data->lseg) {
3387 data->args.bitmask = NULL;
3388 data->res.fattr = NULL;
3389 } else
3390 data->args.bitmask = server->cache_consistency_bitmask;
3391 if (!data->write_done_cb)
3392 data->write_done_cb = nfs4_commit_done_cb;
3393 data->res.server = server;
3394 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3395 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3396}
3397
3398struct nfs4_renewdata {
3399 struct nfs_client *client;
3400 unsigned long timestamp;
3401};
3402
3403/*
3404 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3405 * standalone procedure for queueing an asynchronous RENEW.
3406 */
3407static void nfs4_renew_release(void *calldata)
3408{
3409 struct nfs4_renewdata *data = calldata;
3410 struct nfs_client *clp = data->client;
3411
3412 if (atomic_read(&clp->cl_count) > 1)
3413 nfs4_schedule_state_renewal(clp);
3414 nfs_put_client(clp);
3415 kfree(data);
3416}
3417
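/*
 * Completion handler for the asynchronous RENEW. Unless renewd is being
 * shut down, an error schedules lease recovery; a callback-path-down error
 * instead schedules path-down recovery and, like a success, still updates
 * the lease timestamp via do_renew_lease().
 */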
3418static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3419{
3420 struct nfs4_renewdata *data = calldata;
3421 struct nfs_client *clp = data->client;
3422 unsigned long timestamp = data->timestamp;
3423
3424 if (task->tk_status < 0) {
3425 /* Unless we're shutting down, schedule state recovery! */
3426 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3427 return;
3428 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3429 nfs4_schedule_lease_recovery(clp);
3430 return;
3431 }
3432 nfs4_schedule_path_down_recovery(clp);
3433 }
3434 do_renew_lease(clp, timestamp);
3435}
3436
3437static const struct rpc_call_ops nfs4_renew_ops = {
3438 .rpc_call_done = nfs4_renew_done,
3439 .rpc_release = nfs4_renew_release,
3440};
3441
3442static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3443{
3444 struct rpc_message msg = {
3445 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3446 .rpc_argp = clp,
3447 .rpc_cred = cred,
3448 };
3449 struct nfs4_renewdata *data;
3450
3451 if (renew_flags == 0)
3452 return 0;
3453 if (!atomic_inc_not_zero(&clp->cl_count))
3454 return -EIO;
3455 data = kmalloc(sizeof(*data), GFP_NOFS);
3456 if (data == NULL)
3457 return -ENOMEM;
3458 data->client = clp;
3459 data->timestamp = jiffies;
3460 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3461 &nfs4_renew_ops, data);
3462}
3463
3464static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3465{
3466 struct rpc_message msg = {
3467 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3468 .rpc_argp = clp,
3469 .rpc_cred = cred,
3470 };
3471 unsigned long now = jiffies;
3472 int status;
3473
3474 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3475 if (status < 0)
3476 return status;
3477 do_renew_lease(clp, now);
3478 return 0;
3479}
3480
3481static inline int nfs4_server_supports_acls(struct nfs_server *server)
3482{
3483 return (server->caps & NFS_CAP_ACLS)
3484 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3485 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3486}
3487
3488/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3489 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3490 * the stack.
3491 */
3492#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3493
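/*
 * Copy a caller-supplied buffer into freshly allocated pages so that the
 * data can be handed to the RPC layer without pointing at slab memory.
 * Returns the number of pages filled, or -ENOMEM after releasing any
 * pages that were already allocated.
 */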
3494static int buf_to_pages_noslab(const void *buf, size_t buflen,
3495 struct page **pages, unsigned int *pgbase)
3496{
3497 struct page *newpage, **spages;
3498 int rc = 0;
3499 size_t len;
3500 spages = pages;
3501
3502 do {
3503 len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3504 newpage = alloc_page(GFP_KERNEL);
3505
3506 if (newpage == NULL)
3507 goto unwind;
3508 memcpy(page_address(newpage), buf, len);
3509 buf += len;
3510 buflen -= len;
3511 *pages++ = newpage;
3512 rc++;
3513 } while (buflen != 0);
3514
3515 return rc;
3516
3517unwind:
3518 for(; rc > 0; rc--)
3519 __free_page(spages[rc-1]);
3520 return -ENOMEM;
3521}
3522
3523struct nfs4_cached_acl {
3524 int cached;
3525 size_t len;
3526 char data[0];
3527};
3528
3529static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3530{
3531 struct nfs_inode *nfsi = NFS_I(inode);
3532
3533 spin_lock(&inode->i_lock);
3534 kfree(nfsi->nfs4_acl);
3535 nfsi->nfs4_acl = acl;
3536 spin_unlock(&inode->i_lock);
3537}
3538
3539static void nfs4_zap_acl_attr(struct inode *inode)
3540{
3541 nfs4_set_cached_acl(inode, NULL);
3542}
3543
3544static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3545{
3546 struct nfs_inode *nfsi = NFS_I(inode);
3547 struct nfs4_cached_acl *acl;
3548 int ret = -ENOENT;
3549
3550 spin_lock(&inode->i_lock);
3551 acl = nfsi->nfs4_acl;
3552 if (acl == NULL)
3553 goto out;
3554 if (buf == NULL) /* user is just asking for length */
3555 goto out_len;
3556 if (acl->cached == 0)
3557 goto out;
3558 ret = -ERANGE; /* see getxattr(2) man page */
3559 if (acl->len > buflen)
3560 goto out;
3561 memcpy(buf, acl->data, acl->len);
3562out_len:
3563 ret = acl->len;
3564out:
3565 spin_unlock(&inode->i_lock);
3566 return ret;
3567}
3568
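/*
 * Cache the ACL returned by the server. If the data fits in a single page
 * it is stored in full; otherwise only its length is remembered, so that a
 * later getxattr() probe for the required buffer size can still be
 * answered from the cache.
 */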
3569static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
3570{
3571 struct nfs4_cached_acl *acl;
3572
3573 if (buf && acl_len <= PAGE_SIZE) {
3574 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3575 if (acl == NULL)
3576 goto out;
3577 acl->cached = 1;
3578 memcpy(acl->data, buf, acl_len);
3579 } else {
3580 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3581 if (acl == NULL)
3582 goto out;
3583 acl->cached = 0;
3584 }
3585 acl->len = acl_len;
3586out:
3587 nfs4_set_cached_acl(inode, acl);
3588}
3589
3590/*
3591 * The getxattr API returns the required buffer length when called with a
3592 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3593 * the required buf. On a NULL buf, we send a page of data to the server
3594 * guessing that the ACL request can be serviced by a page. If so, we cache
3595 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3596 * the cache.  If not, we throw away the page and cache the required
3597 * length. The next getxattr call will then produce another round trip to
3598 * the server, this time with the input buf of the required size.
3599 */
3600static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3601{
3602 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3603 struct nfs_getaclargs args = {
3604 .fh = NFS_FH(inode),
3605 .acl_pages = pages,
3606 .acl_len = buflen,
3607 };
3608 struct nfs_getaclres res = {
3609 .acl_len = buflen,
3610 };
3611 void *resp_buf;
3612 struct rpc_message msg = {
3613 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3614 .rpc_argp = &args,
3615 .rpc_resp = &res,
3616 };
3617 int ret = -ENOMEM, npages, i, acl_len = 0;
3618
3619 npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3620 /* As long as we're doing a round trip to the server anyway,
3621 * let's be prepared for a page of acl data. */
3622 if (npages == 0)
3623 npages = 1;
3624
3625 for (i = 0; i < npages; i++) {
3626 pages[i] = alloc_page(GFP_KERNEL);
3627 if (!pages[i])
3628 goto out_free;
3629 }
3630 if (npages > 1) {
3631 /* for decoding across pages */
3632 res.acl_scratch = alloc_page(GFP_KERNEL);
3633 if (!res.acl_scratch)
3634 goto out_free;
3635 }
3636 args.acl_len = npages * PAGE_SIZE;
3637 args.acl_pgbase = 0;
3638	/* Let decode_getacl know not to fail if the ACL data is larger than
3639 * the page we send as a guess */
3640 if (buf == NULL)
3641 res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3642 resp_buf = page_address(pages[0]);
3643
3644 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3645 __func__, buf, buflen, npages, args.acl_len);
3646 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3647 &msg, &args.seq_args, &res.seq_res, 0);
3648 if (ret)
3649 goto out_free;
3650
3651 acl_len = res.acl_len - res.acl_data_offset;
3652 if (acl_len > args.acl_len)
3653 nfs4_write_cached_acl(inode, NULL, acl_len);
3654 else
3655 nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
3656 acl_len);
3657 if (buf) {
3658 ret = -ERANGE;
3659 if (acl_len > buflen)
3660 goto out_free;
3661 _copy_from_pages(buf, pages, res.acl_data_offset,
3662 acl_len);
3663 }
3664 ret = acl_len;
3665out_free:
3666 for (i = 0; i < npages; i++)
3667 if (pages[i])
3668 __free_page(pages[i]);
3669 if (res.acl_scratch)
3670 __free_page(res.acl_scratch);
3671 return ret;
3672}
3673
3674static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3675{
3676 struct nfs4_exception exception = { };
3677 ssize_t ret;
3678 do {
3679 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3680 if (ret >= 0)
3681 break;
3682 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3683 } while (exception.retry);
3684 return ret;
3685}
3686
3687static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3688{
3689 struct nfs_server *server = NFS_SERVER(inode);
3690 int ret;
3691
3692 if (!nfs4_server_supports_acls(server))
3693 return -EOPNOTSUPP;
3694 ret = nfs_revalidate_inode(server, inode);
3695 if (ret < 0)
3696 return ret;
3697 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3698 nfs_zap_acl_cache(inode);
3699 ret = nfs4_read_cached_acl(inode, buf, buflen);
3700 if (ret != -ENOENT)
3701 /* -ENOENT is returned if there is no ACL, or if there is an ACL
3702 * but no cached ACL data; only the ACL length is cached */
3703 return ret;
3704 return nfs4_get_acl_uncached(inode, buf, buflen);
3705}
3706
3707static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3708{
3709 struct nfs_server *server = NFS_SERVER(inode);
3710 struct page *pages[NFS4ACL_MAXPAGES];
3711 struct nfs_setaclargs arg = {
3712 .fh = NFS_FH(inode),
3713 .acl_pages = pages,
3714 .acl_len = buflen,
3715 };
3716 struct nfs_setaclres res;
3717 struct rpc_message msg = {
3718 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3719 .rpc_argp = &arg,
3720 .rpc_resp = &res,
3721 };
3722 int ret, i;
3723
3724 if (!nfs4_server_supports_acls(server))
3725 return -EOPNOTSUPP;
3726 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3727 if (i < 0)
3728 return i;
3729 nfs_inode_return_delegation(inode);
3730 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3731
3732 /*
3733 * Free each page after tx, so the only ref left is
3734 * held by the network stack
3735 */
3736 for (; i > 0; i--)
3737 put_page(pages[i-1]);
3738
3739 /*
3740 * An ACL update can result in an inode attribute update,
3741 * so mark the attribute cache invalid.
3742 */
3743 spin_lock(&inode->i_lock);
3744 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3745 spin_unlock(&inode->i_lock);
3746 nfs_access_zap_cache(inode);
3747 nfs_zap_acl_cache(inode);
3748 return ret;
3749}
3750
3751static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3752{
3753 struct nfs4_exception exception = { };
3754 int err;
3755 do {
3756 err = nfs4_handle_exception(NFS_SERVER(inode),
3757 __nfs4_proc_set_acl(inode, buf, buflen),
3758 &exception);
3759 } while (exception.retry);
3760 return err;
3761}
3762
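/*
 * Handle an error from an asynchronous RPC call: schedule stateid, lease or
 * session recovery as appropriate, and return -EAGAIN (with tk_status
 * cleared) whenever the caller should restart the call.
 */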
3763static int
3764nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3765{
3766 struct nfs_client *clp = server->nfs_client;
3767
3768 if (task->tk_status >= 0)
3769 return 0;
3770 switch(task->tk_status) {
3771 case -NFS4ERR_DELEG_REVOKED:
3772 case -NFS4ERR_ADMIN_REVOKED:
3773 case -NFS4ERR_BAD_STATEID:
3774 if (state != NULL)
3775 nfs_remove_bad_delegation(state->inode);
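			/* Fall through */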
3776 case -NFS4ERR_OPENMODE:
3777 if (state == NULL)
3778 break;
3779 nfs4_schedule_stateid_recovery(server, state);
3780 goto wait_on_recovery;
3781 case -NFS4ERR_EXPIRED:
3782 if (state != NULL)
3783 nfs4_schedule_stateid_recovery(server, state);
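			/* Fall through */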
3784 case -NFS4ERR_STALE_STATEID:
3785 case -NFS4ERR_STALE_CLIENTID:
3786 nfs4_schedule_lease_recovery(clp);
3787 goto wait_on_recovery;
3788#if defined(CONFIG_NFS_V4_1)
3789 case -NFS4ERR_BADSESSION:
3790 case -NFS4ERR_BADSLOT:
3791 case -NFS4ERR_BAD_HIGH_SLOT:
3792 case -NFS4ERR_DEADSESSION:
3793 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3794 case -NFS4ERR_SEQ_FALSE_RETRY:
3795 case -NFS4ERR_SEQ_MISORDERED:
3796 dprintk("%s ERROR %d, Reset session\n", __func__,
3797 task->tk_status);
3798 nfs4_schedule_session_recovery(clp->cl_session);
3799 task->tk_status = 0;
3800 return -EAGAIN;
3801#endif /* CONFIG_NFS_V4_1 */
3802 case -NFS4ERR_DELAY:
3803 nfs_inc_server_stats(server, NFSIOS_DELAY);
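			/* Fall through */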
3804 case -NFS4ERR_GRACE:
3805 case -EKEYEXPIRED:
3806 rpc_delay(task, NFS4_POLL_RETRY_MAX);
3807 task->tk_status = 0;
3808 return -EAGAIN;
3809 case -NFS4ERR_RETRY_UNCACHED_REP:
3810 case -NFS4ERR_OLD_STATEID:
3811 task->tk_status = 0;
3812 return -EAGAIN;
3813 }
3814 task->tk_status = nfs4_map_errors(task->tk_status);
3815 return 0;
3816wait_on_recovery:
3817 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3818 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3819 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3820 task->tk_status = 0;
3821 return -EAGAIN;
3822}
3823
3824int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3825 unsigned short port, struct rpc_cred *cred,
3826 struct nfs4_setclientid_res *res)
3827{
3828 nfs4_verifier sc_verifier;
3829 struct nfs4_setclientid setclientid = {
3830 .sc_verifier = &sc_verifier,
3831 .sc_prog = program,
3832 .sc_cb_ident = clp->cl_cb_ident,
3833 };
3834 struct rpc_message msg = {
3835 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
3836 .rpc_argp = &setclientid,
3837 .rpc_resp = res,
3838 .rpc_cred = cred,
3839 };
3840 __be32 *p;
3841 int loop = 0;
3842 int status;
3843
3844 p = (__be32*)sc_verifier.data;
3845 *p++ = htonl((u32)clp->cl_boot_time.tv_sec);
3846 *p = htonl((u32)clp->cl_boot_time.tv_nsec);
3847
3848 for(;;) {
3849 rcu_read_lock();
3850 setclientid.sc_name_len = scnprintf(setclientid.sc_name,
3851 sizeof(setclientid.sc_name), "%s/%s %s %s %u",
3852 clp->cl_ipaddr,
3853 rpc_peeraddr2str(clp->cl_rpcclient,
3854 RPC_DISPLAY_ADDR),
3855 rpc_peeraddr2str(clp->cl_rpcclient,
3856 RPC_DISPLAY_PROTO),
3857 clp->cl_rpcclient->cl_auth->au_ops->au_name,
3858 clp->cl_id_uniquifier);
3859 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
3860 sizeof(setclientid.sc_netid),
3861 rpc_peeraddr2str(clp->cl_rpcclient,
3862 RPC_DISPLAY_NETID));
3863 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
3864 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
3865 clp->cl_ipaddr, port >> 8, port & 255);
3866 rcu_read_unlock();
3867
3868 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
3869 if (status != -NFS4ERR_CLID_INUSE)
3870 break;
3871 if (loop != 0) {
3872 ++clp->cl_id_uniquifier;
3873 break;
3874 }
3875 ++loop;
3876 ssleep(clp->cl_lease_time / HZ + 1);
3877 }
3878 return status;
3879}
3880
3881int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
3882 struct nfs4_setclientid_res *arg,
3883 struct rpc_cred *cred)
3884{
3885 struct nfs_fsinfo fsinfo;
3886 struct rpc_message msg = {
3887 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
3888 .rpc_argp = arg,
3889 .rpc_resp = &fsinfo,
3890 .rpc_cred = cred,
3891 };
3892 unsigned long now;
3893 int status;
3894
3895 now = jiffies;
3896 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
3897 if (status == 0) {
3898 spin_lock(&clp->cl_lock);
3899 clp->cl_lease_time = fsinfo.lease_time * HZ;
3900 clp->cl_last_renewal = now;
3901 spin_unlock(&clp->cl_lock);
3902 }
3903 return status;
3904}
3905
3906struct nfs4_delegreturndata {
3907 struct nfs4_delegreturnargs args;
3908 struct nfs4_delegreturnres res;
3909 struct nfs_fh fh;
3910 nfs4_stateid stateid;
3911 unsigned long timestamp;
3912 struct nfs_fattr fattr;
3913 int rpc_status;
3914};
3915
3916static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
3917{
3918 struct nfs4_delegreturndata *data = calldata;
3919
3920 if (!nfs4_sequence_done(task, &data->res.seq_res))
3921 return;
3922
3923 switch (task->tk_status) {
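	/*
	 * A stale or expired stateid means the server no longer recognizes
	 * the delegation; treat these like success (the lease is renewed and
	 * the caller simply discards the delegation).
	 */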
3924 case -NFS4ERR_STALE_STATEID:
3925 case -NFS4ERR_EXPIRED:
3926 case 0:
3927 renew_lease(data->res.server, data->timestamp);
3928 break;
3929 default:
3930 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
3931 -EAGAIN) {
3932 rpc_restart_call_prepare(task);
3933 return;
3934 }
3935 }
3936 data->rpc_status = task->tk_status;
3937}
3938
3939static void nfs4_delegreturn_release(void *calldata)
3940{
3941 kfree(calldata);
3942}
3943
3944#if defined(CONFIG_NFS_V4_1)
3945static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
3946{
3947 struct nfs4_delegreturndata *d_data;
3948
3949 d_data = (struct nfs4_delegreturndata *)data;
3950
3951 if (nfs4_setup_sequence(d_data->res.server,
3952 &d_data->args.seq_args,
3953 &d_data->res.seq_res, task))
3954 return;
3955 rpc_call_start(task);
3956}
3957#endif /* CONFIG_NFS_V4_1 */
3958
3959static const struct rpc_call_ops nfs4_delegreturn_ops = {
3960#if defined(CONFIG_NFS_V4_1)
3961 .rpc_call_prepare = nfs4_delegreturn_prepare,
3962#endif /* CONFIG_NFS_V4_1 */
3963 .rpc_call_done = nfs4_delegreturn_done,
3964 .rpc_release = nfs4_delegreturn_release,
3965};
3966
3967static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
3968{
3969 struct nfs4_delegreturndata *data;
3970 struct nfs_server *server = NFS_SERVER(inode);
3971 struct rpc_task *task;
3972 struct rpc_message msg = {
3973 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
3974 .rpc_cred = cred,
3975 };
3976 struct rpc_task_setup task_setup_data = {
3977 .rpc_client = server->client,
3978 .rpc_message = &msg,
3979 .callback_ops = &nfs4_delegreturn_ops,
3980 .flags = RPC_TASK_ASYNC,
3981 };
3982 int status = 0;
3983
3984 data = kzalloc(sizeof(*data), GFP_NOFS);
3985 if (data == NULL)
3986 return -ENOMEM;
3987 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3988 data->args.fhandle = &data->fh;
3989 data->args.stateid = &data->stateid;
3990 data->args.bitmask = server->attr_bitmask;
3991 nfs_copy_fh(&data->fh, NFS_FH(inode));
3992 memcpy(&data->stateid, stateid, sizeof(data->stateid));
3993 data->res.fattr = &data->fattr;
3994 data->res.server = server;
3995 nfs_fattr_init(data->res.fattr);
3996 data->timestamp = jiffies;
3997 data->rpc_status = 0;
3998
3999 task_setup_data.callback_data = data;
4000 msg.rpc_argp = &data->args;
4001 msg.rpc_resp = &data->res;
4002 task = rpc_run_task(&task_setup_data);
4003 if (IS_ERR(task))
4004 return PTR_ERR(task);
4005 if (!issync)
4006 goto out;
4007 status = nfs4_wait_for_completion_rpc_task(task);
4008 if (status != 0)
4009 goto out;
4010 status = data->rpc_status;
4011 if (status != 0)
4012 goto out;
4013 nfs_refresh_inode(inode, &data->fattr);
4014out:
4015 rpc_put_task(task);
4016 return status;
4017}
4018
4019int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4020{
4021 struct nfs_server *server = NFS_SERVER(inode);
4022 struct nfs4_exception exception = { };
4023 int err;
4024 do {
4025 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4026 switch (err) {
4027 case -NFS4ERR_STALE_STATEID:
4028 case -NFS4ERR_EXPIRED:
4029 case 0:
4030 return 0;
4031 }
4032 err = nfs4_handle_exception(server, err, &exception);
4033 } while (exception.retry);
4034 return err;
4035}
4036
4037#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4038#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4039
4040/*
4041 * Sleep, with exponential backoff, and retry the LOCK operation.
4042 */
4043static unsigned long
4044nfs4_set_lock_task_retry(unsigned long timeout)
4045{
4046 freezable_schedule_timeout_killable(timeout);
4047 timeout <<= 1;
4048 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4049 return NFS4_LOCK_MAXTIMEOUT;
4050 return timeout;
4051}
4052
4053static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4054{
4055 struct inode *inode = state->inode;
4056 struct nfs_server *server = NFS_SERVER(inode);
4057 struct nfs_client *clp = server->nfs_client;
4058 struct nfs_lockt_args arg = {
4059 .fh = NFS_FH(inode),
4060 .fl = request,
4061 };
4062 struct nfs_lockt_res res = {
4063 .denied = request,
4064 };
4065 struct rpc_message msg = {
4066 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4067 .rpc_argp = &arg,
4068 .rpc_resp = &res,
4069 .rpc_cred = state->owner->so_cred,
4070 };
4071 struct nfs4_lock_state *lsp;
4072 int status;
4073
4074 arg.lock_owner.clientid = clp->cl_clientid;
4075 status = nfs4_set_lock_state(state, request);
4076 if (status != 0)
4077 goto out;
4078 lsp = request->fl_u.nfs4_fl.owner;
4079 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4080 arg.lock_owner.s_dev = server->s_dev;
4081 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4082 switch (status) {
4083 case 0:
4084 request->fl_type = F_UNLCK;
4085 break;
4086 case -NFS4ERR_DENIED:
4087 status = 0;
4088 }
4089 request->fl_ops->fl_release_private(request);
4090out:
4091 return status;
4092}
4093
4094static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4095{
4096 struct nfs4_exception exception = { };
4097 int err;
4098
4099 do {
4100 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4101 _nfs4_proc_getlk(state, cmd, request),
4102 &exception);
4103 } while (exception.retry);
4104 return err;
4105}
4106
4107static int do_vfs_lock(struct file *file, struct file_lock *fl)
4108{
4109 int res = 0;
4110 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4111 case FL_POSIX:
4112 res = posix_lock_file_wait(file, fl);
4113 break;
4114 case FL_FLOCK:
4115 res = flock_lock_file_wait(file, fl);
4116 break;
4117 default:
4118 BUG();
4119 }
4120 return res;
4121}
4122
4123struct nfs4_unlockdata {
4124 struct nfs_locku_args arg;
4125 struct nfs_locku_res res;
4126 struct nfs4_lock_state *lsp;
4127 struct nfs_open_context *ctx;
4128 struct file_lock fl;
4129 const struct nfs_server *server;
4130 unsigned long timestamp;
4131};
4132
4133static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4134 struct nfs_open_context *ctx,
4135 struct nfs4_lock_state *lsp,
4136 struct nfs_seqid *seqid)
4137{
4138 struct nfs4_unlockdata *p;
4139 struct inode *inode = lsp->ls_state->inode;
4140
4141 p = kzalloc(sizeof(*p), GFP_NOFS);
4142 if (p == NULL)
4143 return NULL;
4144 p->arg.fh = NFS_FH(inode);
4145 p->arg.fl = &p->fl;
4146 p->arg.seqid = seqid;
4147 p->res.seqid = seqid;
4148 p->arg.stateid = &lsp->ls_stateid;
4149 p->lsp = lsp;
4150 atomic_inc(&lsp->ls_count);
4151 /* Ensure we don't close the file until we're done freeing locks! */
4152 p->ctx = get_nfs_open_context(ctx);
4153 memcpy(&p->fl, fl, sizeof(p->fl));
4154 p->server = NFS_SERVER(inode);
4155 return p;
4156}
4157
4158static void nfs4_locku_release_calldata(void *data)
4159{
4160 struct nfs4_unlockdata *calldata = data;
4161 nfs_free_seqid(calldata->arg.seqid);
4162 nfs4_put_lock_state(calldata->lsp);
4163 put_nfs_open_context(calldata->ctx);
4164 kfree(calldata);
4165}
4166
4167static void nfs4_locku_done(struct rpc_task *task, void *data)
4168{
4169 struct nfs4_unlockdata *calldata = data;
4170
4171 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4172 return;
4173 switch (task->tk_status) {
4174 case 0:
4175 memcpy(calldata->lsp->ls_stateid.data,
4176 calldata->res.stateid.data,
4177 sizeof(calldata->lsp->ls_stateid.data));
4178 renew_lease(calldata->server, calldata->timestamp);
4179 break;
4180 case -NFS4ERR_BAD_STATEID:
4181 case -NFS4ERR_OLD_STATEID:
4182 case -NFS4ERR_STALE_STATEID:
4183 case -NFS4ERR_EXPIRED:
4184 break;
4185 default:
4186 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4187 rpc_restart_call_prepare(task);
4188 }
4189}
4190
4191static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4192{
4193 struct nfs4_unlockdata *calldata = data;
4194
4195 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4196 return;
4197 if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
4198 /* Note: exit _without_ running nfs4_locku_done */
4199 task->tk_action = NULL;
4200 return;
4201 }
4202 calldata->timestamp = jiffies;
4203 if (nfs4_setup_sequence(calldata->server,
4204 &calldata->arg.seq_args,
4205 &calldata->res.seq_res, task))
4206 return;
4207 rpc_call_start(task);
4208}
4209
4210static const struct rpc_call_ops nfs4_locku_ops = {
4211 .rpc_call_prepare = nfs4_locku_prepare,
4212 .rpc_call_done = nfs4_locku_done,
4213 .rpc_release = nfs4_locku_release_calldata,
4214};
4215
4216static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4217 struct nfs_open_context *ctx,
4218 struct nfs4_lock_state *lsp,
4219 struct nfs_seqid *seqid)
4220{
4221 struct nfs4_unlockdata *data;
4222 struct rpc_message msg = {
4223 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4224 .rpc_cred = ctx->cred,
4225 };
4226 struct rpc_task_setup task_setup_data = {
4227 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4228 .rpc_message = &msg,
4229 .callback_ops = &nfs4_locku_ops,
4230 .workqueue = nfsiod_workqueue,
4231 .flags = RPC_TASK_ASYNC,
4232 };
4233
4234 /* Ensure this is an unlock - when canceling a lock, the
4235 * canceled lock is passed in, and it won't be an unlock.
4236 */
4237 fl->fl_type = F_UNLCK;
4238
4239 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4240 if (data == NULL) {
4241 nfs_free_seqid(seqid);
4242 return ERR_PTR(-ENOMEM);
4243 }
4244
4245 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4246 msg.rpc_argp = &data->arg;
4247 msg.rpc_resp = &data->res;
4248 task_setup_data.callback_data = data;
4249 return rpc_run_task(&task_setup_data);
4250}
4251
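/*
 * Release the lock in the VFS first (FL_EXISTS tells us whether we ever
 * held it), then send LOCKU to the server unless the lock is covered by a
 * delegation.
 */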
4252static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4253{
4254 struct nfs_inode *nfsi = NFS_I(state->inode);
4255 struct nfs_seqid *seqid;
4256 struct nfs4_lock_state *lsp;
4257 struct rpc_task *task;
4258 int status = 0;
4259 unsigned char fl_flags = request->fl_flags;
4260
4261 status = nfs4_set_lock_state(state, request);
4262 /* Unlock _before_ we do the RPC call */
4263 request->fl_flags |= FL_EXISTS;
4264 down_read(&nfsi->rwsem);
4265 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4266 up_read(&nfsi->rwsem);
4267 goto out;
4268 }
4269 up_read(&nfsi->rwsem);
4270 if (status != 0)
4271 goto out;
4272 /* Is this a delegated lock? */
4273 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4274 goto out;
4275 lsp = request->fl_u.nfs4_fl.owner;
4276 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4277 status = -ENOMEM;
4278 if (seqid == NULL)
4279 goto out;
4280 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4281 status = PTR_ERR(task);
4282 if (IS_ERR(task))
4283 goto out;
4284 status = nfs4_wait_for_completion_rpc_task(task);
4285 rpc_put_task(task);
4286out:
4287 request->fl_flags = fl_flags;
4288 return status;
4289}
4290
4291struct nfs4_lockdata {
4292 struct nfs_lock_args arg;
4293 struct nfs_lock_res res;
4294 struct nfs4_lock_state *lsp;
4295 struct nfs_open_context *ctx;
4296 struct file_lock fl;
4297 unsigned long timestamp;
4298 int rpc_status;
4299 int cancelled;
4300 struct nfs_server *server;
4301};
4302
4303static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4304 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4305 gfp_t gfp_mask)
4306{
4307 struct nfs4_lockdata *p;
4308 struct inode *inode = lsp->ls_state->inode;
4309 struct nfs_server *server = NFS_SERVER(inode);
4310
4311 p = kzalloc(sizeof(*p), gfp_mask);
4312 if (p == NULL)
4313 return NULL;
4314
4315 p->arg.fh = NFS_FH(inode);
4316 p->arg.fl = &p->fl;
4317 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4318 if (p->arg.open_seqid == NULL)
4319 goto out_free;
4320 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4321 if (p->arg.lock_seqid == NULL)
4322 goto out_free_seqid;
4323 p->arg.lock_stateid = &lsp->ls_stateid;
4324 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4325 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4326 p->arg.lock_owner.s_dev = server->s_dev;
4327 p->res.lock_seqid = p->arg.lock_seqid;
4328 p->lsp = lsp;
4329 p->server = server;
4330 atomic_inc(&lsp->ls_count);
4331 p->ctx = get_nfs_open_context(ctx);
4332 memcpy(&p->fl, fl, sizeof(p->fl));
4333 return p;
4334out_free_seqid:
4335 nfs_free_seqid(p->arg.open_seqid);
4336out_free:
4337 kfree(p);
4338 return NULL;
4339}
4340
4341static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4342{
4343 struct nfs4_lockdata *data = calldata;
4344 struct nfs4_state *state = data->lsp->ls_state;
4345
4346 dprintk("%s: begin!\n", __func__);
4347 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4348 return;
4349 /* Do we need to do an open_to_lock_owner? */
4350 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4351 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4352 return;
4353 data->arg.open_stateid = &state->stateid;
4354 data->arg.new_lock_owner = 1;
4355 data->res.open_seqid = data->arg.open_seqid;
4356 } else
4357 data->arg.new_lock_owner = 0;
4358 data->timestamp = jiffies;
4359 if (nfs4_setup_sequence(data->server,
4360 &data->arg.seq_args,
4361 &data->res.seq_res, task))
4362 return;
4363 rpc_call_start(task);
4364 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4365}
4366
4367static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4368{
4369 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4370 nfs4_lock_prepare(task, calldata);
4371}
4372
4373static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4374{
4375 struct nfs4_lockdata *data = calldata;
4376
4377 dprintk("%s: begin!\n", __func__);
4378
4379 if (!nfs4_sequence_done(task, &data->res.seq_res))
4380 return;
4381
4382 data->rpc_status = task->tk_status;
4383 if (data->arg.new_lock_owner != 0) {
4384 if (data->rpc_status == 0)
4385 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4386 else
4387 goto out;
4388 }
4389 if (data->rpc_status == 0) {
4390 memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
4391 sizeof(data->lsp->ls_stateid.data));
4392 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
4393 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4394 }
4395out:
4396 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4397}
4398
4399static void nfs4_lock_release(void *calldata)
4400{
4401 struct nfs4_lockdata *data = calldata;
4402
4403 dprintk("%s: begin!\n", __func__);
4404 nfs_free_seqid(data->arg.open_seqid);
4405 if (data->cancelled != 0) {
4406 struct rpc_task *task;
4407 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4408 data->arg.lock_seqid);
4409 if (!IS_ERR(task))
4410 rpc_put_task_async(task);
4411 dprintk("%s: cancelling lock!\n", __func__);
4412 } else
4413 nfs_free_seqid(data->arg.lock_seqid);
4414 nfs4_put_lock_state(data->lsp);
4415 put_nfs_open_context(data->ctx);
4416 kfree(data);
4417 dprintk("%s: done!\n", __func__);
4418}
4419
4420static const struct rpc_call_ops nfs4_lock_ops = {
4421 .rpc_call_prepare = nfs4_lock_prepare,
4422 .rpc_call_done = nfs4_lock_done,
4423 .rpc_release = nfs4_lock_release,
4424};
4425
4426static const struct rpc_call_ops nfs4_recover_lock_ops = {
4427 .rpc_call_prepare = nfs4_recover_lock_prepare,
4428 .rpc_call_done = nfs4_lock_done,
4429 .rpc_release = nfs4_lock_release,
4430};
4431
4432static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4433{
4434 switch (error) {
4435 case -NFS4ERR_ADMIN_REVOKED:
4436 case -NFS4ERR_BAD_STATEID:
4437 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4438 if (new_lock_owner != 0 ||
4439 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4440 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4441 break;
4442 case -NFS4ERR_STALE_STATEID:
4443 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
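		/* Fall through */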
4444 case -NFS4ERR_EXPIRED:
4445 nfs4_schedule_lease_recovery(server->nfs_client);
4446 }
4447}
4448
4449static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4450{
4451 struct nfs4_lockdata *data;
4452 struct rpc_task *task;
4453 struct rpc_message msg = {
4454 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4455 .rpc_cred = state->owner->so_cred,
4456 };
4457 struct rpc_task_setup task_setup_data = {
4458 .rpc_client = NFS_CLIENT(state->inode),
4459 .rpc_message = &msg,
4460 .callback_ops = &nfs4_lock_ops,
4461 .workqueue = nfsiod_workqueue,
4462 .flags = RPC_TASK_ASYNC,
4463 };
4464 int ret;
4465
4466 dprintk("%s: begin!\n", __func__);
4467 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4468 fl->fl_u.nfs4_fl.owner,
4469 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4470 if (data == NULL)
4471 return -ENOMEM;
4472 if (IS_SETLKW(cmd))
4473 data->arg.block = 1;
4474 if (recovery_type > NFS_LOCK_NEW) {
4475 if (recovery_type == NFS_LOCK_RECLAIM)
4476 data->arg.reclaim = NFS_LOCK_RECLAIM;
4477 task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4478 }
4479 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4480 msg.rpc_argp = &data->arg;
4481 msg.rpc_resp = &data->res;
4482 task_setup_data.callback_data = data;
4483 task = rpc_run_task(&task_setup_data);
4484 if (IS_ERR(task))
4485 return PTR_ERR(task);
4486 ret = nfs4_wait_for_completion_rpc_task(task);
4487 if (ret == 0) {
4488 ret = data->rpc_status;
4489 if (ret)
4490 nfs4_handle_setlk_error(data->server, data->lsp,
4491 data->arg.new_lock_owner, ret);
4492 } else
4493 data->cancelled = 1;
4494 rpc_put_task(task);
4495 dprintk("%s: done, ret = %d!\n", __func__, ret);
4496 return ret;
4497}
4498
4499static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4500{
4501 struct nfs_server *server = NFS_SERVER(state->inode);
4502 struct nfs4_exception exception = { };
4503 int err;
4504
4505 do {
4506 /* Cache the lock if possible... */
4507 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4508 return 0;
4509 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4510 if (err != -NFS4ERR_DELAY)
4511 break;
4512 nfs4_handle_exception(server, err, &exception);
4513 } while (exception.retry);
4514 return err;
4515}
4516
4517static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4518{
4519 struct nfs_server *server = NFS_SERVER(state->inode);
4520 struct nfs4_exception exception = { };
4521 int err;
4522
4523 err = nfs4_set_lock_state(state, request);
4524 if (err != 0)
4525 return err;
4526 do {
4527 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4528 return 0;
4529 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4530 switch (err) {
4531 default:
4532 goto out;
4533 case -NFS4ERR_GRACE:
4534 case -NFS4ERR_DELAY:
4535 nfs4_handle_exception(server, err, &exception);
4536 err = 0;
4537 }
4538 } while (exception.retry);
4539out:
4540 return err;
4541}
4542
4543#if defined(CONFIG_NFS_V4_1)
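/*
 * Use TEST_STATEID to probe each initialized lock stateid; any stateid the
 * server no longer recognizes is released with FREE_STATEID and marked
 * uninitialized, and the error is returned so the caller can fall back to
 * normal lock recovery.
 */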
4544static int nfs41_check_expired_locks(struct nfs4_state *state)
4545{
4546 int status, ret = NFS_OK;
4547 struct nfs4_lock_state *lsp;
4548 struct nfs_server *server = NFS_SERVER(state->inode);
4549
4550 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4551 if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
4552 status = nfs41_test_stateid(server, &lsp->ls_stateid);
4553 if (status != NFS_OK) {
4554 nfs41_free_stateid(server, &lsp->ls_stateid);
4555 lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
4556 ret = status;
4557 }
4558 }
4559 }
4560
4561 return ret;
4562}
4563
4564static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4565{
4566 int status = NFS_OK;
4567
4568 if (test_bit(LK_STATE_IN_USE, &state->flags))
4569 status = nfs41_check_expired_locks(state);
4570 if (status == NFS_OK)
4571 return status;
4572 return nfs4_lock_expired(state, request);
4573}
4574#endif
4575
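/*
 * Set a lock: first probe the local lock manager with FL_ACCESS, then either
 * cache the lock locally (if we hold a delegation) or send LOCK to the
 * server and record the result in the VFS.
 */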
4576static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4577{
4578 struct nfs_inode *nfsi = NFS_I(state->inode);
4579 unsigned char fl_flags = request->fl_flags;
4580 int status = -ENOLCK;
4581
4582 if ((fl_flags & FL_POSIX) &&
4583 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4584 goto out;
4585 /* Is this a delegated open? */
4586 status = nfs4_set_lock_state(state, request);
4587 if (status != 0)
4588 goto out;
4589 request->fl_flags |= FL_ACCESS;
4590 status = do_vfs_lock(request->fl_file, request);
4591 if (status < 0)
4592 goto out;
4593 down_read(&nfsi->rwsem);
4594 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4595 /* Yes: cache locks! */
4596 /* ...but avoid races with delegation recall... */
4597 request->fl_flags = fl_flags & ~FL_SLEEP;
4598 status = do_vfs_lock(request->fl_file, request);
4599 goto out_unlock;
4600 }
4601 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4602 if (status != 0)
4603 goto out_unlock;
4604 /* Note: we always want to sleep here! */
4605 request->fl_flags = fl_flags | FL_SLEEP;
4606 if (do_vfs_lock(request->fl_file, request) < 0)
4607 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
4608 "manager!\n", __func__);
4609out_unlock:
4610 up_read(&nfsi->rwsem);
4611out:
4612 request->fl_flags = fl_flags;
4613 return status;
4614}
4615
4616static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4617{
4618 struct nfs4_exception exception = {
4619 .state = state,
4620 };
4621 int err;
4622
4623 do {
4624 err = _nfs4_proc_setlk(state, cmd, request);
4625 if (err == -NFS4ERR_DENIED)
4626 err = -EAGAIN;
4627 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4628 err, &exception);
4629 } while (exception.retry);
4630 return err;
4631}
4632
4633static int
4634nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4635{
4636 struct nfs_open_context *ctx;
4637 struct nfs4_state *state;
4638 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4639 int status;
4640
4641 /* verify open state */
4642 ctx = nfs_file_open_context(filp);
4643 state = ctx->state;
4644
4645 if (request->fl_start < 0 || request->fl_end < 0)
4646 return -EINVAL;
4647
4648 if (IS_GETLK(cmd)) {
4649 if (state != NULL)
4650 return nfs4_proc_getlk(state, F_GETLK, request);
4651 return 0;
4652 }
4653
4654 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4655 return -EINVAL;
4656
4657 if (request->fl_type == F_UNLCK) {
4658 if (state != NULL)
4659 return nfs4_proc_unlck(state, cmd, request);
4660 return 0;
4661 }
4662
4663 if (state == NULL)
4664 return -ENOLCK;
4665 do {
4666 status = nfs4_proc_setlk(state, cmd, request);
4667 if ((status != -EAGAIN) || IS_SETLK(cmd))
4668 break;
4669 timeout = nfs4_set_lock_task_retry(timeout);
4670 status = -ERESTARTSYS;
4671 if (signalled())
4672 break;
4673 } while(status < 0);
4674 return status;
4675}
4676
4677int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4678{
4679 struct nfs_server *server = NFS_SERVER(state->inode);
4680 struct nfs4_exception exception = { };
4681 int err;
4682
4683 err = nfs4_set_lock_state(state, fl);
4684 if (err != 0)
4685 goto out;
4686 do {
4687 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4688 switch (err) {
4689 default:
4690 printk(KERN_ERR "NFS: %s: unhandled error "
4691 "%d.\n", __func__, err);
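		/* Fall through */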
4692 case 0:
4693 case -ESTALE:
4694 goto out;
4695 case -NFS4ERR_EXPIRED:
4696 nfs4_schedule_stateid_recovery(server, state);
4697 case -NFS4ERR_STALE_CLIENTID:
4698 case -NFS4ERR_STALE_STATEID:
4699 nfs4_schedule_lease_recovery(server->nfs_client);
4700 goto out;
4701 case -NFS4ERR_BADSESSION:
4702 case -NFS4ERR_BADSLOT:
4703 case -NFS4ERR_BAD_HIGH_SLOT:
4704 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4705 case -NFS4ERR_DEADSESSION:
4706 nfs4_schedule_session_recovery(server->nfs_client->cl_session);
4707 goto out;
4708 case -ERESTARTSYS:
4709 /*
4710 * The show must go on: exit, but mark the
4711 * stateid as needing recovery.
4712 */
4713 case -NFS4ERR_DELEG_REVOKED:
4714 case -NFS4ERR_ADMIN_REVOKED:
4715 case -NFS4ERR_BAD_STATEID:
4716 case -NFS4ERR_OPENMODE:
4717 nfs4_schedule_stateid_recovery(server, state);
4718 err = 0;
4719 goto out;
4720 case -EKEYEXPIRED:
4721 /*
4722 * The user's RPCSEC_GSS context has expired.
4723 * We cannot recover this stateid now, so
4724 * skip it and allow the recovery thread to
4725 * proceed.
4726 */
4727 err = 0;
4728 goto out;
4729 case -ENOMEM:
4730 case -NFS4ERR_DENIED:
4731 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
4732 err = 0;
4733 goto out;
4734 case -NFS4ERR_DELAY:
4735 break;
4736 }
4737 err = nfs4_handle_exception(server, err, &exception);
4738 } while (exception.retry);
4739out:
4740 return err;
4741}
4742
4743static void nfs4_release_lockowner_release(void *calldata)
4744{
4745 kfree(calldata);
4746}
4747
4748const struct rpc_call_ops nfs4_release_lockowner_ops = {
4749 .rpc_release = nfs4_release_lockowner_release,
4750};
4751
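/*
 * Ask a minor version 0 server to release the lock owner's state. The call
 * is fired asynchronously and any error is deliberately ignored.
 */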
4752void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
4753{
4754 struct nfs_server *server = lsp->ls_state->owner->so_server;
4755 struct nfs_release_lockowner_args *args;
4756 struct rpc_message msg = {
4757 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4758 };
4759
4760 if (server->nfs_client->cl_mvops->minor_version != 0)
4761 return;
4762 args = kmalloc(sizeof(*args), GFP_NOFS);
4763 if (!args)
4764 return;
4765 args->lock_owner.clientid = server->nfs_client->cl_clientid;
4766 args->lock_owner.id = lsp->ls_seqid.owner_id;
4767 args->lock_owner.s_dev = server->s_dev;
4768 msg.rpc_argp = args;
4769 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
4770}
4771
4772#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4773
4774static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4775 const void *buf, size_t buflen,
4776 int flags, int type)
4777{
4778 if (strcmp(key, "") != 0)
4779 return -EINVAL;
4780
4781 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
4782}
4783
4784static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
4785 void *buf, size_t buflen, int type)
4786{
4787 if (strcmp(key, "") != 0)
4788 return -EINVAL;
4789
4790 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
4791}
4792
4793static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4794 size_t list_len, const char *name,
4795 size_t name_len, int type)
4796{
4797 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
4798
4799 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4800 return 0;
4801
4802 if (list && len <= list_len)
4803 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
4804 return len;
4805}
4806
4807/*
4808 * nfs_fhget will use either the mounted_on_fileid or the fileid
4809 */
4810static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4811{
4812 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
4813 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
4814 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
4815 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
4816 return;
4817
4818 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4819 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
4820 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4821 fattr->nlink = 2;
4822}
4823
4824int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4825 struct nfs4_fs_locations *fs_locations, struct page *page)
4826{
4827 struct nfs_server *server = NFS_SERVER(dir);
4828 u32 bitmask[2] = {
4829 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4830 };
4831 struct nfs4_fs_locations_arg args = {
4832 .dir_fh = NFS_FH(dir),
4833 .name = name,
4834 .page = page,
4835 .bitmask = bitmask,
4836 };
4837 struct nfs4_fs_locations_res res = {
4838 .fs_locations = fs_locations,
4839 };
4840 struct rpc_message msg = {
4841 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
4842 .rpc_argp = &args,
4843 .rpc_resp = &res,
4844 };
4845 int status;
4846
4847 dprintk("%s: start\n", __func__);
4848
4849 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
4850 * is not supported */
4851 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
4852 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
4853 else
4854 bitmask[0] |= FATTR4_WORD0_FILEID;
4855
4856 nfs_fattr_init(&fs_locations->fattr);
4857 fs_locations->server = server;
4858 fs_locations->nlocations = 0;
4859 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4860 dprintk("%s: returned status = %d\n", __func__, status);
4861 return status;
4862}
4863
4864static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
4865{
4866 int status;
4867 struct nfs4_secinfo_arg args = {
4868 .dir_fh = NFS_FH(dir),
4869 .name = name,
4870 };
4871 struct nfs4_secinfo_res res = {
4872 .flavors = flavors,
4873 };
4874 struct rpc_message msg = {
4875 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
4876 .rpc_argp = &args,
4877 .rpc_resp = &res,
4878 };
4879
4880 dprintk("NFS call secinfo %s\n", name->name);
4881 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4882 dprintk("NFS reply secinfo: %d\n", status);
4883 return status;
4884}
4885
4886int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
4887{
4888 struct nfs4_exception exception = { };
4889 int err;
4890 do {
4891 err = nfs4_handle_exception(NFS_SERVER(dir),
4892 _nfs4_proc_secinfo(dir, name, flavors),
4893 &exception);
4894 } while (exception.retry);
4895 return err;
4896}
4897
4898#ifdef CONFIG_NFS_V4_1
4899/*
4900 * Check the exchange flags returned by the server for invalid flags: flags
4901 * outside the allowed mask, both the PNFS and NON_PNFS flags set, or none
4902 * of the NON_PNFS, PNFS, or DS flags set.
4903 */
4904static int nfs4_check_cl_exchange_flags(u32 flags)
4905{
4906 if (flags & ~EXCHGID4_FLAG_MASK_R)
4907 goto out_inval;
4908 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
4909 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
4910 goto out_inval;
4911 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
4912 goto out_inval;
4913 return NFS_OK;
4914out_inval:
4915 return -NFS4ERR_INVAL;
4916}
4917
4918static bool
4919nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
4920{
4921 if (a->server_scope_sz == b->server_scope_sz &&
4922 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
4923 return true;
4924
4925 return false;
4926}
4927
4928/*
4929 * nfs4_proc_exchange_id()
4930 *
4931 * Since the clientid has expired, all compounds using sessions
4932 * associated with the stale clientid will be returning
4933 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
4934 * be in some phase of session reset.
4935 */
4936int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4937{
4938 nfs4_verifier verifier;
4939 struct nfs41_exchange_id_args args = {
4940 .client = clp,
4941 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
4942 };
4943 struct nfs41_exchange_id_res res = {
4944 .client = clp,
4945 };
4946 int status;
4947 struct rpc_message msg = {
4948 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
4949 .rpc_argp = &args,
4950 .rpc_resp = &res,
4951 .rpc_cred = cred,
4952 };
4953 __be32 *p;
4954
4955 dprintk("--> %s\n", __func__);
4956 BUG_ON(clp == NULL);
4957
4958 p = (__be32 *)verifier.data;
4959 *p++ = htonl((u32)clp->cl_boot_time.tv_sec);
4960 *p = htonl((u32)clp->cl_boot_time.tv_nsec);
4961 args.verifier = &verifier;
4962
4963 args.id_len = scnprintf(args.id, sizeof(args.id),
4964 "%s/%s.%s/%u",
4965 clp->cl_ipaddr,
4966 init_utsname()->nodename,
4967 init_utsname()->domainname,
4968 clp->cl_rpcclient->cl_auth->au_flavor);
4969
4970 res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
4971 if (unlikely(!res.server_scope)) {
4972 status = -ENOMEM;
4973 goto out;
4974 }
4975
4976 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
4977 if (unlikely(!res.impl_id)) {
4978 status = -ENOMEM;
4979 goto out_server_scope;
4980 }
4981
4982 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4983 if (!status)
4984 status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
4985
4986 if (!status) {
4987 /* use the most recent implementation id */
4988 kfree(clp->impl_id);
4989 clp->impl_id = res.impl_id;
4990 } else
4991 kfree(res.impl_id);
4992
4993 if (!status) {
4994 if (clp->server_scope &&
4995 !nfs41_same_server_scope(clp->server_scope,
4996 res.server_scope)) {
4997 dprintk("%s: server_scope mismatch detected\n",
4998 __func__);
4999 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5000 kfree(clp->server_scope);
5001 clp->server_scope = NULL;
5002 }
5003
5004 if (!clp->server_scope) {
5005 clp->server_scope = res.server_scope;
5006 goto out;
5007 }
5008 }
5009
5010out_server_scope:
5011 kfree(res.server_scope);
5012out:
5013 if (clp->impl_id)
5014 dprintk("%s: Server Implementation ID: "
5015 "domain: %s, name: %s, date: %llu,%u\n",
5016 __func__, clp->impl_id->domain, clp->impl_id->name,
5017 clp->impl_id->date.seconds,
5018 clp->impl_id->date.nseconds);
5019 dprintk("<-- %s status= %d\n", __func__, status);
5020 return status;
5021}
5022
5023struct nfs4_get_lease_time_data {
5024 struct nfs4_get_lease_time_args *args;
5025 struct nfs4_get_lease_time_res *res;
5026 struct nfs_client *clp;
5027};
5028
5029static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5030 void *calldata)
5031{
5032 int ret;
5033 struct nfs4_get_lease_time_data *data =
5034 (struct nfs4_get_lease_time_data *)calldata;
5035
5036 dprintk("--> %s\n", __func__);
5037 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5038 /* Just set up the sequence; do not trigger session recovery,
5039 since we're invoked from within one */
5040 ret = nfs41_setup_sequence(data->clp->cl_session,
5041 &data->args->la_seq_args,
5042 &data->res->lr_seq_res, task);
5043
5044 BUG_ON(ret == -EAGAIN);
5045 rpc_call_start(task);
5046 dprintk("<-- %s\n", __func__);
5047}
5048
5049/*
5050 * Called from nfs4_state_manager thread for session setup, so don't recover
5051 * from sequence operation or clientid errors.
5052 */
5053static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5054{
5055 struct nfs4_get_lease_time_data *data =
5056 (struct nfs4_get_lease_time_data *)calldata;
5057
5058 dprintk("--> %s\n", __func__);
5059 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5060 return;
5061 switch (task->tk_status) {
5062 case -NFS4ERR_DELAY:
5063 case -NFS4ERR_GRACE:
5064 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5065 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5066 task->tk_status = 0;
5067 /* fall through */
5068 case -NFS4ERR_RETRY_UNCACHED_REP:
5069 rpc_restart_call_prepare(task);
5070 return;
5071 }
5072 dprintk("<-- %s\n", __func__);
5073}
5074
5075struct rpc_call_ops nfs4_get_lease_time_ops = {
5076 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5077 .rpc_call_done = nfs4_get_lease_time_done,
5078};
5079
5080int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5081{
5082 struct rpc_task *task;
5083 struct nfs4_get_lease_time_args args;
5084 struct nfs4_get_lease_time_res res = {
5085 .lr_fsinfo = fsinfo,
5086 };
5087 struct nfs4_get_lease_time_data data = {
5088 .args = &args,
5089 .res = &res,
5090 .clp = clp,
5091 };
5092 struct rpc_message msg = {
5093 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5094 .rpc_argp = &args,
5095 .rpc_resp = &res,
5096 };
5097 struct rpc_task_setup task_setup = {
5098 .rpc_client = clp->cl_rpcclient,
5099 .rpc_message = &msg,
5100 .callback_ops = &nfs4_get_lease_time_ops,
5101 .callback_data = &data,
5102 .flags = RPC_TASK_TIMEOUT,
5103 };
5104 int status;
5105
5106 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5107 dprintk("--> %s\n", __func__);
5108 task = rpc_run_task(&task_setup);
5109
5110 if (IS_ERR(task))
5111 status = PTR_ERR(task);
5112 else {
5113 status = task->tk_status;
5114 rpc_put_task(task);
5115 }
5116 dprintk("<-- %s return %d\n", __func__, status);
5117
5118 return status;
5119}
5120
5121static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
5122{
5123 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
5124}
5125
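/*
 * Swap in the newly allocated slot array (if any) under the slot table lock,
 * mark every slot as unused and reset each slot's sequence number.
 */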
5126static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
5127 struct nfs4_slot *new,
5128 u32 max_slots,
5129 u32 ivalue)
5130{
5131 struct nfs4_slot *old = NULL;
5132 u32 i;
5133
5134 spin_lock(&tbl->slot_tbl_lock);
5135 if (new) {
5136 old = tbl->slots;
5137 tbl->slots = new;
5138 tbl->max_slots = max_slots;
5139 }
5140 tbl->highest_used_slotid = NFS4_NO_SLOT; /* no slot is currently used */
5141 for (i = 0; i < tbl->max_slots; i++)
5142 tbl->slots[i].seq_nr = ivalue;
5143 spin_unlock(&tbl->slot_tbl_lock);
5144 kfree(old);
5145}
5146
5147/*
5148 * (Re)initialize a slot table
5149 */
5150static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
5151 u32 ivalue)
5152{
5153 struct nfs4_slot *new = NULL;
5154 int ret = -ENOMEM;
5155
5156 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5157 max_reqs, tbl->max_slots);
5158
5159 /* Does the newly negotiated max_reqs match the existing slot table? */
5160 if (max_reqs != tbl->max_slots) {
5161 new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
5162 if (!new)
5163 goto out;
5164 }
5165 ret = 0;
5166
5167 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
5168 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5169 tbl, tbl->slots, tbl->max_slots);
5170out:
5171 dprintk("<-- %s: return %d\n", __func__, ret);
5172 return ret;
5173}
5174
5175/* Destroy the slot table */
5176static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5177{
5178 if (session->fc_slot_table.slots != NULL) {
5179 kfree(session->fc_slot_table.slots);
5180 session->fc_slot_table.slots = NULL;
5181 }
5182 if (session->bc_slot_table.slots != NULL) {
5183 kfree(session->bc_slot_table.slots);
5184 session->bc_slot_table.slots = NULL;
5185 }
5186 return;
5187}
5188
5189/*
5190 * Initialize or reset the forechannel and backchannel tables
5191 */
5192static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
5193{
5194 struct nfs4_slot_table *tbl;
5195 int status;
5196
5197 dprintk("--> %s\n", __func__);
5198 /* Fore channel */
5199 tbl = &ses->fc_slot_table;
5200 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
5201 if (status) /* -ENOMEM */
5202 return status;
5203 /* Back channel */
5204 tbl = &ses->bc_slot_table;
5205 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
5206 if (status && tbl->slots == NULL)
5207 /* Fore and back channel share a connection, so get
5208 * both slot tables or neither */
5209 nfs4_destroy_slot_tables(ses);
5210 return status;
5211}
5212
5213struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5214{
5215 struct nfs4_session *session;
5216 struct nfs4_slot_table *tbl;
5217
5218 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5219 if (!session)
5220 return NULL;
5221
5222 tbl = &session->fc_slot_table;
5223 tbl->highest_used_slotid = NFS4_NO_SLOT;
5224 spin_lock_init(&tbl->slot_tbl_lock);
5225 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5226 init_completion(&tbl->complete);
5227
5228 tbl = &session->bc_slot_table;
5229 tbl->highest_used_slotid = NFS4_NO_SLOT;
5230 spin_lock_init(&tbl->slot_tbl_lock);
5231 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5232 init_completion(&tbl->complete);
5233
5234 session->session_state = 1<<NFS4_SESSION_INITING;
5235
5236 session->clp = clp;
5237 return session;
5238}
5239
5240void nfs4_destroy_session(struct nfs4_session *session)
5241{
5242 struct rpc_xprt *xprt;
5243
5244 nfs4_proc_destroy_session(session);
5245
5246 rcu_read_lock();
5247 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
5248 rcu_read_unlock();
5249 dprintk("%s Destroy backchannel for xprt %p\n",
5250 __func__, xprt);
5251 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
5252 nfs4_destroy_slot_tables(session);
5253 kfree(session);
5254}
5255
5256/*
5257 * Initialize the values to be used by the client in CREATE_SESSION.
5258 * If nfs4_init_session set the fore channel request and response sizes,
5259 * use them.
5260 *
5261 * Set the back channel max_resp_sz_cached to zero to force the client to
5262 * always set csa_cachethis to FALSE because the current implementation
5263 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5264 */
5265static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5266{
5267 struct nfs4_session *session = args->client->cl_session;
5268 unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
5269 mxresp_sz = session->fc_attrs.max_resp_sz;
5270
5271 if (mxrqst_sz == 0)
5272 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5273 if (mxresp_sz == 0)
5274 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5275 /* Fore channel attributes */
5276 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5277 args->fc_attrs.max_resp_sz = mxresp_sz;
5278 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5279 args->fc_attrs.max_reqs = max_session_slots;
5280
5281 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5282 "max_ops=%u max_reqs=%u\n",
5283 __func__,
5284 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5285 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5286
5287 /* Back channel attributes */
5288 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5289 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5290 args->bc_attrs.max_resp_sz_cached = 0;
5291 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5292 args->bc_attrs.max_reqs = 1;
5293
5294 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5295 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5296 __func__,
5297 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5298 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5299 args->bc_attrs.max_reqs);
5300}
5301
5302static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5303{
5304 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5305 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5306
5307 if (rcvd->max_resp_sz > sent->max_resp_sz)
5308 return -EINVAL;
5309 /*
5310 * Our requested max_ops is the minimum we need; we're not
5311 * prepared to break up compounds into smaller pieces than that.
5312 * So, no point even trying to continue if the server won't
5313 * cooperate:
5314 */
5315 if (rcvd->max_ops < sent->max_ops)
5316 return -EINVAL;
5317 if (rcvd->max_reqs == 0)
5318 return -EINVAL;
5319 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5320 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5321 return 0;
5322}
5323
5324static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5325{
5326 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5327 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5328
5329 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5330 return -EINVAL;
5331 if (rcvd->max_resp_sz < sent->max_resp_sz)
5332 return -EINVAL;
5333 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5334 return -EINVAL;
5335 /* These would render the backchannel useless: */
5336 if (rcvd->max_ops != sent->max_ops)
5337 return -EINVAL;
5338 if (rcvd->max_reqs != sent->max_reqs)
5339 return -EINVAL;
5340 return 0;
5341}
5342
5343static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5344 struct nfs4_session *session)
5345{
5346 int ret;
5347
5348 ret = nfs4_verify_fore_channel_attrs(args, session);
5349 if (ret)
5350 return ret;
5351 return nfs4_verify_back_channel_attrs(args, session);
5352}
5353
5354static int _nfs4_proc_create_session(struct nfs_client *clp)
5355{
5356 struct nfs4_session *session = clp->cl_session;
5357 struct nfs41_create_session_args args = {
5358 .client = clp,
5359 .cb_program = NFS4_CALLBACK,
5360 };
5361 struct nfs41_create_session_res res = {
5362 .client = clp,
5363 };
5364 struct rpc_message msg = {
5365 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5366 .rpc_argp = &args,
5367 .rpc_resp = &res,
5368 };
5369 int status;
5370
5371 nfs4_init_channel_attrs(&args);
5372 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5373
5374 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5375
5376 if (!status)
5377 /* Verify the session's negotiated channel_attrs values */
5378 status = nfs4_verify_channel_attrs(&args, session);
5379 if (!status) {
5380 /* Increment the clientid slot sequence id */
5381 clp->cl_seqid++;
5382 }
5383
5384 return status;
5385}
5386
5387/*
5388 * Issues a CREATE_SESSION operation to the server.
5389 * It is the responsibility of the caller to verify the session is
5390 * expired before calling this routine.
5391 */
5392int nfs4_proc_create_session(struct nfs_client *clp)
5393{
5394 int status;
5395 unsigned *ptr;
5396 struct nfs4_session *session = clp->cl_session;
5397
5398 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5399
5400 status = _nfs4_proc_create_session(clp);
5401 if (status)
5402 goto out;
5403
5404 /* Init or reset the session slot tables */
5405 status = nfs4_setup_session_slot_tables(session);
5406 dprintk("slot table setup returned %d\n", status);
5407 if (status)
5408 goto out;
5409
5410 ptr = (unsigned *)&session->sess_id.data[0];
5411 dprintk("%s client seqid %d sessionid %u:%u:%u:%u\n", __func__,
5412 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5413out:
5414 dprintk("<-- %s\n", __func__);
5415 return status;
5416}
5417
5418/*
5419 * Issue the over-the-wire RPC DESTROY_SESSION.
5420 * The caller must serialize access to this routine.
5421 */
5422int nfs4_proc_destroy_session(struct nfs4_session *session)
5423{
5424 int status = 0;
5425 struct rpc_message msg;
5426
5427 dprintk("--> nfs4_proc_destroy_session\n");
5428
5429 /* session is still being setup */
5430 if (session->clp->cl_cons_state != NFS_CS_READY)
5431 return status;
5432
5433 msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
5434 msg.rpc_argp = session;
5435 msg.rpc_resp = NULL;
5436 msg.rpc_cred = NULL;
5437 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5438
5439 if (status)
5440 printk(KERN_WARNING
5441 "NFS: Got error %d from the server on DESTROY_SESSION. "
5442 "Session has been destroyed regardless...\n", status);
5443
5444 dprintk("<-- nfs4_proc_destroy_session\n");
5445 return status;
5446}
5447
5448int nfs4_init_session(struct nfs_server *server)
5449{
5450 struct nfs_client *clp = server->nfs_client;
5451 struct nfs4_session *session;
5452 unsigned int rsize, wsize;
5453 int ret;
5454
5455 if (!nfs4_has_session(clp))
5456 return 0;
5457
5458 session = clp->cl_session;
5459 if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5460 return 0;
5461
5462 rsize = server->rsize;
5463 if (rsize == 0)
5464 rsize = NFS_MAX_FILE_IO_SIZE;
5465 wsize = server->wsize;
5466 if (wsize == 0)
5467 wsize = NFS_MAX_FILE_IO_SIZE;
5468
5469 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5470 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5471
5472 ret = nfs4_recover_expired_lease(server);
5473 if (!ret)
5474 ret = nfs4_check_client_ready(clp);
5475 return ret;
5476}
5477
5478int nfs4_init_ds_session(struct nfs_client *clp)
5479{
5480 struct nfs4_session *session = clp->cl_session;
5481 int ret;
5482
5483 if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5484 return 0;
5485
5486 ret = nfs4_client_recover_expired_lease(clp);
5487 if (!ret)
5488 /* Test for the DS role */
5489 if (!is_ds_client(clp))
5490 ret = -ENODEV;
5491 if (!ret)
5492 ret = nfs4_check_client_ready(clp);
5493 return ret;
5494
5495}
5496EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5497
5498
5499/*
5500 * Renew the cl_session lease.
5501 */
5502struct nfs4_sequence_data {
5503 struct nfs_client *clp;
5504 struct nfs4_sequence_args args;
5505 struct nfs4_sequence_res res;
5506};
5507
5508static void nfs41_sequence_release(void *data)
5509{
5510 struct nfs4_sequence_data *calldata = data;
5511 struct nfs_client *clp = calldata->clp;
5512
5513 if (atomic_read(&clp->cl_count) > 1)
5514 nfs4_schedule_state_renewal(clp);
5515 nfs_put_client(clp);
5516 kfree(calldata);
5517}
5518
5519static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5520{
5521 switch(task->tk_status) {
5522 case -NFS4ERR_DELAY:
5523 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5524 return -EAGAIN;
5525 default:
5526 nfs4_schedule_lease_recovery(clp);
5527 }
5528 return 0;
5529}
5530
5531static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5532{
5533 struct nfs4_sequence_data *calldata = data;
5534 struct nfs_client *clp = calldata->clp;
5535
5536 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5537 return;
5538
5539 if (task->tk_status < 0) {
5540 dprintk("%s ERROR %d\n", __func__, task->tk_status);
5541 if (atomic_read(&clp->cl_count) == 1)
5542 goto out;
5543
5544 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5545 rpc_restart_call_prepare(task);
5546 return;
5547 }
5548 }
5549 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5550out:
5551 dprintk("<-- %s\n", __func__);
5552}
5553
5554static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5555{
5556 struct nfs4_sequence_data *calldata = data;
5557 struct nfs_client *clp = calldata->clp;
5558 struct nfs4_sequence_args *args;
5559 struct nfs4_sequence_res *res;
5560
5561 args = task->tk_msg.rpc_argp;
5562 res = task->tk_msg.rpc_resp;
5563
5564 if (nfs41_setup_sequence(clp->cl_session, args, res, task))
5565 return;
5566 rpc_call_start(task);
5567}
5568
5569static const struct rpc_call_ops nfs41_sequence_ops = {
5570 .rpc_call_done = nfs41_sequence_call_done,
5571 .rpc_call_prepare = nfs41_sequence_prepare,
5572 .rpc_release = nfs41_sequence_release,
5573};
5574
5575static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5576{
5577 struct nfs4_sequence_data *calldata;
5578 struct rpc_message msg = {
5579 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5580 .rpc_cred = cred,
5581 };
5582 struct rpc_task_setup task_setup_data = {
5583 .rpc_client = clp->cl_rpcclient,
5584 .rpc_message = &msg,
5585 .callback_ops = &nfs41_sequence_ops,
5586 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5587 };
5588
5589 if (!atomic_inc_not_zero(&clp->cl_count))
5590 return ERR_PTR(-EIO);
5591 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5592 if (calldata == NULL) {
5593 nfs_put_client(clp);
5594 return ERR_PTR(-ENOMEM);
5595 }
5596 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
5597 msg.rpc_argp = &calldata->args;
5598 msg.rpc_resp = &calldata->res;
5599 calldata->clp = clp;
5600 task_setup_data.callback_data = calldata;
5601
5602 return rpc_run_task(&task_setup_data);
5603}
5604
5605static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5606{
5607 struct rpc_task *task;
5608 int ret = 0;
5609
5610 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
5611 return 0;
5612 task = _nfs41_proc_sequence(clp, cred);
5613 if (IS_ERR(task))
5614 ret = PTR_ERR(task);
5615 else
5616 rpc_put_task_async(task);
5617 dprintk("<-- %s status=%d\n", __func__, ret);
5618 return ret;
5619}
5620
5621static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5622{
5623 struct rpc_task *task;
5624 int ret;
5625
5626 task = _nfs41_proc_sequence(clp, cred);
5627 if (IS_ERR(task)) {
5628 ret = PTR_ERR(task);
5629 goto out;
5630 }
5631 ret = rpc_wait_for_completion_task(task);
5632 if (!ret) {
5633 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5634
5635 if (task->tk_status == 0)
5636 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5637 ret = task->tk_status;
5638 }
5639 rpc_put_task(task);
5640out:
5641 dprintk("<-- %s status=%d\n", __func__, ret);
5642 return ret;
5643}
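
/*
 * Illustrative sketch only: a hypothetical helper that issues a single
 * synchronous SEQUENCE to confirm the lease is still alive, using the
 * machine credential as a plausible credential source.  The example_*
 * name is invented; only nfs4_proc_sequence() above is real.
 */
#if 0
static int example_ping_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	int status;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	if (cred == NULL)
		return -EACCES;
	status = nfs4_proc_sequence(clp, cred);
	put_rpccred(cred);
	return status;
}
#endif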
5644
5645struct nfs4_reclaim_complete_data {
5646 struct nfs_client *clp;
5647 struct nfs41_reclaim_complete_args arg;
5648 struct nfs41_reclaim_complete_res res;
5649};
5650
5651static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5652{
5653 struct nfs4_reclaim_complete_data *calldata = data;
5654
5655 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5656 if (nfs41_setup_sequence(calldata->clp->cl_session,
5657 &calldata->arg.seq_args,
5658 &calldata->res.seq_res, task))
5659 return;
5660
5661 rpc_call_start(task);
5662}
5663
5664static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5665{
5666	switch (task->tk_status) {
5667 case 0:
5668 case -NFS4ERR_COMPLETE_ALREADY:
5669 case -NFS4ERR_WRONG_CRED: /* What to do here? */
5670 break;
5671 case -NFS4ERR_DELAY:
5672 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5673 /* fall through */
5674 case -NFS4ERR_RETRY_UNCACHED_REP:
5675 return -EAGAIN;
5676 default:
5677 nfs4_schedule_lease_recovery(clp);
5678 }
5679 return 0;
5680}
5681
5682static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
5683{
5684 struct nfs4_reclaim_complete_data *calldata = data;
5685 struct nfs_client *clp = calldata->clp;
5686 struct nfs4_sequence_res *res = &calldata->res.seq_res;
5687
5688 dprintk("--> %s\n", __func__);
5689 if (!nfs41_sequence_done(task, res))
5690 return;
5691
5692 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
5693 rpc_restart_call_prepare(task);
5694 return;
5695 }
5696 dprintk("<-- %s\n", __func__);
5697}
5698
5699static void nfs4_free_reclaim_complete_data(void *data)
5700{
5701 struct nfs4_reclaim_complete_data *calldata = data;
5702
5703 kfree(calldata);
5704}
5705
5706static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
5707 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
5708 .rpc_call_done = nfs4_reclaim_complete_done,
5709 .rpc_release = nfs4_free_reclaim_complete_data,
5710};
5711
5712/*
5713 * Issue a global reclaim complete.
5714 */
5715static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5716{
5717 struct nfs4_reclaim_complete_data *calldata;
5718 struct rpc_task *task;
5719 struct rpc_message msg = {
5720 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
5721 };
5722 struct rpc_task_setup task_setup_data = {
5723 .rpc_client = clp->cl_rpcclient,
5724 .rpc_message = &msg,
5725 .callback_ops = &nfs4_reclaim_complete_call_ops,
5726 .flags = RPC_TASK_ASYNC,
5727 };
5728 int status = -ENOMEM;
5729
5730 dprintk("--> %s\n", __func__);
5731 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5732 if (calldata == NULL)
5733 goto out;
5734 calldata->clp = clp;
5735 calldata->arg.one_fs = 0;
5736
5737 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
5738 msg.rpc_argp = &calldata->arg;
5739 msg.rpc_resp = &calldata->res;
5740 task_setup_data.callback_data = calldata;
5741 task = rpc_run_task(&task_setup_data);
5742 if (IS_ERR(task)) {
5743 status = PTR_ERR(task);
5744 goto out;
5745 }
5746 status = nfs4_wait_for_completion_rpc_task(task);
5747 if (status == 0)
5748 status = task->tk_status;
5749 rpc_put_task(task);
5750 return 0;
5751out:
5752 dprintk("<-- %s status=%d\n", __func__, status);
5753 return status;
5754}
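
/*
 * Note: this is wired up as the ->reclaim_complete operation in
 * nfs41_reboot_recovery_ops below, so the state manager issues it once
 * per clientid after reboot (reclaim) recovery has finished.
 */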
5755
5756static void
5757nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
5758{
5759 struct nfs4_layoutget *lgp = calldata;
5760 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5761
5762 dprintk("--> %s\n", __func__);
5763	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
5764 * right now covering the LAYOUTGET we are about to send.
5765 * However, that is not so catastrophic, and there seems
5766 * to be no way to prevent it completely.
5767 */
5768 if (nfs4_setup_sequence(server, &lgp->args.seq_args,
5769 &lgp->res.seq_res, task))
5770 return;
5771 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
5772 NFS_I(lgp->args.inode)->layout,
5773 lgp->args.ctx->state)) {
5774 rpc_exit(task, NFS4_OK);
5775 return;
5776 }
5777 rpc_call_start(task);
5778}
5779
5780static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
5781{
5782 struct nfs4_layoutget *lgp = calldata;
5783 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5784
5785 dprintk("--> %s\n", __func__);
5786
5787 if (!nfs4_sequence_done(task, &lgp->res.seq_res))
5788 return;
5789
5790 switch (task->tk_status) {
5791 case 0:
5792 break;
5793 case -NFS4ERR_LAYOUTTRYLATER:
5794 case -NFS4ERR_RECALLCONFLICT:
5795 task->tk_status = -NFS4ERR_DELAY;
5796 /* Fall through */
5797 default:
5798 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
5799 rpc_restart_call_prepare(task);
5800 return;
5801 }
5802 }
5803 dprintk("<-- %s\n", __func__);
5804}
5805
5806static void nfs4_layoutget_release(void *calldata)
5807{
5808 struct nfs4_layoutget *lgp = calldata;
5809
5810 dprintk("--> %s\n", __func__);
5811 put_nfs_open_context(lgp->args.ctx);
5812 kfree(calldata);
5813 dprintk("<-- %s\n", __func__);
5814}
5815
5816static const struct rpc_call_ops nfs4_layoutget_call_ops = {
5817 .rpc_call_prepare = nfs4_layoutget_prepare,
5818 .rpc_call_done = nfs4_layoutget_done,
5819 .rpc_release = nfs4_layoutget_release,
5820};
5821
5822int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
5823{
5824 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5825 struct rpc_task *task;
5826 struct rpc_message msg = {
5827 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
5828 .rpc_argp = &lgp->args,
5829 .rpc_resp = &lgp->res,
5830 };
5831 struct rpc_task_setup task_setup_data = {
5832 .rpc_client = server->client,
5833 .rpc_message = &msg,
5834 .callback_ops = &nfs4_layoutget_call_ops,
5835 .callback_data = lgp,
5836 .flags = RPC_TASK_ASYNC,
5837 };
5838 int status = 0;
5839
5840 dprintk("--> %s\n", __func__);
5841
5842 lgp->res.layoutp = &lgp->args.layout;
5843 lgp->res.seq_res.sr_slot = NULL;
5844 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
5845 task = rpc_run_task(&task_setup_data);
5846 if (IS_ERR(task))
5847 return PTR_ERR(task);
5848 status = nfs4_wait_for_completion_rpc_task(task);
5849 if (status == 0)
5850 status = task->tk_status;
5851 if (status == 0)
5852 status = pnfs_layout_process(lgp);
5853 rpc_put_task(task);
5854 dprintk("<-- %s status=%d\n", __func__, status);
5855 return status;
5856}
5857
5858static void
5859nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
5860{
5861 struct nfs4_layoutreturn *lrp = calldata;
5862
5863 dprintk("--> %s\n", __func__);
5864 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
5865 &lrp->res.seq_res, task))
5866 return;
5867 rpc_call_start(task);
5868}
5869
5870static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
5871{
5872 struct nfs4_layoutreturn *lrp = calldata;
5873 struct nfs_server *server;
5874 struct pnfs_layout_hdr *lo = lrp->args.layout;
5875
5876 dprintk("--> %s\n", __func__);
5877
5878 if (!nfs4_sequence_done(task, &lrp->res.seq_res))
5879 return;
5880
5881 server = NFS_SERVER(lrp->args.inode);
5882 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
5883 rpc_restart_call_prepare(task);
5884 return;
5885 }
5886 spin_lock(&lo->plh_inode->i_lock);
5887 if (task->tk_status == 0) {
5888 if (lrp->res.lrs_present) {
5889 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
5890 } else
5891 BUG_ON(!list_empty(&lo->plh_segs));
5892 }
5893 lo->plh_block_lgets--;
5894 spin_unlock(&lo->plh_inode->i_lock);
5895 dprintk("<-- %s\n", __func__);
5896}
5897
5898static void nfs4_layoutreturn_release(void *calldata)
5899{
5900 struct nfs4_layoutreturn *lrp = calldata;
5901
5902 dprintk("--> %s\n", __func__);
5903 put_layout_hdr(lrp->args.layout);
5904 kfree(calldata);
5905 dprintk("<-- %s\n", __func__);
5906}
5907
5908static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
5909 .rpc_call_prepare = nfs4_layoutreturn_prepare,
5910 .rpc_call_done = nfs4_layoutreturn_done,
5911 .rpc_release = nfs4_layoutreturn_release,
5912};
5913
5914int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
5915{
5916 struct rpc_task *task;
5917 struct rpc_message msg = {
5918 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
5919 .rpc_argp = &lrp->args,
5920 .rpc_resp = &lrp->res,
5921 };
5922 struct rpc_task_setup task_setup_data = {
5923 .rpc_client = lrp->clp->cl_rpcclient,
5924 .rpc_message = &msg,
5925 .callback_ops = &nfs4_layoutreturn_call_ops,
5926 .callback_data = lrp,
5927 };
5928 int status;
5929
5930 dprintk("--> %s\n", __func__);
5931 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
5932 task = rpc_run_task(&task_setup_data);
5933 if (IS_ERR(task))
5934 return PTR_ERR(task);
5935 status = task->tk_status;
5936 dprintk("<-- %s status=%d\n", __func__, status);
5937 rpc_put_task(task);
5938 return status;
5939}
5940
5941/*
5942 * Retrieve the list of Data Server devices from the MDS.
5943 */
5944static int _nfs4_getdevicelist(struct nfs_server *server,
5945 const struct nfs_fh *fh,
5946 struct pnfs_devicelist *devlist)
5947{
5948 struct nfs4_getdevicelist_args args = {
5949 .fh = fh,
5950 .layoutclass = server->pnfs_curr_ld->id,
5951 };
5952 struct nfs4_getdevicelist_res res = {
5953 .devlist = devlist,
5954 };
5955 struct rpc_message msg = {
5956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
5957 .rpc_argp = &args,
5958 .rpc_resp = &res,
5959 };
5960 int status;
5961
5962 dprintk("--> %s\n", __func__);
5963 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
5964 &res.seq_res, 0);
5965 dprintk("<-- %s status=%d\n", __func__, status);
5966 return status;
5967}
5968
5969int nfs4_proc_getdevicelist(struct nfs_server *server,
5970 const struct nfs_fh *fh,
5971 struct pnfs_devicelist *devlist)
5972{
5973 struct nfs4_exception exception = { };
5974 int err;
5975
5976 do {
5977 err = nfs4_handle_exception(server,
5978 _nfs4_getdevicelist(server, fh, devlist),
5979 &exception);
5980 } while (exception.retry);
5981
5982 dprintk("%s: err=%d, num_devs=%u\n", __func__,
5983 err, devlist->num_devs);
5984
5985 return err;
5986}
5987EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
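
/*
 * Illustrative sketch only: how a pNFS layout driver might fetch the
 * deviceid list for a mount point.  The example_* name is invented, and
 * the direct kzalloc() of struct pnfs_devicelist is an assumption made
 * for brevity; only nfs4_proc_getdevicelist() itself is defined above.
 */
#if 0
static int example_scan_devicelist(struct nfs_server *server,
				   const struct nfs_fh *mntfh)
{
	struct pnfs_devicelist *dlist;
	int status;

	dlist = kzalloc(sizeof(*dlist), GFP_KERNEL);
	if (dlist == NULL)
		return -ENOMEM;
	status = nfs4_proc_getdevicelist(server, mntfh, dlist);
	/* On success, dlist->num_devs deviceids were returned. */
	kfree(dlist);
	return status;
}
#endif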
5988
5989static int
5990_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
5991{
5992 struct nfs4_getdeviceinfo_args args = {
5993 .pdev = pdev,
5994 };
5995 struct nfs4_getdeviceinfo_res res = {
5996 .pdev = pdev,
5997 };
5998 struct rpc_message msg = {
5999 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6000 .rpc_argp = &args,
6001 .rpc_resp = &res,
6002 };
6003 int status;
6004
6005 dprintk("--> %s\n", __func__);
6006 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6007 dprintk("<-- %s status=%d\n", __func__, status);
6008
6009 return status;
6010}
6011
6012int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6013{
6014 struct nfs4_exception exception = { };
6015 int err;
6016
6017 do {
6018 err = nfs4_handle_exception(server,
6019 _nfs4_proc_getdeviceinfo(server, pdev),
6020 &exception);
6021 } while (exception.retry);
6022 return err;
6023}
6024EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6025
6026static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6027{
6028 struct nfs4_layoutcommit_data *data = calldata;
6029 struct nfs_server *server = NFS_SERVER(data->args.inode);
6030
6031 if (nfs4_setup_sequence(server, &data->args.seq_args,
6032 &data->res.seq_res, task))
6033 return;
6034 rpc_call_start(task);
6035}
6036
6037static void
6038nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6039{
6040 struct nfs4_layoutcommit_data *data = calldata;
6041 struct nfs_server *server = NFS_SERVER(data->args.inode);
6042
6043 if (!nfs4_sequence_done(task, &data->res.seq_res))
6044 return;
6045
6046 switch (task->tk_status) { /* Just ignore these failures */
6047	case -NFS4ERR_DELEG_REVOKED:	/* layout was recalled */
6048	case -NFS4ERR_BADIOMODE:	/* no IOMODE_RW layout for range */
6049	case -NFS4ERR_BADLAYOUT:	/* no layout */
6050	case -NFS4ERR_GRACE:		/* loca_reclaim is always false */
6051 task->tk_status = 0;
6052 }
6053
6054 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6055 rpc_restart_call_prepare(task);
6056 return;
6057 }
6058
6059 if (task->tk_status == 0)
6060 nfs_post_op_update_inode_force_wcc(data->args.inode,
6061 data->res.fattr);
6062}
6063
6064static void nfs4_layoutcommit_release(void *calldata)
6065{
6066 struct nfs4_layoutcommit_data *data = calldata;
6067 struct pnfs_layout_segment *lseg, *tmp;
6068 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6069
6070 pnfs_cleanup_layoutcommit(data);
6071 /* Matched by references in pnfs_set_layoutcommit */
6072 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6073 list_del_init(&lseg->pls_lc_list);
6074 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6075 &lseg->pls_flags))
6076 put_lseg(lseg);
6077 }
6078
6079 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6080 smp_mb__after_clear_bit();
6081 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6082
6083 put_rpccred(data->cred);
6084 kfree(data);
6085}
6086
6087static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6088 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6089 .rpc_call_done = nfs4_layoutcommit_done,
6090 .rpc_release = nfs4_layoutcommit_release,
6091};
6092
6093int
6094nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6095{
6096 struct rpc_message msg = {
6097 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6098 .rpc_argp = &data->args,
6099 .rpc_resp = &data->res,
6100 .rpc_cred = data->cred,
6101 };
6102 struct rpc_task_setup task_setup_data = {
6103 .task = &data->task,
6104 .rpc_client = NFS_CLIENT(data->args.inode),
6105 .rpc_message = &msg,
6106 .callback_ops = &nfs4_layoutcommit_ops,
6107 .callback_data = data,
6108 .flags = RPC_TASK_ASYNC,
6109 };
6110 struct rpc_task *task;
6111 int status = 0;
6112
6113 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6114 "lbw: %llu inode %lu\n",
6115 data->task.tk_pid, sync,
6116 data->args.lastbytewritten,
6117 data->args.inode->i_ino);
6118
6119 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6120 task = rpc_run_task(&task_setup_data);
6121 if (IS_ERR(task))
6122 return PTR_ERR(task);
6123	if (!sync)
6124 goto out;
6125 status = nfs4_wait_for_completion_rpc_task(task);
6126 if (status != 0)
6127 goto out;
6128 status = task->tk_status;
6129out:
6130 dprintk("%s: status %d\n", __func__, status);
6131 rpc_put_task(task);
6132 return status;
6133}
6134
6135static int
6136_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6137 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6138{
6139 struct nfs41_secinfo_no_name_args args = {
6140 .style = SECINFO_STYLE_CURRENT_FH,
6141 };
6142 struct nfs4_secinfo_res res = {
6143 .flavors = flavors,
6144 };
6145 struct rpc_message msg = {
6146 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6147 .rpc_argp = &args,
6148 .rpc_resp = &res,
6149 };
6150 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6151}
6152
6153static int
6154nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6155 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6156{
6157 struct nfs4_exception exception = { };
6158 int err;
6159 do {
6160 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6161 switch (err) {
6162 case 0:
6163 case -NFS4ERR_WRONGSEC:
6164 case -NFS4ERR_NOTSUPP:
6165 break;
6166 default:
6167 err = nfs4_handle_exception(server, err, &exception);
6168 }
6169 } while (exception.retry);
6170 return err;
6171}
6172
6173static int
6174nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6175 struct nfs_fsinfo *info)
6176{
6177 int err;
6178 struct page *page;
6179 rpc_authflavor_t flavor;
6180 struct nfs4_secinfo_flavors *flavors;
6181
6182 page = alloc_page(GFP_KERNEL);
6183 if (!page) {
6184 err = -ENOMEM;
6185 goto out;
6186 }
6187
6188 flavors = page_address(page);
6189 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6190
6191 /*
6192 * Fall back on "guess and check" method if
6193 * the server doesn't support SECINFO_NO_NAME
6194 */
6195 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6196 err = nfs4_find_root_sec(server, fhandle, info);
6197 goto out_freepage;
6198 }
6199 if (err)
6200 goto out_freepage;
6201
6202 flavor = nfs_find_best_sec(flavors);
6203 if (err == 0)
6204 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6205
6206out_freepage:
6207 put_page(page);
6208 if (err == -EACCES)
6209 return -EPERM;
6210out:
6211 return err;
6212}
6213
6214static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6215{
6216 int status;
6217 struct nfs41_test_stateid_args args = {
6218 .stateid = stateid,
6219 };
6220 struct nfs41_test_stateid_res res;
6221 struct rpc_message msg = {
6222 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6223 .rpc_argp = &args,
6224 .rpc_resp = &res,
6225 };
6226
6227 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6228 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6229
6230 if (status == NFS_OK)
6231 return res.status;
6232 return status;
6233}
6234
6235static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6236{
6237 struct nfs4_exception exception = { };
6238 int err;
6239 do {
6240 err = nfs4_handle_exception(server,
6241 _nfs41_test_stateid(server, stateid),
6242 &exception);
6243 } while (exception.retry);
6244 return err;
6245}
6246
6247static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6248{
6249 struct nfs41_free_stateid_args args = {
6250 .stateid = stateid,
6251 };
6252 struct nfs41_free_stateid_res res;
6253 struct rpc_message msg = {
6254 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6255 .rpc_argp = &args,
6256 .rpc_resp = &res,
6257 };
6258
6259 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6260 return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6261}
6262
6263static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6264{
6265 struct nfs4_exception exception = { };
6266 int err;
6267 do {
6268 err = nfs4_handle_exception(server,
6269 _nfs4_free_stateid(server, stateid),
6270 &exception);
6271 } while (exception.retry);
6272 return err;
6273}
6274
6275static bool nfs41_match_stateid(const nfs4_stateid *s1,
6276 const nfs4_stateid *s2)
6277{
6278 if (memcmp(s1->stateid.other, s2->stateid.other,
6279 sizeof(s1->stateid.other)) != 0)
6280 return false;
6281
6282 if (s1->stateid.seqid == s2->stateid.seqid)
6283 return true;
6284 if (s1->stateid.seqid == 0 || s2->stateid.seqid == 0)
6285 return true;
6286
6287 return false;
6288}
6289
6290#endif /* CONFIG_NFS_V4_1 */
6291
6292static bool nfs4_match_stateid(const nfs4_stateid *s1,
6293 const nfs4_stateid *s2)
6294{
6295 return memcmp(s1->data, s2->data, sizeof(s1->data)) == 0;
6296}
6297
6298
6299struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6300 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6301 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6302 .recover_open = nfs4_open_reclaim,
6303 .recover_lock = nfs4_lock_reclaim,
6304 .establish_clid = nfs4_init_clientid,
6305 .get_clid_cred = nfs4_get_setclientid_cred,
6306};
6307
6308#if defined(CONFIG_NFS_V4_1)
6309struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6310 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6311 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6312 .recover_open = nfs4_open_reclaim,
6313 .recover_lock = nfs4_lock_reclaim,
6314 .establish_clid = nfs41_init_clientid,
6315 .get_clid_cred = nfs4_get_exchange_id_cred,
6316 .reclaim_complete = nfs41_proc_reclaim_complete,
6317};
6318#endif /* CONFIG_NFS_V4_1 */
6319
6320struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
6321 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6322 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6323 .recover_open = nfs4_open_expired,
6324 .recover_lock = nfs4_lock_expired,
6325 .establish_clid = nfs4_init_clientid,
6326 .get_clid_cred = nfs4_get_setclientid_cred,
6327};
6328
6329#if defined(CONFIG_NFS_V4_1)
6330struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
6331 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6332 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6333 .recover_open = nfs41_open_expired,
6334 .recover_lock = nfs41_lock_expired,
6335 .establish_clid = nfs41_init_clientid,
6336 .get_clid_cred = nfs4_get_exchange_id_cred,
6337};
6338#endif /* CONFIG_NFS_V4_1 */
6339
6340struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
6341 .sched_state_renewal = nfs4_proc_async_renew,
6342 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
6343 .renew_lease = nfs4_proc_renew,
6344};
6345
6346#if defined(CONFIG_NFS_V4_1)
6347struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
6348 .sched_state_renewal = nfs41_proc_async_sequence,
6349 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
6350 .renew_lease = nfs4_proc_sequence,
6351};
6352#endif
6353
6354static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
6355 .minor_version = 0,
6356 .call_sync = _nfs4_call_sync,
6357 .match_stateid = nfs4_match_stateid,
6358 .find_root_sec = nfs4_find_root_sec,
6359 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
6360 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
6361 .state_renewal_ops = &nfs40_state_renewal_ops,
6362};
6363
6364#if defined(CONFIG_NFS_V4_1)
6365static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
6366 .minor_version = 1,
6367 .call_sync = _nfs4_call_sync_session,
6368 .match_stateid = nfs41_match_stateid,
6369 .find_root_sec = nfs41_find_root_sec,
6370 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
6371 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
6372 .state_renewal_ops = &nfs41_state_renewal_ops,
6373};
6374#endif
6375
6376const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
6377 [0] = &nfs_v4_0_minor_ops,
6378#if defined(CONFIG_NFS_V4_1)
6379 [1] = &nfs_v4_1_minor_ops,
6380#endif
6381};
6382
6383static const struct inode_operations nfs4_file_inode_operations = {
6384 .permission = nfs_permission,
6385 .getattr = nfs_getattr,
6386 .setattr = nfs_setattr,
6387 .getxattr = generic_getxattr,
6388 .setxattr = generic_setxattr,
6389 .listxattr = generic_listxattr,
6390 .removexattr = generic_removexattr,
6391};
6392
6393const struct nfs_rpc_ops nfs_v4_clientops = {
6394 .version = 4, /* protocol version */
6395 .dentry_ops = &nfs4_dentry_operations,
6396 .dir_inode_ops = &nfs4_dir_inode_operations,
6397 .file_inode_ops = &nfs4_file_inode_operations,
6398 .file_ops = &nfs4_file_operations,
6399 .getroot = nfs4_proc_get_root,
6400 .getattr = nfs4_proc_getattr,
6401 .setattr = nfs4_proc_setattr,
6402 .lookup = nfs4_proc_lookup,
6403 .access = nfs4_proc_access,
6404 .readlink = nfs4_proc_readlink,
6405 .create = nfs4_proc_create,
6406 .remove = nfs4_proc_remove,
6407 .unlink_setup = nfs4_proc_unlink_setup,
6408 .unlink_done = nfs4_proc_unlink_done,
6409 .rename = nfs4_proc_rename,
6410 .rename_setup = nfs4_proc_rename_setup,
6411 .rename_done = nfs4_proc_rename_done,
6412 .link = nfs4_proc_link,
6413 .symlink = nfs4_proc_symlink,
6414 .mkdir = nfs4_proc_mkdir,
6415 .rmdir = nfs4_proc_remove,
6416 .readdir = nfs4_proc_readdir,
6417 .mknod = nfs4_proc_mknod,
6418 .statfs = nfs4_proc_statfs,
6419 .fsinfo = nfs4_proc_fsinfo,
6420 .pathconf = nfs4_proc_pathconf,
6421 .set_capabilities = nfs4_server_capabilities,
6422 .decode_dirent = nfs4_decode_dirent,
6423 .read_setup = nfs4_proc_read_setup,
6424 .read_done = nfs4_read_done,
6425 .write_setup = nfs4_proc_write_setup,
6426 .write_done = nfs4_write_done,
6427 .commit_setup = nfs4_proc_commit_setup,
6428 .commit_done = nfs4_commit_done,
6429 .lock = nfs4_proc_lock,
6430 .clear_acl_cache = nfs4_zap_acl_attr,
6431 .close_context = nfs4_close_context,
6432 .open_context = nfs4_atomic_open,
6433 .init_client = nfs4_init_client,
6434 .secinfo = nfs4_proc_secinfo,
6435};
6436
6437static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
6438 .prefix = XATTR_NAME_NFSV4_ACL,
6439 .list = nfs4_xattr_list_nfs4_acl,
6440 .get = nfs4_xattr_get_nfs4_acl,
6441 .set = nfs4_xattr_set_nfs4_acl,
6442};
6443
6444const struct xattr_handler *nfs4_xattr_handlers[] = {
6445 &nfs4_xattr_nfs4_acl_handler,
6446 NULL
6447};
6448
6449module_param(max_session_slots, ushort, 0644);
6450MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
6451 "requests the client will negotiate");
6452
6453/*
6454 * Local variables:
6455 * c-basic-offset: 8
6456 * End:
6457 */