nfsd: fix laundromat next-run-time calculation
[linux-2.6-block.git] / fs / nfsd / nfs4state.c
... / ...
CommitLineData
1/*
2* Copyright (c) 2001 The Regents of the University of Michigan.
3* All rights reserved.
4*
5* Kendrick Smith <kmsmith@umich.edu>
6* Andy Adamson <kandros@umich.edu>
7*
8* Redistribution and use in source and binary forms, with or without
9* modification, are permitted provided that the following conditions
10* are met:
11*
12* 1. Redistributions of source code must retain the above copyright
13* notice, this list of conditions and the following disclaimer.
14* 2. Redistributions in binary form must reproduce the above copyright
15* notice, this list of conditions and the following disclaimer in the
16* documentation and/or other materials provided with the distribution.
17* 3. Neither the name of the University nor the names of its
18* contributors may be used to endorse or promote products derived
19* from this software without specific prior written permission.
20*
21* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32*
33*/
34
35#include <linux/file.h>
36#include <linux/fs.h>
37#include <linux/slab.h>
38#include <linux/namei.h>
39#include <linux/swap.h>
40#include <linux/pagemap.h>
41#include <linux/ratelimit.h>
42#include <linux/sunrpc/svcauth_gss.h>
43#include <linux/sunrpc/addr.h>
44#include "xdr4.h"
45#include "xdr4cb.h"
46#include "vfs.h"
47#include "current_stateid.h"
48
49#include "netns.h"
50
51#define NFSDDBG_FACILITY NFSDDBG_PROC
52
53#define all_ones {{~0,~0},~0}
54static const stateid_t one_stateid = {
55 .si_generation = ~0,
56 .si_opaque = all_ones,
57};
58static const stateid_t zero_stateid = {
59 /* all fields zero */
60};
61static const stateid_t currentstateid = {
62 .si_generation = 1,
63};
64
65static u64 current_sessionid = 1;
66
67#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
68#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
69#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
70
71/* forward declarations */
72static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
73
74/* Locking: */
75
76/* Currently used for almost all code touching nfsv4 state: */
77static DEFINE_MUTEX(client_mutex);
78
79/*
80 * Currently used for the del_recall_lru and file hash table. In an
81 * effort to decrease the scope of the client_mutex, this spinlock may
82 * eventually cover more:
83 */
84static DEFINE_SPINLOCK(recall_lock);
85
86static struct kmem_cache *openowner_slab;
87static struct kmem_cache *lockowner_slab;
88static struct kmem_cache *file_slab;
89static struct kmem_cache *stateid_slab;
90static struct kmem_cache *deleg_slab;
91
92void
93nfs4_lock_state(void)
94{
95 mutex_lock(&client_mutex);
96}
97
98static void free_session(struct nfsd4_session *);
99
100static bool is_session_dead(struct nfsd4_session *ses)
101{
102 return ses->se_flags & NFS4_SESSION_DEAD;
103}
104
105void nfsd4_put_session(struct nfsd4_session *ses)
106{
107 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
108 free_session(ses);
109}
110
111static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
112{
113 if (atomic_read(&ses->se_ref) > ref_held_by_me)
114 return nfserr_jukebox;
115 ses->se_flags |= NFS4_SESSION_DEAD;
116 return nfs_ok;
117}
118
119static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
120{
121 if (is_session_dead(ses))
122 return nfserr_badsession;
123 atomic_inc(&ses->se_ref);
124 return nfs_ok;
125}
126
127void
128nfs4_unlock_state(void)
129{
130 mutex_unlock(&client_mutex);
131}
132
133static bool is_client_expired(struct nfs4_client *clp)
134{
135 return clp->cl_time == 0;
136}
137
138static __be32 mark_client_expired_locked(struct nfs4_client *clp)
139{
140 if (atomic_read(&clp->cl_refcount))
141 return nfserr_jukebox;
142 clp->cl_time = 0;
143 return nfs_ok;
144}
145
146static __be32 mark_client_expired(struct nfs4_client *clp)
147{
148 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
149 __be32 ret;
150
151 spin_lock(&nn->client_lock);
152 ret = mark_client_expired_locked(clp);
153 spin_unlock(&nn->client_lock);
154 return ret;
155}
156
157static __be32 get_client_locked(struct nfs4_client *clp)
158{
159 if (is_client_expired(clp))
160 return nfserr_expired;
161 atomic_inc(&clp->cl_refcount);
162 return nfs_ok;
163}
164
165/* must be called under the client_lock */
166static inline void
167renew_client_locked(struct nfs4_client *clp)
168{
169 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
170
171 if (is_client_expired(clp)) {
172 WARN_ON(1);
173 printk("%s: client (clientid %08x/%08x) already expired\n",
174 __func__,
175 clp->cl_clientid.cl_boot,
176 clp->cl_clientid.cl_id);
177 return;
178 }
179
180 dprintk("renewing client (clientid %08x/%08x)\n",
181 clp->cl_clientid.cl_boot,
182 clp->cl_clientid.cl_id);
183 list_move_tail(&clp->cl_lru, &nn->client_lru);
184 clp->cl_time = get_seconds();
185}
186
187static inline void
188renew_client(struct nfs4_client *clp)
189{
190 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
191
192 spin_lock(&nn->client_lock);
193 renew_client_locked(clp);
194 spin_unlock(&nn->client_lock);
195}
196
197static void put_client_renew_locked(struct nfs4_client *clp)
198{
199 if (!atomic_dec_and_test(&clp->cl_refcount))
200 return;
201 if (!is_client_expired(clp))
202 renew_client_locked(clp);
203}
204
205void put_client_renew(struct nfs4_client *clp)
206{
207 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
208
209 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
210 return;
211 if (!is_client_expired(clp))
212 renew_client_locked(clp);
213 spin_unlock(&nn->client_lock);
214}
215
216
217static inline u32
218opaque_hashval(const void *ptr, int nbytes)
219{
220 unsigned char *cptr = (unsigned char *) ptr;
221
222 u32 x = 0;
223 while (nbytes--) {
224 x *= 37;
225 x += *cptr++;
226 }
227 return x;
228}
229
230static void nfsd4_free_file(struct nfs4_file *f)
231{
232 kmem_cache_free(file_slab, f);
233}
234
235static inline void
236put_nfs4_file(struct nfs4_file *fi)
237{
238 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
239 hlist_del(&fi->fi_hash);
240 spin_unlock(&recall_lock);
241 iput(fi->fi_inode);
242 nfsd4_free_file(fi);
243 }
244}
245
246static inline void
247get_nfs4_file(struct nfs4_file *fi)
248{
249 atomic_inc(&fi->fi_ref);
250}
251
252static int num_delegations;
253unsigned long max_delegations;
254
255/*
256 * Open owner state (share locks)
257 */
258
259/* hash tables for lock and open owners */
260#define OWNER_HASH_BITS 8
261#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
262#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
263
264static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
265{
266 unsigned int ret;
267
268 ret = opaque_hashval(ownername->data, ownername->len);
269 ret += clientid;
270 return ret & OWNER_HASH_MASK;
271}
272
273/* hash table for nfs4_file */
274#define FILE_HASH_BITS 8
275#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
276
277static unsigned int file_hashval(struct inode *ino)
278{
279 /* XXX: why are we hashing on inode pointer, anyway? */
280 return hash_ptr(ino, FILE_HASH_BITS);
281}
282
283static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
284
285static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
286{
287 WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
288 atomic_inc(&fp->fi_access[oflag]);
289}
290
291static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
292{
293 if (oflag == O_RDWR) {
294 __nfs4_file_get_access(fp, O_RDONLY);
295 __nfs4_file_get_access(fp, O_WRONLY);
296 } else
297 __nfs4_file_get_access(fp, oflag);
298}
299
300static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
301{
302 if (fp->fi_fds[oflag]) {
303 fput(fp->fi_fds[oflag]);
304 fp->fi_fds[oflag] = NULL;
305 }
306}
307
308static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
309{
310 if (atomic_dec_and_test(&fp->fi_access[oflag])) {
311 nfs4_file_put_fd(fp, oflag);
312 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
313 nfs4_file_put_fd(fp, O_RDWR);
314 }
315}
316
317static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
318{
319 if (oflag == O_RDWR) {
320 __nfs4_file_put_access(fp, O_RDONLY);
321 __nfs4_file_put_access(fp, O_WRONLY);
322 } else
323 __nfs4_file_put_access(fp, oflag);
324}
325
326static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
327kmem_cache *slab)
328{
329 struct idr *stateids = &cl->cl_stateids;
330 struct nfs4_stid *stid;
331 int new_id;
332
333 stid = kmem_cache_alloc(slab, GFP_KERNEL);
334 if (!stid)
335 return NULL;
336
337 new_id = idr_alloc_cyclic(stateids, stid, 0, 0, GFP_KERNEL);
338 if (new_id < 0)
339 goto out_free;
340 stid->sc_client = cl;
341 stid->sc_type = 0;
342 stid->sc_stateid.si_opaque.so_id = new_id;
343 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
344 /* Will be incremented before return to client: */
345 stid->sc_stateid.si_generation = 0;
346
347 /*
348 * It shouldn't be a problem to reuse an opaque stateid value.
349 * I don't think it is for 4.1. But with 4.0 I worry that, for
350 * example, a stray write retransmission could be accepted by
351 * the server when it should have been rejected. Therefore,
352 * adopt a trick from the sctp code to attempt to maximize the
353 * amount of time until an id is reused, by ensuring they always
354 * "increase" (mod INT_MAX):
355 */
356 return stid;
357out_free:
358 kmem_cache_free(slab, stid);
359 return NULL;
360}
361
362static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
363{
364 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
365}
366
367static struct nfs4_delegation *
368alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
369{
370 struct nfs4_delegation *dp;
371
372 dprintk("NFSD alloc_init_deleg\n");
373 if (num_delegations > max_delegations)
374 return NULL;
375 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
376 if (dp == NULL)
377 return dp;
378 dp->dl_stid.sc_type = NFS4_DELEG_STID;
379 /*
380 * delegation seqid's are never incremented. The 4.1 special
381 * meaning of seqid 0 isn't meaningful, really, but let's avoid
382 * 0 anyway just for consistency and use 1:
383 */
384 dp->dl_stid.sc_stateid.si_generation = 1;
385 num_delegations++;
386 INIT_LIST_HEAD(&dp->dl_perfile);
387 INIT_LIST_HEAD(&dp->dl_perclnt);
388 INIT_LIST_HEAD(&dp->dl_recall_lru);
389 dp->dl_file = NULL;
390 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
391 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
392 dp->dl_time = 0;
393 atomic_set(&dp->dl_count, 1);
394 nfsd4_init_callback(&dp->dl_recall);
395 return dp;
396}
397
398static void remove_stid(struct nfs4_stid *s)
399{
400 struct idr *stateids = &s->sc_client->cl_stateids;
401
402 idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
403}
404
405static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s)
406{
407 kmem_cache_free(slab, s);
408}
409
410void
411nfs4_put_delegation(struct nfs4_delegation *dp)
412{
413 if (atomic_dec_and_test(&dp->dl_count)) {
414 nfs4_free_stid(deleg_slab, &dp->dl_stid);
415 num_delegations--;
416 }
417}
418
419static void nfs4_put_deleg_lease(struct nfs4_file *fp)
420{
421 if (!fp->fi_lease)
422 return;
423 if (atomic_dec_and_test(&fp->fi_delegees)) {
424 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
425 fp->fi_lease = NULL;
426 fput(fp->fi_deleg_file);
427 fp->fi_deleg_file = NULL;
428 }
429}
430
431static void unhash_stid(struct nfs4_stid *s)
432{
433 s->sc_type = 0;
434}
435
436/* Called under the state lock. */
437static void
438unhash_delegation(struct nfs4_delegation *dp)
439{
440 list_del_init(&dp->dl_perclnt);
441 spin_lock(&recall_lock);
442 list_del_init(&dp->dl_perfile);
443 list_del_init(&dp->dl_recall_lru);
444 spin_unlock(&recall_lock);
445 if (dp->dl_file) {
446 nfs4_put_deleg_lease(dp->dl_file);
447 put_nfs4_file(dp->dl_file);
448 dp->dl_file = NULL;
449 }
450}
451
452
453
454static void destroy_revoked_delegation(struct nfs4_delegation *dp)
455{
456 list_del_init(&dp->dl_recall_lru);
457 remove_stid(&dp->dl_stid);
458 nfs4_put_delegation(dp);
459}
460
461static void destroy_delegation(struct nfs4_delegation *dp)
462{
463 unhash_delegation(dp);
464 remove_stid(&dp->dl_stid);
465 nfs4_put_delegation(dp);
466}
467
468static void revoke_delegation(struct nfs4_delegation *dp)
469{
470 struct nfs4_client *clp = dp->dl_stid.sc_client;
471
472 if (clp->cl_minorversion == 0)
473 destroy_delegation(dp);
474 else {
475 unhash_delegation(dp);
476 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
477 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
478 }
479}
480
481/*
482 * SETCLIENTID state
483 */
484
485static unsigned int clientid_hashval(u32 id)
486{
487 return id & CLIENT_HASH_MASK;
488}
489
490static unsigned int clientstr_hashval(const char *name)
491{
492 return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
493}
494
495/*
496 * We store the NONE, READ, WRITE, and BOTH bits separately in the
497 * st_{access,deny}_bmap field of the stateid, in order to track not
498 * only what share bits are currently in force, but also what
499 * combinations of share bits previous opens have used. This allows us
500 * to enforce the recommendation of rfc 3530 14.2.19 that the server
501 * return an error if the client attempt to downgrade to a combination
502 * of share bits not explicable by closing some of its previous opens.
503 *
504 * XXX: This enforcement is actually incomplete, since we don't keep
505 * track of access/deny bit combinations; so, e.g., we allow:
506 *
507 * OPEN allow read, deny write
508 * OPEN allow both, deny none
509 * DOWNGRADE allow read, deny none
510 *
511 * which we should reject.
512 */
513static unsigned int
514bmap_to_share_mode(unsigned long bmap) {
515 int i;
516 unsigned int access = 0;
517
518 for (i = 1; i < 4; i++) {
519 if (test_bit(i, &bmap))
520 access |= i;
521 }
522 return access;
523}
524
525static bool
526test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
527 unsigned int access, deny;
528
529 access = bmap_to_share_mode(stp->st_access_bmap);
530 deny = bmap_to_share_mode(stp->st_deny_bmap);
531 if ((access & open->op_share_deny) || (deny & open->op_share_access))
532 return false;
533 return true;
534}
535
536/* set share access for a given stateid */
537static inline void
538set_access(u32 access, struct nfs4_ol_stateid *stp)
539{
540 __set_bit(access, &stp->st_access_bmap);
541}
542
543/* clear share access for a given stateid */
544static inline void
545clear_access(u32 access, struct nfs4_ol_stateid *stp)
546{
547 __clear_bit(access, &stp->st_access_bmap);
548}
549
550/* test whether a given stateid has access */
551static inline bool
552test_access(u32 access, struct nfs4_ol_stateid *stp)
553{
554 return test_bit(access, &stp->st_access_bmap);
555}
556
557/* set share deny for a given stateid */
558static inline void
559set_deny(u32 access, struct nfs4_ol_stateid *stp)
560{
561 __set_bit(access, &stp->st_deny_bmap);
562}
563
564/* clear share deny for a given stateid */
565static inline void
566clear_deny(u32 access, struct nfs4_ol_stateid *stp)
567{
568 __clear_bit(access, &stp->st_deny_bmap);
569}
570
571/* test whether a given stateid is denying specific access */
572static inline bool
573test_deny(u32 access, struct nfs4_ol_stateid *stp)
574{
575 return test_bit(access, &stp->st_deny_bmap);
576}
577
578static int nfs4_access_to_omode(u32 access)
579{
580 switch (access & NFS4_SHARE_ACCESS_BOTH) {
581 case NFS4_SHARE_ACCESS_READ:
582 return O_RDONLY;
583 case NFS4_SHARE_ACCESS_WRITE:
584 return O_WRONLY;
585 case NFS4_SHARE_ACCESS_BOTH:
586 return O_RDWR;
587 }
588 WARN_ON_ONCE(1);
589 return O_RDONLY;
590}
591
592/* release all access and file references for a given stateid */
593static void
594release_all_access(struct nfs4_ol_stateid *stp)
595{
596 int i;
597
598 for (i = 1; i < 4; i++) {
599 if (test_access(i, stp))
600 nfs4_file_put_access(stp->st_file,
601 nfs4_access_to_omode(i));
602 clear_access(i, stp);
603 }
604}
605
606static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
607{
608 list_del(&stp->st_perfile);
609 list_del(&stp->st_perstateowner);
610}
611
612static void close_generic_stateid(struct nfs4_ol_stateid *stp)
613{
614 release_all_access(stp);
615 put_nfs4_file(stp->st_file);
616 stp->st_file = NULL;
617}
618
619static void free_generic_stateid(struct nfs4_ol_stateid *stp)
620{
621 remove_stid(&stp->st_stid);
622 nfs4_free_stid(stateid_slab, &stp->st_stid);
623}
624
625static void release_lock_stateid(struct nfs4_ol_stateid *stp)
626{
627 struct file *file;
628
629 unhash_generic_stateid(stp);
630 unhash_stid(&stp->st_stid);
631 file = find_any_file(stp->st_file);
632 if (file)
633 locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
634 close_generic_stateid(stp);
635 free_generic_stateid(stp);
636}
637
638static void unhash_lockowner(struct nfs4_lockowner *lo)
639{
640 struct nfs4_ol_stateid *stp;
641
642 list_del(&lo->lo_owner.so_strhash);
643 list_del(&lo->lo_perstateid);
644 list_del(&lo->lo_owner_ino_hash);
645 while (!list_empty(&lo->lo_owner.so_stateids)) {
646 stp = list_first_entry(&lo->lo_owner.so_stateids,
647 struct nfs4_ol_stateid, st_perstateowner);
648 release_lock_stateid(stp);
649 }
650}
651
652static void nfs4_free_lockowner(struct nfs4_lockowner *lo)
653{
654 kfree(lo->lo_owner.so_owner.data);
655 kmem_cache_free(lockowner_slab, lo);
656}
657
658static void release_lockowner(struct nfs4_lockowner *lo)
659{
660 unhash_lockowner(lo);
661 nfs4_free_lockowner(lo);
662}
663
664static void
665release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
666{
667 struct nfs4_lockowner *lo;
668
669 while (!list_empty(&open_stp->st_lockowners)) {
670 lo = list_entry(open_stp->st_lockowners.next,
671 struct nfs4_lockowner, lo_perstateid);
672 release_lockowner(lo);
673 }
674}
675
676static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
677{
678 unhash_generic_stateid(stp);
679 release_stateid_lockowners(stp);
680 close_generic_stateid(stp);
681}
682
683static void release_open_stateid(struct nfs4_ol_stateid *stp)
684{
685 unhash_open_stateid(stp);
686 free_generic_stateid(stp);
687}
688
689static void unhash_openowner(struct nfs4_openowner *oo)
690{
691 struct nfs4_ol_stateid *stp;
692
693 list_del(&oo->oo_owner.so_strhash);
694 list_del(&oo->oo_perclient);
695 while (!list_empty(&oo->oo_owner.so_stateids)) {
696 stp = list_first_entry(&oo->oo_owner.so_stateids,
697 struct nfs4_ol_stateid, st_perstateowner);
698 release_open_stateid(stp);
699 }
700}
701
702static void release_last_closed_stateid(struct nfs4_openowner *oo)
703{
704 struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
705
706 if (s) {
707 free_generic_stateid(s);
708 oo->oo_last_closed_stid = NULL;
709 }
710}
711
712static void nfs4_free_openowner(struct nfs4_openowner *oo)
713{
714 kfree(oo->oo_owner.so_owner.data);
715 kmem_cache_free(openowner_slab, oo);
716}
717
718static void release_openowner(struct nfs4_openowner *oo)
719{
720 unhash_openowner(oo);
721 list_del(&oo->oo_close_lru);
722 release_last_closed_stateid(oo);
723 nfs4_free_openowner(oo);
724}
725
726static inline int
727hash_sessionid(struct nfs4_sessionid *sessionid)
728{
729 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
730
731 return sid->sequence % SESSION_HASH_SIZE;
732}
733
734#ifdef NFSD_DEBUG
735static inline void
736dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
737{
738 u32 *ptr = (u32 *)(&sessionid->data[0]);
739 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
740}
741#else
742static inline void
743dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
744{
745}
746#endif
747
748/*
749 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
750 * won't be used for replay.
751 */
752void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
753{
754 struct nfs4_stateowner *so = cstate->replay_owner;
755
756 if (nfserr == nfserr_replay_me)
757 return;
758
759 if (!seqid_mutating_err(ntohl(nfserr))) {
760 cstate->replay_owner = NULL;
761 return;
762 }
763 if (!so)
764 return;
765 if (so->so_is_open_owner)
766 release_last_closed_stateid(openowner(so));
767 so->so_seqid++;
768 return;
769}
770
771static void
772gen_sessionid(struct nfsd4_session *ses)
773{
774 struct nfs4_client *clp = ses->se_client;
775 struct nfsd4_sessionid *sid;
776
777 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
778 sid->clientid = clp->cl_clientid;
779 sid->sequence = current_sessionid++;
780 sid->reserved = 0;
781}
782
783/*
784 * The protocol defines ca_maxresponssize_cached to include the size of
785 * the rpc header, but all we need to cache is the data starting after
786 * the end of the initial SEQUENCE operation--the rest we regenerate
787 * each time. Therefore we can advertise a ca_maxresponssize_cached
788 * value that is the number of bytes in our cache plus a few additional
789 * bytes. In order to stay on the safe side, and not promise more than
790 * we can cache, those additional bytes must be the minimum possible: 24
791 * bytes of rpc header (xid through accept state, with AUTH_NULL
792 * verifier), 12 for the compound header (with zero-length tag), and 44
793 * for the SEQUENCE op response:
794 */
795#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
796
797static void
798free_session_slots(struct nfsd4_session *ses)
799{
800 int i;
801
802 for (i = 0; i < ses->se_fchannel.maxreqs; i++)
803 kfree(ses->se_slots[i]);
804}
805
806/*
807 * We don't actually need to cache the rpc and session headers, so we
808 * can allocate a little less for each slot:
809 */
810static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
811{
812 u32 size;
813
814 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
815 size = 0;
816 else
817 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
818 return size + sizeof(struct nfsd4_slot);
819}
820
821/*
822 * XXX: If we run out of reserved DRC memory we could (up to a point)
823 * re-negotiate active sessions and reduce their slot usage to make
824 * room for new connections. For now we just fail the create session.
825 */
826static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
827{
828 u32 slotsize = slot_bytes(ca);
829 u32 num = ca->maxreqs;
830 int avail;
831
832 spin_lock(&nfsd_drc_lock);
833 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
834 nfsd_drc_max_mem - nfsd_drc_mem_used);
835 num = min_t(int, num, avail / slotsize);
836 nfsd_drc_mem_used += num * slotsize;
837 spin_unlock(&nfsd_drc_lock);
838
839 return num;
840}
841
842static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
843{
844 int slotsize = slot_bytes(ca);
845
846 spin_lock(&nfsd_drc_lock);
847 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
848 spin_unlock(&nfsd_drc_lock);
849}
850
851static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
852 struct nfsd4_channel_attrs *battrs)
853{
854 int numslots = fattrs->maxreqs;
855 int slotsize = slot_bytes(fattrs);
856 struct nfsd4_session *new;
857 int mem, i;
858
859 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
860 + sizeof(struct nfsd4_session) > PAGE_SIZE);
861 mem = numslots * sizeof(struct nfsd4_slot *);
862
863 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
864 if (!new)
865 return NULL;
866 /* allocate each struct nfsd4_slot and data cache in one piece */
867 for (i = 0; i < numslots; i++) {
868 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
869 if (!new->se_slots[i])
870 goto out_free;
871 }
872
873 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
874 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
875
876 return new;
877out_free:
878 while (i--)
879 kfree(new->se_slots[i]);
880 kfree(new);
881 return NULL;
882}
883
884static void free_conn(struct nfsd4_conn *c)
885{
886 svc_xprt_put(c->cn_xprt);
887 kfree(c);
888}
889
890static void nfsd4_conn_lost(struct svc_xpt_user *u)
891{
892 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
893 struct nfs4_client *clp = c->cn_session->se_client;
894
895 spin_lock(&clp->cl_lock);
896 if (!list_empty(&c->cn_persession)) {
897 list_del(&c->cn_persession);
898 free_conn(c);
899 }
900 nfsd4_probe_callback(clp);
901 spin_unlock(&clp->cl_lock);
902}
903
904static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
905{
906 struct nfsd4_conn *conn;
907
908 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
909 if (!conn)
910 return NULL;
911 svc_xprt_get(rqstp->rq_xprt);
912 conn->cn_xprt = rqstp->rq_xprt;
913 conn->cn_flags = flags;
914 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
915 return conn;
916}
917
918static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
919{
920 conn->cn_session = ses;
921 list_add(&conn->cn_persession, &ses->se_conns);
922}
923
924static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
925{
926 struct nfs4_client *clp = ses->se_client;
927
928 spin_lock(&clp->cl_lock);
929 __nfsd4_hash_conn(conn, ses);
930 spin_unlock(&clp->cl_lock);
931}
932
933static int nfsd4_register_conn(struct nfsd4_conn *conn)
934{
935 conn->cn_xpt_user.callback = nfsd4_conn_lost;
936 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
937}
938
939static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
940{
941 int ret;
942
943 nfsd4_hash_conn(conn, ses);
944 ret = nfsd4_register_conn(conn);
945 if (ret)
946 /* oops; xprt is already down: */
947 nfsd4_conn_lost(&conn->cn_xpt_user);
948 if (conn->cn_flags & NFS4_CDFC4_BACK) {
949 /* callback channel may be back up */
950 nfsd4_probe_callback(ses->se_client);
951 }
952}
953
954static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
955{
956 u32 dir = NFS4_CDFC4_FORE;
957
958 if (cses->flags & SESSION4_BACK_CHAN)
959 dir |= NFS4_CDFC4_BACK;
960 return alloc_conn(rqstp, dir);
961}
962
963/* must be called under client_lock */
964static void nfsd4_del_conns(struct nfsd4_session *s)
965{
966 struct nfs4_client *clp = s->se_client;
967 struct nfsd4_conn *c;
968
969 spin_lock(&clp->cl_lock);
970 while (!list_empty(&s->se_conns)) {
971 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
972 list_del_init(&c->cn_persession);
973 spin_unlock(&clp->cl_lock);
974
975 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
976 free_conn(c);
977
978 spin_lock(&clp->cl_lock);
979 }
980 spin_unlock(&clp->cl_lock);
981}
982
983static void __free_session(struct nfsd4_session *ses)
984{
985 free_session_slots(ses);
986 kfree(ses);
987}
988
989static void free_session(struct nfsd4_session *ses)
990{
991 struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
992
993 lockdep_assert_held(&nn->client_lock);
994 nfsd4_del_conns(ses);
995 nfsd4_put_drc_mem(&ses->se_fchannel);
996 __free_session(ses);
997}
998
999static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1000{
1001 int idx;
1002 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1003
1004 new->se_client = clp;
1005 gen_sessionid(new);
1006
1007 INIT_LIST_HEAD(&new->se_conns);
1008
1009 new->se_cb_seq_nr = 1;
1010 new->se_flags = cses->flags;
1011 new->se_cb_prog = cses->callback_prog;
1012 new->se_cb_sec = cses->cb_sec;
1013 atomic_set(&new->se_ref, 0);
1014 idx = hash_sessionid(&new->se_sessionid);
1015 spin_lock(&nn->client_lock);
1016 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1017 spin_lock(&clp->cl_lock);
1018 list_add(&new->se_perclnt, &clp->cl_sessions);
1019 spin_unlock(&clp->cl_lock);
1020 spin_unlock(&nn->client_lock);
1021
1022 if (cses->flags & SESSION4_BACK_CHAN) {
1023 struct sockaddr *sa = svc_addr(rqstp);
1024 /*
1025 * This is a little silly; with sessions there's no real
1026 * use for the callback address. Use the peer address
1027 * as a reasonable default for now, but consider fixing
1028 * the rpc client not to require an address in the
1029 * future:
1030 */
1031 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1032 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1033 }
1034}
1035
1036/* caller must hold client_lock */
1037static struct nfsd4_session *
1038find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1039{
1040 struct nfsd4_session *elem;
1041 int idx;
1042 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1043
1044 dump_sessionid(__func__, sessionid);
1045 idx = hash_sessionid(sessionid);
1046 /* Search in the appropriate list */
1047 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1048 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1049 NFS4_MAX_SESSIONID_LEN)) {
1050 return elem;
1051 }
1052 }
1053
1054 dprintk("%s: session not found\n", __func__);
1055 return NULL;
1056}
1057
1058/* caller must hold client_lock */
1059static void
1060unhash_session(struct nfsd4_session *ses)
1061{
1062 list_del(&ses->se_hash);
1063 spin_lock(&ses->se_client->cl_lock);
1064 list_del(&ses->se_perclnt);
1065 spin_unlock(&ses->se_client->cl_lock);
1066}
1067
1068/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1069static int
1070STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1071{
1072 if (clid->cl_boot == nn->boot_time)
1073 return 0;
1074 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1075 clid->cl_boot, clid->cl_id, nn->boot_time);
1076 return 1;
1077}
1078
1079/*
1080 * XXX Should we use a slab cache ?
1081 * This type of memory management is somewhat inefficient, but we use it
1082 * anyway since SETCLIENTID is not a common operation.
1083 */
1084static struct nfs4_client *alloc_client(struct xdr_netobj name)
1085{
1086 struct nfs4_client *clp;
1087
1088 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1089 if (clp == NULL)
1090 return NULL;
1091 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1092 if (clp->cl_name.data == NULL) {
1093 kfree(clp);
1094 return NULL;
1095 }
1096 clp->cl_name.len = name.len;
1097 INIT_LIST_HEAD(&clp->cl_sessions);
1098 idr_init(&clp->cl_stateids);
1099 atomic_set(&clp->cl_refcount, 0);
1100 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1101 INIT_LIST_HEAD(&clp->cl_idhash);
1102 INIT_LIST_HEAD(&clp->cl_openowners);
1103 INIT_LIST_HEAD(&clp->cl_delegations);
1104 INIT_LIST_HEAD(&clp->cl_lru);
1105 INIT_LIST_HEAD(&clp->cl_callbacks);
1106 INIT_LIST_HEAD(&clp->cl_revoked);
1107 spin_lock_init(&clp->cl_lock);
1108 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1109 return clp;
1110}
1111
/* Final teardown of a client; caller must hold nn->client_lock. */
static void
free_client(struct nfs4_client *clp)
{
	struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);
	/* Free any sessions still linked to this client. */
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		/* No one should still hold a reference on any session here. */
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}
1132
/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd4_session *ses;

	/* Drop the client from the expiry LRU. */
	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	/* Unhash every session so lookups can no longer find them. */
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}
1145
/*
 * Release all state held by a client: delegations, revoked delegations,
 * openowners, the callback channel, and finally the client itself.
 */
static void
destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	INIT_LIST_HEAD(&reaplist);
	/* Collect delegations under recall_lock, destroy them outside it. */
	spin_lock(&recall_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		destroy_delegation(dp);
	}
	/* Reap delegations that were already revoked. */
	list_splice_init(&clp->cl_revoked, &reaplist);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		destroy_revoked_delegation(dp);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	/* The client sits in exactly one of the two name trees. */
	if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
		rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
	else
		rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	/* free_client() requires client_lock; see lockdep assert there. */
	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
	free_client(clp);
	spin_unlock(&nn->client_lock);
}
1189
/* Remove the client's stable-storage record, then tear down all its state. */
static void expire_client(struct nfs4_client *clp)
{
	nfsd4_client_record_remove(clp);
	destroy_client(clp);
}
1195
/* Store the client-supplied boot verifier in the client structure. */
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}
1201
/* Copy the (boot time, id) clientid pair from one client to another. */
static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}
1207
1208static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1209{
1210 if (source->cr_principal) {
1211 target->cr_principal =
1212 kstrdup(source->cr_principal, GFP_KERNEL);
1213 if (target->cr_principal == NULL)
1214 return -ENOMEM;
1215 } else
1216 target->cr_principal = NULL;
1217 target->cr_flavor = source->cr_flavor;
1218 target->cr_uid = source->cr_uid;
1219 target->cr_gid = source->cr_gid;
1220 target->cr_group_info = source->cr_group_info;
1221 get_group_info(target->cr_group_info);
1222 target->cr_gss_mech = source->cr_gss_mech;
1223 if (source->cr_gss_mech)
1224 gss_mech_get(source->cr_gss_mech);
1225 return 0;
1226}
1227
1228static long long
1229compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1230{
1231 long long res;
1232
1233 res = o1->len - o2->len;
1234 if (res)
1235 return res;
1236 return (long long)memcmp(o1->data, o2->data, o1->len);
1237}
1238
1239static int same_name(const char *n1, const char *n2)
1240{
1241 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1242}
1243
1244static int
1245same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1246{
1247 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1248}
1249
1250static int
1251same_clid(clientid_t *cl1, clientid_t *cl2)
1252{
1253 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1254}
1255
1256static bool groups_equal(struct group_info *g1, struct group_info *g2)
1257{
1258 int i;
1259
1260 if (g1->ngroups != g2->ngroups)
1261 return false;
1262 for (i=0; i<g1->ngroups; i++)
1263 if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1264 return false;
1265 return true;
1266}
1267
1268/*
1269 * RFC 3530 language requires clid_inuse be returned when the
1270 * "principal" associated with a requests differs from that previously
1271 * used. We use uid, gid's, and gss principal string as our best
1272 * approximation. We also don't want to allow non-gss use of a client
1273 * established using gss: in theory cr_principal should catch that
1274 * change, but in practice cr_principal can be null even in the gss case
1275 * since gssd doesn't always pass down a principal string.
1276 */
/* True when the credential came in over a gss flavor. */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}
1282
1283
1284static bool
1285same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1286{
1287 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1288 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
1289 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1290 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1291 return false;
1292 if (cr1->cr_principal == cr2->cr_principal)
1293 return true;
1294 if (!cr1->cr_principal || !cr2->cr_principal)
1295 return false;
1296 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1297}
1298
1299static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
1300{
1301 struct svc_cred *cr = &rqstp->rq_cred;
1302 u32 service;
1303
1304 if (!cr->cr_gss_mech)
1305 return false;
1306 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
1307 return service == RPC_GSS_SVC_INTEGRITY ||
1308 service == RPC_GSS_SVC_PRIVACY;
1309}
1310
/*
 * Verify that the request's credential matches the machine credential
 * the client established at EXCHANGE_ID time (SP4_MACH_CRED).  Clients
 * that did not request SP4_MACH_CRED trivially match.
 */
static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	/* Principal strings are only comparable within one gss mechanism. */
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
1325
/*
 * Generate a clientid: this server instance's boot time plus a
 * monotonically increasing counter.  NOTE(review): the static counter
 * has no locking of its own — presumably serialized by the state lock
 * held by all callers; confirm before changing call sites.
 */
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	static u32 current_clientid = 1;

	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = current_clientid++;
}
1333
/* Build a confirm verifier from the current time and a static counter. */
static void gen_confirm(struct nfs4_client *clp)
{
	__be32 verf[2];
	static u32 i;

	verf[0] = (__be32)get_seconds();
	verf[1] = (__be32)i++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
1343
1344static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
1345{
1346 struct nfs4_stid *ret;
1347
1348 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1349 if (!ret || !ret->sc_type)
1350 return NULL;
1351 return ret;
1352}
1353
1354static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1355{
1356 struct nfs4_stid *s;
1357
1358 s = find_stateid(cl, t);
1359 if (!s)
1360 return NULL;
1361 if (typemask & s->sc_type)
1362 return s;
1363 return NULL;
1364}
1365
/*
 * Allocate and initialize a client from the request's name, address,
 * credential and verifier.  Returns NULL on allocation failure.
 */
static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		/* free_client() must run under client_lock. */
		spin_lock(&nn->client_lock);
		free_client(clp);
		spin_unlock(&nn->client_lock);
		return NULL;
	}
	nfsd4_init_callback(&clp->cl_cb_null);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	gen_confirm(clp);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}
1396
/*
 * Insert a client into an rb-tree ordered by compare_blob() on cl_name.
 * NOTE(review): duplicates are sent right; callers presumably ensure
 * a name is never inserted twice into the same tree — confirm.
 */
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}
1416
1417static struct nfs4_client *
1418find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
1419{
1420 long long cmp;
1421 struct rb_node *node = root->rb_node;
1422 struct nfs4_client *clp;
1423
1424 while (node) {
1425 clp = rb_entry(node, struct nfs4_client, cl_namenode);
1426 cmp = compare_blob(&clp->cl_name, name);
1427 if (cmp > 0)
1428 node = node->rb_left;
1429 else if (cmp < 0)
1430 node = node->rb_right;
1431 else
1432 return clp;
1433 }
1434 return NULL;
1435}
1436
/* Hash a freshly created client into the unconfirmed tables. */
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client(clp);
}
1449
/* Move a client from the unconfirmed tables to the confirmed ones. */
static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client(clp);
}
1463
/*
 * Look a client up by clientid in the given hash table.  @sessions
 * selects 4.1+ (session-based) vs 4.0 clients; a client of the wrong
 * kind is treated as not found.  A hit also renews the client's lease.
 */
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client(clp);
			return clp;
		}
	}
	return NULL;
}
1480
1481static struct nfs4_client *
1482find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1483{
1484 struct list_head *tbl = nn->conf_id_hashtbl;
1485
1486 return find_client_in_id_table(tbl, clid, sessions);
1487}
1488
1489static struct nfs4_client *
1490find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1491{
1492 struct list_head *tbl = nn->unconf_id_hashtbl;
1493
1494 return find_client_in_id_table(tbl, clid, sessions);
1495}
1496
/* Nonzero exchange flags mean the client used EXCHANGE_ID (4.1+). */
static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}
1501
/* Find a confirmed client by its opaque name; NULL if absent. */
static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}
1507
/* Find an unconfirmed client by its opaque name; NULL if absent. */
static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
1513
1514static void
1515gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1516{
1517 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1518 struct sockaddr *sa = svc_addr(rqstp);
1519 u32 scopeid = rpc_get_scope_id(sa);
1520 unsigned short expected_family;
1521
1522 /* Currently, we only support tcp and tcp6 for the callback channel */
1523 if (se->se_callback_netid_len == 3 &&
1524 !memcmp(se->se_callback_netid_val, "tcp", 3))
1525 expected_family = AF_INET;
1526 else if (se->se_callback_netid_len == 4 &&
1527 !memcmp(se->se_callback_netid_val, "tcp6", 4))
1528 expected_family = AF_INET6;
1529 else
1530 goto out_err;
1531
1532 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
1533 se->se_callback_addr_len,
1534 (struct sockaddr *)&conn->cb_addr,
1535 sizeof(conn->cb_addr));
1536
1537 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1538 goto out_err;
1539
1540 if (conn->cb_addr.ss_family == AF_INET6)
1541 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1542
1543 conn->cb_prog = se->se_callback_prog;
1544 conn->cb_ident = se->se_callback_ident;
1545 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
1546 return;
1547out_err:
1548 conn->cb_addr.ss_family = AF_UNSPEC;
1549 conn->cb_addrlen = 0;
1550 dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
1551 "will not receive delegations\n",
1552 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1553
1554 return;
1555}
1556
1557/*
1558 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
1559 */
1560void
1561nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1562{
1563 struct xdr_buf *buf = resp->xdr.buf;
1564 struct nfsd4_slot *slot = resp->cstate.slot;
1565 unsigned int base;
1566
1567 dprintk("--> %s slot %p\n", __func__, slot);
1568
1569 slot->sl_opcnt = resp->opcnt;
1570 slot->sl_status = resp->cstate.status;
1571
1572 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1573 if (nfsd4_not_cached(resp)) {
1574 slot->sl_datalen = 0;
1575 return;
1576 }
1577 base = resp->cstate.data_offset;
1578 slot->sl_datalen = buf->len - base;
1579 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
1580 WARN("%s: sessions DRC could not cache compound\n", __func__);
1581 return;
1582}
1583
1584/*
1585 * Encode the replay sequence operation from the slot values.
1586 * If cachethis is FALSE encode the uncached rep error on the next
1587 * operation which sets resp->p and increments resp->opcnt for
1588 * nfs4svc_encode_compoundres.
1589 *
1590 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	/* Either the sequence op's status or nfserr_retry_uncached_rep. */
	return op->status;
}
1610
1611/*
1612 * The sequence operation is not cached because we can use the slot and
1613 * session values.
1614 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Re-encode the sequence op itself; it is never cached. */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	/* Append the cached reply body stored by nfsd4_store_cache_entry(). */
	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}
1641
1642/*
1643 * Set the exchange_id flags returned by the server.
1644 */
1645static void
1646nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1647{
1648 /* pNFS is not supported */
1649 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1650
1651 /* Referrals are supported, Migration is not. */
1652 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1653
1654 /* set the wire flags to return to client. */
1655 clid->flags = new->cl_exchange_flags;
1656}
1657
/* Does this client hold any opens, delegations, or sessions? */
static bool client_has_state(struct nfs4_client *clp)
{
	/*
	 * Note clp->cl_openowners check isn't quite right: there's no
	 * need to count owners without stateid's.
	 *
	 * Also note we should probably be using this in 4.0 case too.
	 */
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}
1670
/*
 * RFC 5661 EXCHANGE_ID.  The "case N" comments below refer to the
 * table in RFC 5661 section 18.35.4.
 */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
		/* fall through */
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		/* fall through */
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		/* case 5, client reboot */
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		expire_client(unconf);

	/* case 1 (normal case) */
out_new:
	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

	gen_clid(new, nn);
	add_to_unconfirmed(new);
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
	return status;
}
1787
/*
 * Classify an incoming seqid against a slot's current seqid:
 * next-in-order -> nfs_ok, retransmission -> nfserr_replay_cache,
 * in-flight duplicate -> nfserr_jukebox, anything else -> misordered.
 */
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
1808
1809/*
1810 * Cache the create session result into the create session single DRC
1811 * slot cache by saving the xdr structure. sl_seqid has been set.
1812 * Do this for solo or embedded create session operations.
1813 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	/* Snapshot status and arguments for replay of this CREATE_SESSION. */
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}
1821
static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	/* Restore the cached CREATE_SESSION reply from the clid slot. */
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}
1829
1830#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
1831 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
1832 1 + /* MIN tag is length with zero, only length */ \
1833 3 + /* version, opcount, opcode */ \
1834 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1835 /* seqid, slotID, slotID, cache */ \
1836 4 ) * sizeof(__be32))
1837
1838#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
1839 2 + /* verifier: AUTH_NULL, length 0 */\
1840 1 + /* status */ \
1841 1 + /* MIN tag is length with zero, only length */ \
1842 3 + /* opcount, opcode, opstatus*/ \
1843 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1844 /* seqid, slotID, slotID, slotID, status */ \
1845 5 ) * sizeof(__be32))
1846
/*
 * Validate and clamp the client's requested fore channel attributes,
 * then reserve DRC memory for the resulting number of slots.
 */
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}
1876
1877#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
1878 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
1879#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
1880 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
1881
/* Validate the client's requested back channel attributes. */
static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	/* Nothing is cached on the backchannel. */
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}
1902
/* Only AUTH_NULL and AUTH_UNIX are supported on the callback channel. */
static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
1920
/*
 * RFC 5661 CREATE_SESSION.  Allocates the session and its first
 * connection up front, then binds them to either a confirmed client
 * (replay / additional session) or an unconfirmed one (which is
 * confirmed here).  On failure the error-unwind labels release the
 * connection, session and DRC memory in reverse order of acquisition.
 */
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	/* check_forechannel_attrs() also reserves DRC memory on success. */
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		struct nfs4_client *old;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		/* Confirming this client replaces any same-named one. */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired(old);
			if (status)
				goto out_free_conn;
			expire_client(old);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_init_conn(rqstp, conn, new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	nfs4_unlock_state();
	return status;
out_free_conn:
	nfs4_unlock_state();
	free_conn(conn);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
2029
2030static __be32 nfsd4_map_bcts_dir(u32 *dir)
2031{
2032 switch (*dir) {
2033 case NFS4_CDFC4_FORE:
2034 case NFS4_CDFC4_BACK:
2035 return nfs_ok;
2036 case NFS4_CDFC4_FORE_OR_BOTH:
2037 case NFS4_CDFC4_BACK_OR_BOTH:
2038 *dir = NFS4_CDFC4_BOTH;
2039 return nfs_ok;
2040 };
2041 return nfserr_inval;
2042}
2043
/* Update the current session's backchannel program and security. */
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	/* Re-probe so the new settings take effect immediately. */
	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}
2062
/*
 * RFC 5661 BIND_CONN_TO_SESSION: attach this request's connection to
 * the named session.  Must be the only op in its compound.
 */
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	nfs4_lock_state();
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp));
	spin_unlock(&nn->client_lock);
	status = nfserr_badsession;
	if (!session)
		goto out;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}
2097
2098static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2099{
2100 if (!session)
2101 return 0;
2102 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2103}
2104
/*
 * RFC 5661 DESTROY_SESSION.  A compound may destroy its own session
 * only if DESTROY_SESSION is the last op; in that case the compound
 * itself holds one extra session reference, accounted for below.
 */
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id);

	nfs4_lock_state();
	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r));
	status = nfserr_badsession;
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_client_lock;
	nfsd4_get_session_locked(ses);
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	/* Drop the lock for the (possibly sleeping) callback probe. */
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	nfs4_unlock_state();
	return status;
}
2150
2151static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2152{
2153 struct nfsd4_conn *c;
2154
2155 list_for_each_entry(c, &s->se_conns, cn_persession) {
2156 if (c->cn_xprt == xpt) {
2157 return c;
2158 }
2159 }
2160 return NULL;
2161}
2162
/*
 * Ensure the connection a SEQUENCE arrived on is bound to its session.
 * Consumes @new either way: it is hashed into the session, or freed if
 * the connection is already known (or binding is refused).
 */
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	/* MACH_CRED clients must bind connections explicitly. */
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}
2189
2190static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2191{
2192 struct nfsd4_compoundargs *args = rqstp->rq_argp;
2193
2194 return args->opcnt > session->se_fchannel.maxops;
2195}
2196
2197static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2198 struct nfsd4_session *session)
2199{
2200 struct xdr_buf *xb = &rqstp->rq_arg;
2201
2202 return xb->len > session->se_fchannel.maxreq_sz;
2203}
2204
/*
 * SEQUENCE (NFSv4.1): validate the session, slot, and sequence id of an
 * incoming compound; replay a cached reply on retransmission; bind the
 * connection to the session; and reserve reply space within the
 * fore-channel limits.  On success, cstate->slot and cstate->session
 * are set for the remainder of the compound.
 */
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* SEQUENCE must be the first op of the compound: */
	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	status = nfserr_badsession;
	session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp));
	if (!session)
		goto out_no_session;
	clp = session->se_client;
	status = get_client_locked(clp);
	if (status)
		goto out_no_session;
	status = nfsd4_get_session_locked(session);
	if (status)
		goto out_put_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		/* Retransmission: replay from the slot's reply cache,
		 * but only if a reply was actually cached there: */
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;	/* consumed (hashed or freed) by the call above */
	if (status)
		goto out_put_session;

	/* Cap the reply at what the fore channel (and, if the client
	 * asked us to cache this reply, the cache) can hold: */
	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;

out:
	/* Report backchannel health in sr_status_flags: */
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session(session);
out_put_client:
	put_client_renew_locked(clp);
	goto out_no_session;
}
2331
/*
 * DESTROY_CLIENTID (NFSv4.1): expire the client named by dc->clientid,
 * but only if it holds no state and the request's machine credential
 * matches the client's.
 */
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf, *clp;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	/* A clientid can't be simultaneously confirmed and unconfirmed: */
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		clp = conf;

		/* Refuse to destroy a client that still has state: */
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		status = nfserr_wrong_cred;
		goto out;
	}
	expire_client(clp);
out:
	nfs4_unlock_state();
	return status;
}
2366
/*
 * RECLAIM_COMPLETE (NFSv4.1): the client has finished reclaiming state
 * after a server restart; record the client in stable storage so it may
 * reclaim again after the next restart.
 */
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	nfs4_lock_state();
	status = nfserr_complete_already;
	/* test_and_set so a second RECLAIM_COMPLETE is detected atomically: */
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client. Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	nfs4_unlock_state();
	return status;
}
2405
/*
 * SETCLIENTID (NFSv4.0): create (or update the callback info of) a
 * client record for se_name/se_verf.  The record stays unconfirmed
 * until SETCLIENTID_CONFIRM arrives with the verifier returned here.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj clname = setclid->se_name;
	nfs4_verifier clverifier = setclid->se_verf;
	struct nfs4_client *conf, *unconf, *new;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* Cases below refer to rfc 3530 section 14.2.33: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf) {
		/* case 0: */
		status = nfserr_clid_inuse;
		/* A client that used EXCHANGE_ID may not fall back to
		 * SETCLIENTID with the same name: */
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/* Any older unconfirmed record for this name is superseded: */
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		expire_client(unconf);
	status = nfserr_jukebox;
	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		goto out;
	if (conf && same_verf(&conf->cl_verifier, &clverifier))
		/* case 1: probable callback update */
		copy_clid(new, conf);
	else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}
2456
2457
/*
 * SETCLIENTID_CONFIRM (NFSv4.0): confirm the client record created by a
 * previous SETCLIENTID using the verifier returned there.  On a normal
 * confirmation, any older confirmed record with the same name is
 * expired and the unconfirmed record is promoted.
 */
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;
	nfs4_lock_state();

	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * there's a bug somewhere.  Let's charitably assume it's our
	 * bug.
	 */
	status = nfserr_serverfault;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
		nfsd4_probe_callback(conf);
		expire_client(unconf);
	} else { /* case 3: normal case; new or rebooted client */
		/* A confirmed client with the same name but a different
		 * clientid must be expired before we can confirm: */
		conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (conf) {
			status = mark_client_expired(conf);
			if (status)
				goto out;
			expire_client(conf);
		}
		move_to_confirmed(unconf);
		nfsd4_probe_callback(unconf);
	}
out:
	nfs4_unlock_state();
	return status;
}
2514
2515static struct nfs4_file *nfsd4_alloc_file(void)
2516{
2517 return kmem_cache_alloc(file_slab, GFP_KERNEL);
2518}
2519
/* OPEN Share state helper functions */

/*
 * Initialize a freshly-allocated nfs4_file for inode @ino (taking an
 * inode reference via igrab) and insert it into the global file hash
 * under recall_lock.  Starts with a single reference held.
 */
static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);

	atomic_set(&fp->fi_ref, 1);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	fp->fi_inode = igrab(ino);
	fp->fi_had_conflict = false;
	fp->fi_lease = NULL;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	/* Fully initialize before making the file visible in the hash: */
	spin_lock(&recall_lock);
	hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
	spin_unlock(&recall_lock);
}
2537
2538void
2539nfsd4_free_slabs(void)
2540{
2541 kmem_cache_destroy(openowner_slab);
2542 kmem_cache_destroy(lockowner_slab);
2543 kmem_cache_destroy(file_slab);
2544 kmem_cache_destroy(stateid_slab);
2545 kmem_cache_destroy(deleg_slab);
2546}
2547
/*
 * Create the slab caches backing all nfsv4 state objects.  On failure,
 * tear down any caches already created (in reverse order) and return
 * -ENOMEM; on success return 0.
 */
int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	return 0;

out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
2585
2586static void init_nfs4_replay(struct nfs4_replay *rp)
2587{
2588 rp->rp_status = nfserr_serverfault;
2589 rp->rp_buflen = 0;
2590 rp->rp_buf = rp->rp_ibuf;
2591}
2592
2593static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2594{
2595 struct nfs4_stateowner *sop;
2596
2597 sop = kmem_cache_alloc(slab, GFP_KERNEL);
2598 if (!sop)
2599 return NULL;
2600
2601 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2602 if (!sop->so_owner.data) {
2603 kmem_cache_free(slab, sop);
2604 return NULL;
2605 }
2606 sop->so_owner.len = owner->len;
2607
2608 INIT_LIST_HEAD(&sop->so_stateids);
2609 sop->so_client = clp;
2610 init_nfs4_replay(&sop->so_replay);
2611 return sop;
2612}
2613
2614static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2615{
2616 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2617
2618 list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
2619 list_add(&oo->oo_perclient, &clp->cl_openowners);
2620}
2621
2622static struct nfs4_openowner *
2623alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2624 struct nfs4_openowner *oo;
2625
2626 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2627 if (!oo)
2628 return NULL;
2629 oo->oo_owner.so_is_open_owner = 1;
2630 oo->oo_owner.so_seqid = open->op_seqid;
2631 oo->oo_flags = NFS4_OO_NEW;
2632 oo->oo_time = 0;
2633 oo->oo_last_closed_stid = NULL;
2634 INIT_LIST_HEAD(&oo->oo_close_lru);
2635 hash_openowner(oo, clp, strhashval);
2636 return oo;
2637}
2638
2639static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
2640 struct nfs4_openowner *oo = open->op_openowner;
2641
2642 stp->st_stid.sc_type = NFS4_OPEN_STID;
2643 INIT_LIST_HEAD(&stp->st_lockowners);
2644 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2645 list_add(&stp->st_perfile, &fp->fi_stateids);
2646 stp->st_stateowner = &oo->oo_owner;
2647 get_nfs4_file(fp);
2648 stp->st_file = fp;
2649 stp->st_access_bmap = 0;
2650 stp->st_deny_bmap = 0;
2651 set_access(open->op_share_access, stp);
2652 set_deny(open->op_share_deny, stp);
2653 stp->st_openstp = NULL;
2654}
2655
2656static void
2657move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
2658{
2659 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2660
2661 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2662
2663 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
2664 oo->oo_time = get_seconds();
2665}
2666
2667static int
2668same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2669 clientid_t *clid)
2670{
2671 return (sop->so_owner.len == owner->len) &&
2672 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2673 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2674}
2675
2676static struct nfs4_openowner *
2677find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
2678 bool sessions, struct nfsd_net *nn)
2679{
2680 struct nfs4_stateowner *so;
2681 struct nfs4_openowner *oo;
2682 struct nfs4_client *clp;
2683
2684 list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
2685 if (!so->so_is_open_owner)
2686 continue;
2687 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
2688 oo = openowner(so);
2689 clp = oo->oo_owner.so_client;
2690 if ((bool)clp->cl_minorversion != sessions)
2691 return NULL;
2692 renew_client(oo->oo_owner.so_client);
2693 return oo;
2694 }
2695 }
2696 return NULL;
2697}
2698
2699/* search file_hashtbl[] for file */
2700static struct nfs4_file *
2701find_file(struct inode *ino)
2702{
2703 unsigned int hashval = file_hashval(ino);
2704 struct nfs4_file *fp;
2705
2706 spin_lock(&recall_lock);
2707 hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2708 if (fp->fi_inode == ino) {
2709 get_nfs4_file(fp);
2710 spin_unlock(&recall_lock);
2711 return fp;
2712 }
2713 }
2714 spin_unlock(&recall_lock);
2715 return NULL;
2716}
2717
2718/*
2719 * Called to check deny when READ with all zero stateid or
2720 * WRITE with all zero or all one stateid
2721 */
2722static __be32
2723nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2724{
2725 struct inode *ino = current_fh->fh_dentry->d_inode;
2726 struct nfs4_file *fp;
2727 struct nfs4_ol_stateid *stp;
2728 __be32 ret;
2729
2730 fp = find_file(ino);
2731 if (!fp)
2732 return nfs_ok;
2733 ret = nfserr_locked;
2734 /* Search for conflicting share reservations */
2735 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2736 if (test_deny(deny_type, stp) ||
2737 test_deny(NFS4_SHARE_DENY_BOTH, stp))
2738 goto out;
2739 }
2740 ret = nfs_ok;
2741out:
2742 put_nfs4_file(fp);
2743 return ret;
2744}
2745
/*
 * Queue delegation @dp for recall: add it to the per-net recall list,
 * timestamp it, and fire off the CB_RECALL callback to the client.
 */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	/* We're assuming the state code never drops its reference
	 * without first removing the lease. Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference: */
	atomic_inc(&dp->dl_count);

	list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);

	/* Only place dl_time is set; protected by i_lock: */
	dp->dl_time = get_seconds();

	nfsd4_cb_recall(dp);
}
2765
/* Called from break_lease() with i_lock held. */
/*
 * Lease-break callback: a conflicting open/access arrived for a file we
 * hold delegations on.  Mark the file conflicted and queue every
 * delegation on it for recall.
 */
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&recall_lock);
	fp->fi_had_conflict = true;
	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
		nfsd_break_one_deleg(dp);
	spin_unlock(&recall_lock);
}
2793
2794static
2795int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2796{
2797 if (arg & F_UNLCK)
2798 return lease_modify(onlist, arg);
2799 else
2800 return -EAGAIN;
2801}
2802
/* Lease callbacks nfsd installs on delegation file locks: */
static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
2807
2808static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2809{
2810 if (nfsd4_has_session(cstate))
2811 return nfs_ok;
2812 if (seqid == so->so_seqid - 1)
2813 return nfserr_replay_me;
2814 if (seqid == so->so_seqid)
2815 return nfs_ok;
2816 return nfserr_bad_seqid;
2817}
2818
/*
 * First phase of OPEN processing: look up (or create) the openowner
 * named in the request, and preallocate the nfs4_file and stateid that
 * nfsd4_process_open2 may need, so that a failure cannot occur after
 * the file has already been created.
 */
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
	oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
	open->op_openowner = oo;
	if (!oo) {
		/* No owner yet: need the client to create one under. */
		clp = find_confirmed_client(clientid, cstate->minorversion,
					    nn);
		if (clp == NULL)
			return nfserr_expired;
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		clp = oo->oo_owner.so_client;
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	clp = oo->oo_owner.so_client;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, clp, open);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	/* Preallocate the stateid nfsd4_process_open2 may consume: */
	open->op_stp = nfs4_alloc_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;
	return nfs_ok;
}
2872
2873static inline __be32
2874nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2875{
2876 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2877 return nfserr_openmode;
2878 else
2879 return nfs_ok;
2880}
2881
2882static int share_access_to_flags(u32 share_access)
2883{
2884 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2885}
2886
2887static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
2888{
2889 struct nfs4_stid *ret;
2890
2891 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
2892 if (!ret)
2893 return NULL;
2894 return delegstateid(ret);
2895}
2896
2897static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
2898{
2899 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
2900 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
2901}
2902
/*
 * Look up the delegation stateid presented in the OPEN and check its
 * mode against the requested access.  For CLAIM_DELEGATE_CUR /
 * CLAIM_DELEG_CUR_FH claims a failure here is an error (and the owner
 * is confirmed on success); for other claims any failure is ignored
 * and *dp is simply left NULL.
 */
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;

	*dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (*dp == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(*dp, flags);
	if (status)
		*dp = NULL;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	/* Successfully presenting a delegation confirms the owner: */
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
2925
2926static __be32
2927nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
2928{
2929 struct nfs4_ol_stateid *local;
2930 struct nfs4_openowner *oo = open->op_openowner;
2931
2932 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
2933 /* ignore lock owners */
2934 if (local->st_stateowner->so_is_open_owner == 0)
2935 continue;
2936 /* remember if we have seen this open owner */
2937 if (local->st_stateowner == &oo->oo_owner)
2938 *stpp = local;
2939 /* check for conflicting share reservations */
2940 if (!test_share(local, open))
2941 return nfserr_share_denied;
2942 }
2943 return nfs_ok;
2944}
2945
2946static inline int nfs4_access_to_access(u32 nfs4_access)
2947{
2948 int flags = 0;
2949
2950 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2951 flags |= NFSD_MAY_READ;
2952 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2953 flags |= NFSD_MAY_WRITE;
2954 return flags;
2955}
2956
/*
 * Ensure @fp has a struct file open with the access mode this OPEN
 * requests, opening one via nfsd_open() if not already cached in
 * fi_fds[], and account for the access in the file's access counts.
 */
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfsd4_open *open)
{
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);

	if (!fp->fi_fds[oflag]) {
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
			&fp->fi_fds[oflag]);
		if (status)
			return status;
	}
	nfs4_file_get_access(fp, oflag);

	return nfs_ok;
}
2974
2975static inline __be32
2976nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2977 struct nfsd4_open *open)
2978{
2979 struct iattr iattr = {
2980 .ia_valid = ATTR_SIZE,
2981 .ia_size = 0,
2982 };
2983 if (!open->op_truncate)
2984 return 0;
2985 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2986 return nfserr_inval;
2987 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2988}
2989
/*
 * OPEN upgrade: an existing stateid for this owner and file gains the
 * newly requested access/deny bits.  A new vfs open is taken only when
 * the access mode wasn't already held; if the implicit truncate then
 * fails, the newly acquired access is dropped again.
 */
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	u32 op_share_access = open->op_share_access;
	bool new_access;
	__be32 status;

	new_access = !test_access(op_share_access, stp);
	if (new_access) {
		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
		if (status)
			return status;
	}
	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status) {
		/* Undo the access we just acquired: */
		if (new_access) {
			int oflag = nfs4_access_to_omode(op_share_access);
			nfs4_file_put_access(fp, oflag);
		}
		return status;
	}
	/* remember the open */
	set_access(op_share_access, stp);
	set_deny(open->op_share_deny, stp);

	return nfs_ok;
}
3017
3018
/*
 * Mark the openowner confirmed after a CLAIM_PREVIOUS open.
 * (@has_session is currently unused here.)
 */
static void
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
}
3024
3025/* Should we give out recallable state?: */
3026static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
3027{
3028 if (clp->cl_cb_state == NFSD4_CB_UP)
3029 return true;
3030 /*
3031 * In the sessions case, since we don't have to establish a
3032 * separate connection for callbacks, we assume it's OK
3033 * until we hear otherwise:
3034 */
3035 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
3036}
3037
3038static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
3039{
3040 struct file_lock *fl;
3041
3042 fl = locks_alloc_lock();
3043 if (!fl)
3044 return NULL;
3045 locks_init_lock(fl);
3046 fl->fl_lmops = &nfsd_lease_mng_ops;
3047 fl->fl_flags = FL_DELEG;
3048 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
3049 fl->fl_end = OFFSET_MAX;
3050 fl->fl_owner = (fl_owner_t)(dp->dl_file);
3051 fl->fl_pid = current->tgid;
3052 return fl;
3053}
3054
/*
 * Set up the first (lease-owning) delegation on dp->dl_file: install a
 * vfs lease on a readable open of the file and hook the delegation into
 * the per-client and per-file lists.  Returns 0 or a negative errno.
 */
static int nfs4_setlease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_file;
	struct file_lock *fl;
	int status;

	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	/* NOTE(review): assumes a readable struct file is always open on
	 * fp here — find_readable_file() returning NULL would crash in
	 * vfs_setlease below; confirm callers guarantee read access. */
	fl->fl_file = find_readable_file(fp);
	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
	if (status)
		goto out_free;
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
	fp->fi_lease = fl;
	/* fi_deleg_file takes its own reference on the struct file: */
	fp->fi_deleg_file = get_file(fl->fl_file);
	atomic_set(&fp->fi_delegees, 1);
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	return 0;
out_free:
	locks_free_lock(fl);
	return status;
}
3078
/*
 * Attach delegation @dp to file @fp.  The first delegation on the file
 * installs the vfs lease; subsequent ones just bump fi_delegees under
 * recall_lock, bailing out with -EAGAIN if a lease break has already
 * been seen (fi_had_conflict).
 */
static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	if (fp->fi_had_conflict)
		return -EAGAIN;
	get_nfs4_file(fp);
	dp->dl_file = fp;
	if (!fp->fi_lease)
		return nfs4_setlease(dp);
	spin_lock(&recall_lock);
	atomic_inc(&fp->fi_delegees);
	/* Re-check under the lock: a break may have raced with us: */
	if (fp->fi_had_conflict) {
		spin_unlock(&recall_lock);
		return -EAGAIN;
	}
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	spin_unlock(&recall_lock);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
	return 0;
}
3098
3099static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
3100{
3101 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3102 if (status == -EAGAIN)
3103 open->op_why_no_deleg = WND4_CONTENTION;
3104 else {
3105 open->op_why_no_deleg = WND4_RESOURCE;
3106 switch (open->op_deleg_want) {
3107 case NFS4_SHARE_WANT_READ_DELEG:
3108 case NFS4_SHARE_WANT_WRITE_DELEG:
3109 case NFS4_SHARE_WANT_ANY_DELEG:
3110 break;
3111 case NFS4_SHARE_WANT_CANCEL:
3112 open->op_why_no_deleg = WND4_CANCELLED;
3113 break;
3114 case NFS4_SHARE_WANT_NO_DELEG:
3115 WARN_ON_ONCE(1);
3116 }
3117 }
3118}
3119
3120/*
3121 * Attempt to hand out a delegation.
3122 *
3123 * Note we don't support write delegations, and won't until the vfs has
3124 * proper support for them.
3125 */
3126static void
3127nfs4_open_delegation(struct net *net, struct svc_fh *fh,
3128 struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
3129{
3130 struct nfs4_delegation *dp;
3131 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
3132 int cb_up;
3133 int status = 0;
3134
3135 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
3136 open->op_recall = 0;
3137 switch (open->op_claim_type) {
3138 case NFS4_OPEN_CLAIM_PREVIOUS:
3139 if (!cb_up)
3140 open->op_recall = 1;
3141 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
3142 goto out_no_deleg;
3143 break;
3144 case NFS4_OPEN_CLAIM_NULL:
3145 case NFS4_OPEN_CLAIM_FH:
3146 /*
3147 * Let's not give out any delegations till everyone's
3148 * had the chance to reclaim theirs....
3149 */
3150 if (locks_in_grace(net))
3151 goto out_no_deleg;
3152 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3153 goto out_no_deleg;
3154 /*
3155 * Also, if the file was opened for write or
3156 * create, there's a good chance the client's
3157 * about to write to it, resulting in an
3158 * immediate recall (since we don't support
3159 * write delegations):
3160 */
3161 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
3162 goto out_no_deleg;
3163 if (open->op_create == NFS4_OPEN_CREATE)
3164 goto out_no_deleg;
3165 break;
3166 default:
3167 goto out_no_deleg;
3168 }
3169 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh);
3170 if (dp == NULL)
3171 goto out_no_deleg;
3172 status = nfs4_set_delegation(dp, stp->st_file);
3173 if (status)
3174 goto out_free;
3175
3176 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
3177
3178 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3179 STATEID_VAL(&dp->dl_stid.sc_stateid));
3180 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
3181 return;
3182out_free:
3183 destroy_delegation(dp);
3184out_no_deleg:
3185 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
3186 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
3187 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
3188 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
3189 open->op_recall = 1;
3190 }
3191
3192 /* 4.1 client asking for a delegation? */
3193 if (open->op_deleg_want)
3194 nfsd4_open_deleg_none_ext(open, status);
3195 return;
3196}
3197
/*
 * The client already holds delegation @dp but its OPEN asked (via
 * op_deleg_want) for a different kind: explain in op_why_no_deleg why
 * no up/downgrade is granted.
 */
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		/* NOTE(review): testing for a held WRITE delegation in
		 * the "upgrade" branch looks suspicious — an upgrade
		 * request would normally find a held READ delegation.
		 * Verify intent before changing. */
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
3215
3216/*
3217 * called with nfs4_lock_state() held.
3218 */
3219__be32
3220nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
3221{
3222 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3223 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
3224 struct nfs4_file *fp = NULL;
3225 struct inode *ino = current_fh->fh_dentry->d_inode;
3226 struct nfs4_ol_stateid *stp = NULL;
3227 struct nfs4_delegation *dp = NULL;
3228 __be32 status;
3229
3230 /*
3231 * Lookup file; if found, lookup stateid and check open request,
3232 * and check for delegations in the process of being recalled.
3233 * If not found, create the nfs4_file struct
3234 */
3235 fp = find_file(ino);
3236 if (fp) {
3237 if ((status = nfs4_check_open(fp, open, &stp)))
3238 goto out;
3239 status = nfs4_check_deleg(cl, open, &dp);
3240 if (status)
3241 goto out;
3242 } else {
3243 status = nfserr_bad_stateid;
3244 if (nfsd4_is_deleg_cur(open))
3245 goto out;
3246 status = nfserr_jukebox;
3247 fp = open->op_file;
3248 open->op_file = NULL;
3249 nfsd4_init_file(fp, ino);
3250 }
3251
3252 /*
3253 * OPEN the file, or upgrade an existing OPEN.
3254 * If truncate fails, the OPEN fails.
3255 */
3256 if (stp) {
3257 /* Stateid was found, this is an OPEN upgrade */
3258 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
3259 if (status)
3260 goto out;
3261 } else {
3262 status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
3263 if (status)
3264 goto out;
3265 status = nfsd4_truncate(rqstp, current_fh, open);
3266 if (status)
3267 goto out;
3268 stp = open->op_stp;
3269 open->op_stp = NULL;
3270 init_open_stateid(stp, fp, open);
3271 }
3272 update_stateid(&stp->st_stid.sc_stateid);
3273 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3274
3275 if (nfsd4_has_session(&resp->cstate)) {
3276 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3277
3278 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
3279 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3280 open->op_why_no_deleg = WND4_NOT_WANTED;
3281 goto nodeleg;
3282 }
3283 }
3284
3285 /*
3286 * Attempt to hand out a delegation. No error return, because the
3287 * OPEN succeeds even if we fail.
3288 */
3289 nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
3290nodeleg:
3291 status = nfs_ok;
3292
3293 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
3294 STATEID_VAL(&stp->st_stid.sc_stateid));
3295out:
3296 /* 4.1 client trying to upgrade/downgrade delegation? */
3297 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
3298 open->op_deleg_want)
3299 nfsd4_deleg_xgrade_none_ext(open, dp);
3300
3301 if (fp)
3302 put_nfs4_file(fp);
3303 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
3304 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
3305 /*
3306 * To finish the open response, we just need to set the rflags.
3307 */
3308 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
3309 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
3310 !nfsd4_has_session(&resp->cstate))
3311 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
3312
3313 return status;
3314}
3315
3316void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
3317{
3318 if (open->op_openowner) {
3319 struct nfs4_openowner *oo = open->op_openowner;
3320
3321 if (!list_empty(&oo->oo_owner.so_stateids))
3322 list_del_init(&oo->oo_close_lru);
3323 if (oo->oo_flags & NFS4_OO_NEW) {
3324 if (status) {
3325 release_openowner(oo);
3326 open->op_openowner = NULL;
3327 } else
3328 oo->oo_flags &= ~NFS4_OO_NEW;
3329 }
3330 }
3331 if (open->op_file)
3332 nfsd4_free_file(open->op_file);
3333 if (open->op_stp)
3334 free_generic_stateid(open->op_stp);
3335}
3336
3337static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp)
3338{
3339 struct nfs4_client *found;
3340
3341 if (STALE_CLIENTID(clid, nn))
3342 return nfserr_stale_clientid;
3343 found = find_confirmed_client(clid, session, nn);
3344 if (clp)
3345 *clp = found;
3346 return found ? nfs_ok : nfserr_expired;
3347}
3348
3349__be32
3350nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3351 clientid_t *clid)
3352{
3353 struct nfs4_client *clp;
3354 __be32 status;
3355 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3356
3357 nfs4_lock_state();
3358 dprintk("process_renew(%08x/%08x): starting\n",
3359 clid->cl_boot, clid->cl_id);
3360 status = lookup_clientid(clid, cstate->minorversion, nn, &clp);
3361 if (status)
3362 goto out;
3363 status = nfserr_cb_path_down;
3364 if (!list_empty(&clp->cl_delegations)
3365 && clp->cl_cb_state != NFSD4_CB_UP)
3366 goto out;
3367 status = nfs_ok;
3368out:
3369 nfs4_unlock_state();
3370 return status;
3371}
3372
/* End the NFSv4 grace period (idempotent). */
static void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	/* persist the fact that recovery is complete */
	nfsd4_record_grace_done(nn, nn->boot_time);
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * Now that every NFSv4 client has had the chance to recover and
	 * to see the (possibly new, possibly shorter) lease time, we
	 * can safely set the next grace time to the current lease time:
	 */
	nn->nfsd4_grace = nn->nfsd4_lease;
}
3391
/*
 * Periodic state reaper: expire clients, delegations and openowners whose
 * timestamps have fallen behind the lease cutoff.  Returns the number of
 * seconds until the laundromat should run again — the minimum of the full
 * lease time and the time until the earliest not-yet-expired entry ages
 * out (clamped below by NFSD_LAUNDROMAT_MINTIMEOUT).
 */
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head *pos, *next, reaplist;
	/* anything last renewed before 'cutoff' has outlived its lease */
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	nfs4_lock_state();

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			/*
			 * First unexpired entry: note how long until it
			 * would expire and stop scanning (the LRU list
			 * is presumably ordered oldest-first — the break
			 * relies on that).
			 */
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			/* still referenced; try again next run */
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_move(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	/* destroy outside the spinlock: expire_client may sleep */
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		expire_client(clp);
	}
	spin_lock(&recall_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		/* the recall list is global; skip other namespaces */
		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
			continue;
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		revoke_delegation(dp);
	}
	/* reap v4.0 openowners kept around only for CLOSE replay */
	list_for_each_safe(pos, next, &nn->close_lru) {
		oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
		if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		release_openowner(oo);
	}
	/* never reschedule more aggressively than the minimum interval */
	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	nfs4_unlock_state();
	return new_timeo;
}
3459
/* workqueue on which the per-net laundromat work runs */
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

/*
 * Workqueue entry point: run the laundromat once, then re-arm the delayed
 * work using the next-run interval nfs4_laundromat() computed.
 */
static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
						  work);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
3476
3477static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3478{
3479 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3480 return nfserr_bad_stateid;
3481 return nfs_ok;
3482}
3483
3484static inline int
3485access_permit_read(struct nfs4_ol_stateid *stp)
3486{
3487 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3488 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3489 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
3490}
3491
3492static inline int
3493access_permit_write(struct nfs4_ol_stateid *stp)
3494{
3495 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3496 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
3497}
3498
3499static
3500__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3501{
3502 __be32 status = nfserr_openmode;
3503
3504 /* For lock stateid's, we test the parent open, not the lock: */
3505 if (stp->st_openstp)
3506 stp = stp->st_openstp;
3507 if ((flags & WR_STATE) && !access_permit_write(stp))
3508 goto out;
3509 if ((flags & RD_STATE) && !access_permit_read(stp))
3510 goto out;
3511 status = nfs_ok;
3512out:
3513 return status;
3514}
3515
3516static inline __be32
3517check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
3518{
3519 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3520 return nfs_ok;
3521 else if (locks_in_grace(net)) {
3522 /* Answer in remaining cases depends on existence of
3523 * conflicting state; so we must wait out the grace period. */
3524 return nfserr_grace;
3525 } else if (flags & WR_STATE)
3526 return nfs4_share_conflict(current_fh,
3527 NFS4_SHARE_DENY_WRITE);
3528 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3529 return nfs4_share_conflict(current_fh,
3530 NFS4_SHARE_DENY_READ);
3531}
3532
3533/*
3534 * Allow READ/WRITE during grace period on recovered state only for files
3535 * that are not able to provide mandatory locking.
3536 */
3537static inline int
3538grace_disallows_io(struct net *net, struct inode *inode)
3539{
3540 return locks_in_grace(net) && mandatory_lock(inode);
3541}
3542
3543/* Returns true iff a is later than b: */
3544static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3545{
3546 return (s32)(a->si_generation - b->si_generation) > 0;
3547}
3548
3549static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3550{
3551 /*
3552 * When sessions are used the stateid generation number is ignored
3553 * when it is zero.
3554 */
3555 if (has_session && in->si_generation == 0)
3556 return nfs_ok;
3557
3558 if (in->si_generation == ref->si_generation)
3559 return nfs_ok;
3560
3561 /* If the client sends us a stateid from the future, it's buggy: */
3562 if (stateid_generation_after(in, ref))
3563 return nfserr_bad_stateid;
3564 /*
3565 * However, we could see a stateid from the past, even from a
3566 * non-buggy client. For example, if the client sends a lock
3567 * while some IO is outstanding, the lock may bump si_generation
3568 * while the IO is still in flight. The client could avoid that
3569 * situation by waiting for responses on all the IO requests,
3570 * but better performance may result in retrying IO that
3571 * receives an old_stateid error if requests are rarely
3572 * reordered in flight:
3573 */
3574 return nfserr_old_stateid;
3575}
3576
/*
 * TEST_STATEID helper: report whether a stateid is currently valid for
 * client 'cl'.  Unlike the IO path, this only classifies the stateid; it
 * grants nothing.
 */
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *ols;
	__be32 status;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return nfserr_bad_stateid;
	}
	s = find_stateid(cl, stateid);
	if (!s)
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		return status;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		return nfs_ok;
	case NFS4_REVOKED_DELEG_STID:
		return nfserr_deleg_revoked;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		ols = openlockstateid(s);
		/* stateids of an unconfirmed openowner aren't usable yet */
		if (ols->st_stateowner->so_is_open_owner
		    && !(openowner(ols->st_stateowner)->oo_flags
						& NFS4_OO_CONFIRMED))
			return nfserr_bad_stateid;
		return nfs_ok;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* fall through */
	case NFS4_CLOSED_STID:
		return nfserr_bad_stateid;
	}
}
3619
3620static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask,
3621 struct nfs4_stid **s, bool sessions,
3622 struct nfsd_net *nn)
3623{
3624 struct nfs4_client *cl;
3625 __be32 status;
3626
3627 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3628 return nfserr_bad_stateid;
3629 status = lookup_clientid(&stateid->si_opaque.so_clid, sessions,
3630 nn, &cl);
3631 if (status == nfserr_stale_clientid) {
3632 if (sessions)
3633 return nfserr_bad_stateid;
3634 return nfserr_stale_stateid;
3635 }
3636 if (status)
3637 return status;
3638 *s = find_stateid_by_type(cl, stateid, typemask);
3639 if (!*s)
3640 return nfserr_bad_stateid;
3641 return nfs_ok;
3642}
3643
3644/*
3645* Checks for stateid operations
3646*/
3647__be32
3648nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3649 stateid_t *stateid, int flags, struct file **filpp)
3650{
3651 struct nfs4_stid *s;
3652 struct nfs4_ol_stateid *stp = NULL;
3653 struct nfs4_delegation *dp = NULL;
3654 struct svc_fh *current_fh = &cstate->current_fh;
3655 struct inode *ino = current_fh->fh_dentry->d_inode;
3656 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3657 struct file *file = NULL;
3658 __be32 status;
3659
3660 if (filpp)
3661 *filpp = NULL;
3662
3663 if (grace_disallows_io(net, ino))
3664 return nfserr_grace;
3665
3666 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3667 return check_special_stateids(net, current_fh, stateid, flags);
3668
3669 nfs4_lock_state();
3670
3671 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
3672 &s, cstate->minorversion, nn);
3673 if (status)
3674 goto out;
3675 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3676 if (status)
3677 goto out;
3678 switch (s->sc_type) {
3679 case NFS4_DELEG_STID:
3680 dp = delegstateid(s);
3681 status = nfs4_check_delegmode(dp, flags);
3682 if (status)
3683 goto out;
3684 if (filpp) {
3685 file = dp->dl_file->fi_deleg_file;
3686 if (!file) {
3687 WARN_ON_ONCE(1);
3688 status = nfserr_serverfault;
3689 goto out;
3690 }
3691 }
3692 break;
3693 case NFS4_OPEN_STID:
3694 case NFS4_LOCK_STID:
3695 stp = openlockstateid(s);
3696 status = nfs4_check_fh(current_fh, stp);
3697 if (status)
3698 goto out;
3699 if (stp->st_stateowner->so_is_open_owner
3700 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3701 goto out;
3702 status = nfs4_check_openmode(stp, flags);
3703 if (status)
3704 goto out;
3705 if (filpp) {
3706 if (flags & RD_STATE)
3707 file = find_readable_file(stp->st_file);
3708 else
3709 file = find_writeable_file(stp->st_file);
3710 }
3711 break;
3712 default:
3713 status = nfserr_bad_stateid;
3714 goto out;
3715 }
3716 status = nfs_ok;
3717 if (file)
3718 *filpp = get_file(file);
3719out:
3720 nfs4_unlock_state();
3721 return status;
3722}
3723
3724static __be32
3725nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3726{
3727 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
3728
3729 if (check_for_locks(stp->st_file, lo))
3730 return nfserr_locks_held;
3731 /*
3732 * Currently there's a 1-1 lock stateid<->lockowner
3733 * correspondance, and we have to delete the lockowner when we
3734 * delete the lock stateid:
3735 */
3736 unhash_lockowner(lo);
3737 return nfs_ok;
3738}
3739
3740/*
3741 * Test if the stateid is valid
3742 */
3743__be32
3744nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3745 struct nfsd4_test_stateid *test_stateid)
3746{
3747 struct nfsd4_test_stateid_id *stateid;
3748 struct nfs4_client *cl = cstate->session->se_client;
3749
3750 nfs4_lock_state();
3751 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
3752 stateid->ts_id_status =
3753 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
3754 nfs4_unlock_state();
3755
3756 return nfs_ok;
3757}
3758
/*
 * FREE_STATEID: release a stateid the client no longer needs.  Only lock
 * stateids with no locks outstanding and revoked delegations may be
 * freed; open stateids and live delegations return nfserr_locks_held.
 */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	nfs4_lock_state();
	s = find_stateid(cl, stateid);
	if (!s)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		/* live delegations must be returned, not freed */
		ret = nfserr_locks_held;
		goto out;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			goto out;
		if (s->sc_type == NFS4_LOCK_STID)
			ret = nfsd4_free_lock_stateid(openlockstateid(s));
		else
			/* open stateids are released via CLOSE instead */
			ret = nfserr_locks_held;
		break;
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		destroy_revoked_delegation(dp);
		ret = nfs_ok;
		break;
	default:
		ret = nfserr_bad_stateid;
	}
out:
	nfs4_unlock_state();
	return ret;
}
3799
3800static inline int
3801setlkflg (int type)
3802{
3803 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3804 RD_STATE : WR_STATE;
3805}
3806
/*
 * Common validation for seqid-mutating operations: check the owner's
 * seqid, reject defunct stateids, check the stateid generation, and
 * verify the stateid matches the current filehandle.  The order matters:
 * the seqid check must come first so replays are detected before other
 * errors are returned.
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step, and
		 * revoked delegations are kept only for free_stateid.
		 */
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		return status;
	return nfs4_check_fh(current_fh, stp);
}
3829
3830/*
3831 * Checks for sequence id mutating operations.
3832 */
3833static __be32
3834nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3835 stateid_t *stateid, char typemask,
3836 struct nfs4_ol_stateid **stpp,
3837 struct nfsd_net *nn)
3838{
3839 __be32 status;
3840 struct nfs4_stid *s;
3841
3842 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
3843 seqid, STATEID_VAL(stateid));
3844
3845 *stpp = NULL;
3846 status = nfsd4_lookup_stateid(stateid, typemask, &s,
3847 cstate->minorversion, nn);
3848 if (status)
3849 return status;
3850 *stpp = openlockstateid(s);
3851 if (!nfsd4_has_session(cstate))
3852 cstate->replay_owner = (*stpp)->st_stateowner;
3853
3854 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
3855}
3856
3857static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3858 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
3859{
3860 __be32 status;
3861 struct nfs4_openowner *oo;
3862
3863 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3864 NFS4_OPEN_STID, stpp, nn);
3865 if (status)
3866 return status;
3867 oo = openowner((*stpp)->st_stateowner);
3868 if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
3869 return nfserr_bad_stateid;
3870 return nfs_ok;
3871}
3872
/*
 * OPEN_CONFIRM (v4.0 only): confirm a new openowner so its stateids
 * become usable.  Confirming twice is an error.  Once confirmed, the
 * client is recorded to stable storage for reboot recovery.
 */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	nfs4_lock_state();

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED)
		goto out;
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	/* bump the generation and hand the new stateid back */
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
out:
	nfsd4_bump_seqid(cstate, status);
	/* replay_owner keeps the state lock held until after encoding */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3914
3915static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
3916{
3917 if (!test_access(access, stp))
3918 return;
3919 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
3920 clear_access(access, stp);
3921}
3922
3923static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
3924{
3925 switch (to_access) {
3926 case NFS4_SHARE_ACCESS_READ:
3927 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
3928 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3929 break;
3930 case NFS4_SHARE_ACCESS_WRITE:
3931 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
3932 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3933 break;
3934 case NFS4_SHARE_ACCESS_BOTH:
3935 break;
3936 default:
3937 WARN_ON_ONCE(1);
3938 }
3939}
3940
/*
 * Clear every deny mode from the stateid's deny bitmap that is not a
 * subset of 'deny' (i.e. keep only modes fully contained in 'deny').
 */
static void
reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
{
	int i;
	/* i ranges over all four possible deny-mode combinations */
	for (i = 0; i < 4; i++) {
		if ((i & deny) != i)
			clear_deny(i, stp);
	}
}
3950
/*
 * OPEN_DOWNGRADE: reduce the share access/deny modes of an open stateid.
 * The requested modes must be a subset of those currently held; on
 * success the stateid generation is bumped and returned to the client.
 */
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	nfs4_lock_state();
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	/* downgrading to an access mode we don't hold is invalid: */
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto out;
	}
	/* likewise for deny modes: */
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto out;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);

	reset_union_bmap_deny(od->od_share_deny, stp);

	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	status = nfs_ok;
out:
	nfsd4_bump_seqid(cstate, status);
	/* replay_owner keeps the state lock held until after encoding */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3997
/* Unhash an open stateid and mark it closed (kept around, for v4.0, so a
 * replayed CLOSE can be answered with nfserr_replay_me). */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	unhash_open_stateid(s);
	s->st_stid.sc_type = NFS4_CLOSED_STID;
}
4003
4004/*
4005 * nfs4_unlock_state() called after encode
4006 */
4007__be32
4008nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4009 struct nfsd4_close *close)
4010{
4011 __be32 status;
4012 struct nfs4_openowner *oo;
4013 struct nfs4_ol_stateid *stp;
4014 struct net *net = SVC_NET(rqstp);
4015 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4016
4017 dprintk("NFSD: nfsd4_close on file %pd\n",
4018 cstate->current_fh.fh_dentry);
4019
4020 nfs4_lock_state();
4021 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
4022 &close->cl_stateid,
4023 NFS4_OPEN_STID|NFS4_CLOSED_STID,
4024 &stp, nn);
4025 nfsd4_bump_seqid(cstate, status);
4026 if (status)
4027 goto out;
4028 oo = openowner(stp->st_stateowner);
4029 update_stateid(&stp->st_stid.sc_stateid);
4030 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4031
4032 nfsd4_close_open_stateid(stp);
4033
4034 if (cstate->minorversion)
4035 free_generic_stateid(stp);
4036 else
4037 oo->oo_last_closed_stid = stp;
4038
4039 if (list_empty(&oo->oo_owner.so_stateids)) {
4040 if (cstate->minorversion)
4041 release_openowner(oo);
4042 else {
4043 /*
4044 * In the 4.0 case we need to keep the owners around a
4045 * little while to handle CLOSE replay.
4046 */
4047 move_to_close_lru(oo, SVC_NET(rqstp));
4048 }
4049 }
4050out:
4051 if (!cstate->replay_owner)
4052 nfs4_unlock_state();
4053 return status;
4054}
4055
4056__be32
4057nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4058 struct nfsd4_delegreturn *dr)
4059{
4060 struct nfs4_delegation *dp;
4061 stateid_t *stateid = &dr->dr_stateid;
4062 struct nfs4_stid *s;
4063 __be32 status;
4064 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4065
4066 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4067 return status;
4068
4069 nfs4_lock_state();
4070 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s,
4071 cstate->minorversion, nn);
4072 if (status)
4073 goto out;
4074 dp = delegstateid(s);
4075 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
4076 if (status)
4077 goto out;
4078
4079 destroy_delegation(dp);
4080out:
4081 nfs4_unlock_state();
4082
4083 return status;
4084}
4085
4086
4087#define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
4088
4089#define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)
4090
4091static inline u64
4092end_offset(u64 start, u64 len)
4093{
4094 u64 end;
4095
4096 end = start + len;
4097 return end >= start ? end: NFS4_MAX_UINT64;
4098}
4099
4100/* last octet in a range */
4101static inline u64
4102last_byte_offset(u64 start, u64 len)
4103{
4104 u64 end;
4105
4106 WARN_ON_ONCE(!len);
4107 end = start + len;
4108 return end > start ? end - 1: NFS4_MAX_UINT64;
4109}
4110
4111static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername)
4112{
4113 return (file_hashval(inode) + cl_id
4114 + opaque_hashval(ownername->data, ownername->len))
4115 & LOCKOWNER_INO_HASH_MASK;
4116}
4117
4118/*
4119 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
4120 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
4121 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
4122 * locking, this prevents us from being completely protocol-compliant. The
4123 * real solution to this problem is to start using unsigned file offsets in
4124 * the VFS, but this is a very deep change!
4125 */
4126static inline void
4127nfs4_transform_lock_offset(struct file_lock *lock)
4128{
4129 if (lock->fl_start < 0)
4130 lock->fl_start = OFFSET_MAX;
4131 if (lock->fl_end < 0)
4132 lock->fl_end = OFFSET_MAX;
4133}
4134
/* Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks. */
static const struct lock_manager_operations nfsd_posix_mng_ops = {
};
4139
/*
 * Fill in the LOCK denied response from the conflicting file_lock.  If
 * the conflicting lock is one of ours, report its real owner; otherwise
 * (foreign lock, or allocation failure copying the owner string) report
 * an anonymous owner with a zero clientid.
 */
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		/* our lock: fl_owner is the nfs4_lockowner */
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	/* report length as "to EOF" unless the lock has a finite end */
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
4169
4170static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
4171{
4172 struct nfs4_ol_stateid *lst;
4173
4174 if (!same_owner_str(&lo->lo_owner, owner, clid))
4175 return false;
4176 if (list_empty(&lo->lo_owner.so_stateids)) {
4177 WARN_ON_ONCE(1);
4178 return false;
4179 }
4180 lst = list_first_entry(&lo->lo_owner.so_stateids,
4181 struct nfs4_ol_stateid, st_perstateowner);
4182 return lst->st_file->fi_inode == inode;
4183}
4184
4185static struct nfs4_lockowner *
4186find_lockowner_str(struct inode *inode, clientid_t *clid,
4187 struct xdr_netobj *owner, struct nfsd_net *nn)
4188{
4189 unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
4190 struct nfs4_lockowner *lo;
4191
4192 list_for_each_entry(lo, &nn->lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
4193 if (same_lockowner_ino(lo, inode, clid, owner))
4194 return lo;
4195 }
4196 return NULL;
4197}
4198
/*
 * Link a new lockowner into the two lookup hashtables (by owner string
 * and by inode) and onto its parent open stateid's lockowner list.
 */
static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
{
	struct inode *inode = open_stp->st_file->fi_inode;
	unsigned int inohash = lockowner_ino_hashval(inode,
			clp->cl_clientid.cl_id, &lo->lo_owner.so_owner);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
	list_add(&lo->lo_owner_ino_hash, &nn->lockowner_ino_hashtbl[inohash]);
	list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
}
4210
4211/*
4212 * Alloc a lock owner structure.
4213 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
4214 * occurred.
4215 *
4216 * strhashval = ownerstr_hashval
4217 */
4218
4219static struct nfs4_lockowner *
4220alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
4221 struct nfs4_lockowner *lo;
4222
4223 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
4224 if (!lo)
4225 return NULL;
4226 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
4227 lo->lo_owner.so_is_open_owner = 0;
4228 /* It is the openowner seqid that will be incremented in encode in the
4229 * case of new lockowners; so increment the lock seqid manually: */
4230 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
4231 hash_lockowner(lo, strhashval, clp, open_stp);
4232 return lo;
4233}
4234
/*
 * Allocate and initialize a lock stateid for lockowner 'lo' on file 'fp',
 * derived from the parent open stateid.  Returns NULL on allocation
 * failure.  Takes a reference on the file.
 */
static struct nfs4_ol_stateid *
alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = lo->lo_owner.so_client;

	stp = nfs4_alloc_stateid(clp);
	if (stp == NULL)
		return NULL;
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	list_add(&stp->st_perfile, &fp->fi_stateids);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	stp->st_stateowner = &lo->lo_owner;
	get_nfs4_file(fp);
	stp->st_file = fp;
	/* access is acquired lazily via get_lock_access(); deny is
	 * inherited from the parent open: */
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	return stp;
}
4255
4256static int
4257check_lock_length(u64 offset, u64 length)
4258{
4259 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
4260 LOFF_OVERFLOW(offset, length)));
4261}
4262
/*
 * Record that the lock stateid now uses @access (read or write) on its
 * file, taking a file access reference the first time only: if the bit
 * is already set in the stateid's access bitmap, this is a no-op.
 */
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_file;
	int oflag = nfs4_access_to_omode(access);

	if (test_access(access, lock_stp))
		return;
	nfs4_file_get_access(fp, oflag);
	set_access(access, lock_stp);
}
4273
/*
 * Find the lockowner for a "new" lock request, or create one (plus its
 * lock stateid) if none exists yet.  On success *lst points at the lock
 * stateid and *new is set to true only when a lockowner was created.
 *
 * For v4.0, finding an existing lockowner for a "new" lock is a seqid
 * error; v4.1+ has no lock seqids so it is allowed.
 */
static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
{
	struct nfs4_file *fi = ost->st_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct nfs4_lockowner *lo;
	unsigned int strhashval;
	struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);

	lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid,
				&lock->v.new.owner, nn);
	if (lo) {
		if (!cstate->minorversion)
			return nfserr_bad_seqid;
		/* XXX: a lockowner always has exactly one stateid: */
		*lst = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		return nfs_ok;
	}
	strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
			&lock->v.new.owner);
	lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
	if (lo == NULL)
		return nfserr_jukebox;
	*lst = alloc_init_lock_stateid(lo, fi, ost);
	if (*lst == NULL) {
		/* undo the lockowner allocation; caller never sees it */
		release_lockowner(lo);
		return nfserr_jukebox;
	}
	*new = true;
	return nfs_ok;
}
4306
4307/*
4308 * LOCK operation
4309 */
4310__be32
4311nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4312 struct nfsd4_lock *lock)
4313{
4314 struct nfs4_openowner *open_sop = NULL;
4315 struct nfs4_lockowner *lock_sop = NULL;
4316 struct nfs4_ol_stateid *lock_stp;
4317 struct file *filp = NULL;
4318 struct file_lock *file_lock = NULL;
4319 struct file_lock *conflock = NULL;
4320 __be32 status = 0;
4321 bool new_state = false;
4322 int lkflg;
4323 int err;
4324 struct net *net = SVC_NET(rqstp);
4325 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4326
4327 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
4328 (long long) lock->lk_offset,
4329 (long long) lock->lk_length);
4330
4331 if (check_lock_length(lock->lk_offset, lock->lk_length))
4332 return nfserr_inval;
4333
4334 if ((status = fh_verify(rqstp, &cstate->current_fh,
4335 S_IFREG, NFSD_MAY_LOCK))) {
4336 dprintk("NFSD: nfsd4_lock: permission denied!\n");
4337 return status;
4338 }
4339
4340 nfs4_lock_state();
4341
4342 if (lock->lk_is_new) {
4343 struct nfs4_ol_stateid *open_stp = NULL;
4344
4345 if (nfsd4_has_session(cstate))
4346 /* See rfc 5661 18.10.3: given clientid is ignored: */
4347 memcpy(&lock->v.new.clientid,
4348 &cstate->session->se_client->cl_clientid,
4349 sizeof(clientid_t));
4350
4351 status = nfserr_stale_clientid;
4352 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
4353 goto out;
4354
4355 /* validate and update open stateid and open seqid */
4356 status = nfs4_preprocess_confirmed_seqid_op(cstate,
4357 lock->lk_new_open_seqid,
4358 &lock->lk_new_open_stateid,
4359 &open_stp, nn);
4360 if (status)
4361 goto out;
4362 open_sop = openowner(open_stp->st_stateowner);
4363 status = nfserr_bad_stateid;
4364 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4365 &lock->v.new.clientid))
4366 goto out;
4367 status = lookup_or_create_lock_state(cstate, open_stp, lock,
4368 &lock_stp, &new_state);
4369 } else
4370 status = nfs4_preprocess_seqid_op(cstate,
4371 lock->lk_old_lock_seqid,
4372 &lock->lk_old_lock_stateid,
4373 NFS4_LOCK_STID, &lock_stp, nn);
4374 if (status)
4375 goto out;
4376 lock_sop = lockowner(lock_stp->st_stateowner);
4377
4378 lkflg = setlkflg(lock->lk_type);
4379 status = nfs4_check_openmode(lock_stp, lkflg);
4380 if (status)
4381 goto out;
4382
4383 status = nfserr_grace;
4384 if (locks_in_grace(net) && !lock->lk_reclaim)
4385 goto out;
4386 status = nfserr_no_grace;
4387 if (!locks_in_grace(net) && lock->lk_reclaim)
4388 goto out;
4389
4390 file_lock = locks_alloc_lock();
4391 if (!file_lock) {
4392 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4393 status = nfserr_jukebox;
4394 goto out;
4395 }
4396
4397 locks_init_lock(file_lock);
4398 switch (lock->lk_type) {
4399 case NFS4_READ_LT:
4400 case NFS4_READW_LT:
4401 filp = find_readable_file(lock_stp->st_file);
4402 if (filp)
4403 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
4404 file_lock->fl_type = F_RDLCK;
4405 break;
4406 case NFS4_WRITE_LT:
4407 case NFS4_WRITEW_LT:
4408 filp = find_writeable_file(lock_stp->st_file);
4409 if (filp)
4410 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
4411 file_lock->fl_type = F_WRLCK;
4412 break;
4413 default:
4414 status = nfserr_inval;
4415 goto out;
4416 }
4417 if (!filp) {
4418 status = nfserr_openmode;
4419 goto out;
4420 }
4421 file_lock->fl_owner = (fl_owner_t)lock_sop;
4422 file_lock->fl_pid = current->tgid;
4423 file_lock->fl_file = filp;
4424 file_lock->fl_flags = FL_POSIX;
4425 file_lock->fl_lmops = &nfsd_posix_mng_ops;
4426 file_lock->fl_start = lock->lk_offset;
4427 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
4428 nfs4_transform_lock_offset(file_lock);
4429
4430 conflock = locks_alloc_lock();
4431 if (!conflock) {
4432 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4433 status = nfserr_jukebox;
4434 goto out;
4435 }
4436
4437 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
4438 switch (-err) {
4439 case 0: /* success! */
4440 update_stateid(&lock_stp->st_stid.sc_stateid);
4441 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
4442 sizeof(stateid_t));
4443 status = 0;
4444 break;
4445 case (EAGAIN): /* conflock holds conflicting lock */
4446 status = nfserr_denied;
4447 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4448 nfs4_set_lock_denied(conflock, &lock->lk_denied);
4449 break;
4450 case (EDEADLK):
4451 status = nfserr_deadlock;
4452 break;
4453 default:
4454 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
4455 status = nfserrno(err);
4456 break;
4457 }
4458out:
4459 if (status && new_state)
4460 release_lockowner(lock_sop);
4461 nfsd4_bump_seqid(cstate, status);
4462 if (!cstate->replay_owner)
4463 nfs4_unlock_state();
4464 if (file_lock)
4465 locks_free_lock(file_lock);
4466 if (conflock)
4467 locks_free_lock(conflock);
4468 return status;
4469}
4470
4471/*
4472 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4473 * so we do a temporary open here just to get an open file to pass to
4474 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4475 * inode operation.)
4476 */
4477static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4478{
4479 struct file *file;
4480 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4481 if (!err) {
4482 err = nfserrno(vfs_test_lock(file, lock));
4483 nfsd_close(file);
4484 }
4485 return err;
4486}
4487
4488/*
4489 * LOCKT operation
4490 */
4491__be32
4492nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4493 struct nfsd4_lockt *lockt)
4494{
4495 struct inode *inode;
4496 struct file_lock *file_lock = NULL;
4497 struct nfs4_lockowner *lo;
4498 __be32 status;
4499 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4500
4501 if (locks_in_grace(SVC_NET(rqstp)))
4502 return nfserr_grace;
4503
4504 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4505 return nfserr_inval;
4506
4507 nfs4_lock_state();
4508
4509 if (!nfsd4_has_session(cstate)) {
4510 status = lookup_clientid(&lockt->lt_clientid, false, nn, NULL);
4511 if (status)
4512 goto out;
4513 }
4514
4515 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4516 goto out;
4517
4518 inode = cstate->current_fh.fh_dentry->d_inode;
4519 file_lock = locks_alloc_lock();
4520 if (!file_lock) {
4521 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4522 status = nfserr_jukebox;
4523 goto out;
4524 }
4525 locks_init_lock(file_lock);
4526 switch (lockt->lt_type) {
4527 case NFS4_READ_LT:
4528 case NFS4_READW_LT:
4529 file_lock->fl_type = F_RDLCK;
4530 break;
4531 case NFS4_WRITE_LT:
4532 case NFS4_WRITEW_LT:
4533 file_lock->fl_type = F_WRLCK;
4534 break;
4535 default:
4536 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
4537 status = nfserr_inval;
4538 goto out;
4539 }
4540
4541 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner, nn);
4542 if (lo)
4543 file_lock->fl_owner = (fl_owner_t)lo;
4544 file_lock->fl_pid = current->tgid;
4545 file_lock->fl_flags = FL_POSIX;
4546
4547 file_lock->fl_start = lockt->lt_offset;
4548 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
4549
4550 nfs4_transform_lock_offset(file_lock);
4551
4552 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
4553 if (status)
4554 goto out;
4555
4556 if (file_lock->fl_type != F_UNLCK) {
4557 status = nfserr_denied;
4558 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
4559 }
4560out:
4561 nfs4_unlock_state();
4562 if (file_lock)
4563 locks_free_lock(file_lock);
4564 return status;
4565}
4566
/*
 * LOCKU operation
 *
 * Release a byte-range lock: validate the lock stateid and seqid, then
 * ask the VFS to set an F_UNLCK lock over the requested range.
 */
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_locku *locku)
{
	struct nfs4_ol_stateid *stp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	nfs4_lock_state();

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	filp = find_any_file(stp->st_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto out;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}
	locks_init_lock(file_lock);
	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	/* success: bump the stateid and return the new value */
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

out:
	nfsd4_bump_seqid(cstate, status);
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto out;
}
4636
4637/*
4638 * returns
4639 * 1: locks held by lockowner
4640 * 0: no locks held by lockowner
4641 */
4642static int
4643check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4644{
4645 struct file_lock **flpp;
4646 struct inode *inode = filp->fi_inode;
4647 int status = 0;
4648
4649 spin_lock(&inode->i_lock);
4650 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4651 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4652 status = 1;
4653 goto out;
4654 }
4655 }
4656out:
4657 spin_unlock(&inode->i_lock);
4658 return status;
4659}
4660
/*
 * RELEASE_LOCKOWNER operation
 *
 * Free all state for the given lockowner, but only if no locks are still
 * held anywhere under it.  All matching owners are first checked; nothing
 * is released until every one has been verified lock-free, so the client
 * never sees a partial release.
 */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
{
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	struct list_head matches;
	unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	nfs4_lock_state();

	status = lookup_clientid(clid, cstate->minorversion, nn, NULL);
	if (status)
		goto out;

	status = nfserr_locks_held;
	INIT_LIST_HEAD(&matches);

	list_for_each_entry(sop, &nn->ownerstr_hashtbl[hashval], so_strhash) {
		if (sop->so_is_open_owner)
			continue;
		if (!same_owner_str(sop, owner, clid))
			continue;
		list_for_each_entry(stp, &sop->so_stateids,
				st_perstateowner) {
			lo = lockowner(sop);
			/* any remaining lock => fail with nfserr_locks_held */
			if (check_for_locks(stp->st_file, lo))
				goto out;
			list_add(&lo->lo_list, &matches);
		}
	}
	/* Clients probably won't expect us to return with some (but not all)
	 * of the lockowner state released; so don't release any until all
	 * have been checked. */
	status = nfs_ok;
	while (!list_empty(&matches)) {
		lo = list_entry(matches.next, struct nfs4_lockowner,
								lo_list);
		/* unhash_stateowner deletes so_perclient only
		 * for openowners. */
		list_del(&lo->lo_list);
		release_lockowner(lo);
	}
out:
	nfs4_unlock_state();
	return status;
}
4717
/* Allocate an (uninitialized) reclaim record; may return NULL. */
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}
4723
4724bool
4725nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
4726{
4727 struct nfs4_client_reclaim *crp;
4728
4729 crp = nfsd4_find_reclaim_client(name, nn);
4730 return (crp && crp->cr_clp);
4731}
4732
4733/*
4734 * failure => all reset bets are off, nfserr_no_grace...
4735 */
4736struct nfs4_client_reclaim *
4737nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
4738{
4739 unsigned int strhashval;
4740 struct nfs4_client_reclaim *crp;
4741
4742 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4743 crp = alloc_reclaim();
4744 if (crp) {
4745 strhashval = clientstr_hashval(name);
4746 INIT_LIST_HEAD(&crp->cr_strhash);
4747 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
4748 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4749 crp->cr_clp = NULL;
4750 nn->reclaim_str_hashtbl_size++;
4751 }
4752 return crp;
4753}
4754
/* Unhash, free, and account for the removal of one reclaim record. */
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}
4762
/*
 * Drop every reclaim record in the per-net hash table.  Afterwards the
 * size counter must be zero; a leftover count indicates a bookkeeping bug.
 */
void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
			                struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
4778
4779/*
4780 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4781struct nfs4_client_reclaim *
4782nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
4783{
4784 unsigned int strhashval;
4785 struct nfs4_client_reclaim *crp = NULL;
4786
4787 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
4788
4789 strhashval = clientstr_hashval(recdir);
4790 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
4791 if (same_name(crp->cr_recdir, recdir)) {
4792 return crp;
4793 }
4794 }
4795 return NULL;
4796}
4797
4798/*
4799* Called from OPEN. Look for clientid in reclaim list.
4800*/
4801__be32
4802nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn)
4803{
4804 struct nfs4_client *clp;
4805
4806 /* find clientid in conf_id_hashtbl */
4807 clp = find_confirmed_client(clid, sessions, nn);
4808 if (clp == NULL)
4809 return nfserr_reclaim_bad;
4810
4811 return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
4812}
4813
4814#ifdef CONFIG_NFSD_FAULT_INJECTION
4815
/*
 * Fault injection: expire one client.  Returns the number of clients
 * forgotten (0 if the client could not be marked expired, else 1).
 * The @max argument is unused here; the caller's iteration handles limits.
 */
u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
{
	if (mark_client_expired(clp))
		return 0;
	expire_client(clp);
	return 1;
}
4823
/* Fault injection: log one client's address; always counts as 1. */
u64 nfsd_print_client(struct nfs4_client *clp, u64 num)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *) &clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s\n", buf);
	return 1;
}
4831
/* Log "<client addr> has <count> <type>" for the fault-injection printers. */
static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *) &clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}
4839
/*
 * Visit up to @max lockowners of @clp, calling @func (may be NULL, for
 * counting only) on each.  Uses the _safe iterators because @func may
 * unhash the entries it is handed.  Returns the number visited.
 */
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_lockowner *))
{
	struct nfs4_openowner *oop;
	struct nfs4_lockowner *lop, *lo_next;
	struct nfs4_ol_stateid *stp, *st_next;
	u64 count = 0;

	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next, &oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lop, lo_next, &stp->st_lockowners, lo_perstateid) {
				if (func)
					func(lop);
				if (++count == max)
					return count;
			}
		}
	}

	return count;
}
4860
/* Fault injection: release up to @max of the client's lockowners. */
u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max)
{
	return nfsd_foreach_client_lock(clp, max, release_lockowner);
}
4865
/* Fault injection: count (without touching) the client's lockowners. */
u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max)
{
	u64 count = nfsd_foreach_client_lock(clp, max, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}
4872
/*
 * Visit up to @max openowners of @clp, calling @func (may be NULL) on
 * each; the _safe iterator tolerates @func unhashing the current entry.
 * Returns the number visited.
 */
static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	u64 count = 0;

	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func)
			func(oop);
		if (++count == max)
			break;
	}

	return count;
}
4887
/* Fault injection: release up to @max of the client's openowners. */
u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max)
{
	return nfsd_foreach_client_open(clp, max, release_openowner);
}
4892
/* Fault injection: count (without touching) the client's openowners. */
u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max)
{
	u64 count = nfsd_foreach_client_open(clp, max, NULL);
	nfsd_print_count(clp, count, "open files");
	return count;
}
4899
/*
 * Collect up to @max of the client's delegations onto @victims (or just
 * count them when @victims is NULL).  Callers hold recall_lock, which
 * protects the dl_recall_lru moves.
 */
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	u64 count = 0;

	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims)
			list_move(&dp->dl_recall_lru, victims);
		if (++count == max)
			break;
	}
	return count;
}
4914
/*
 * Fault injection: revoke up to @max of the client's delegations.
 * Victims are gathered under recall_lock, then revoked after it is
 * dropped (revoke_delegation is not called with the lock held).
 */
u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp, *next;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&recall_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	spin_unlock(&recall_lock);

	list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
		revoke_delegation(dp);

	return count;
}
4930
/*
 * Fault injection: initiate recall of up to @max of the client's
 * delegations.  Unlike the forget path, the break calls happen while
 * recall_lock is still held.
 */
u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp, *next;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&recall_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
		nfsd_break_one_deleg(dp);
	spin_unlock(&recall_lock);

	return count;
}
4945
/* Fault injection: count (without moving) the client's delegations. */
u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max)
{
	u64 count = 0;

	spin_lock(&recall_lock);
	count = nfsd_find_all_delegations(clp, max, NULL);
	spin_unlock(&recall_lock);

	nfsd_print_count(clp, count, "delegations");
	return count;
}
4957
/*
 * Apply @func to clients on the LRU until @max items have been processed
 * in total (max == 0 means no limit).  Each call gets the remaining
 * budget (max - count).  Returns the total processed.
 */
u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64))
{
	struct nfs4_client *clp, *next;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += func(clp, max - count);
		if ((max != 0) && (count >= max))
			break;
	}

	return count;
}
4975
/*
 * Find a client on the LRU whose address matches the first @addr_size
 * bytes of @addr; NULL if none (or the per-net state is not ready).
 */
struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}
4990
4991#endif /* CONFIG_NFSD_FAULT_INJECTION */
4992
4993/*
4994 * Since the lifetime of a delegation isn't limited to that of an open, a
4995 * client may quite reasonably hang on to a delegation as long as it has
4996 * the inode cached. This becomes an obvious problem the first time a
4997 * client's inode cache approaches the size of the server's total memory.
4998 *
4999 * For now we avoid this problem by imposing a hard limit on the number
5000 * of delegations, which varies according to the server's memory size.
5001 */
5002static void
5003set_max_delegations(void)
5004{
5005 /*
5006 * Allow at most 4 delegations per megabyte of RAM. Quick
5007 * estimates suggest that in the worst case (where every delegation
5008 * is for a different inode), a delegation could take about 1.5K,
5009 * giving a worst case usage of about 6% of memory.
5010 */
5011 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
5012}
5013
/*
 * Allocate and initialize the per-network-namespace nfsd state: the
 * client, owner, lockowner and session hash tables, the name trees,
 * the LRU lists, and the laundromat work item.  On any allocation
 * failure the already-allocated tables are unwound (goto chain) and
 * -ENOMEM is returned.  Takes a reference on @net, dropped in
 * nfs4_state_destroy_net().
 */
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!nn->ownerstr_hashtbl)
		goto err_ownerstr;
	nn->lockowner_ino_hashtbl = kmalloc(sizeof(struct list_head) *
			LOCKOWNER_INO_HASH_SIZE, GFP_KERNEL);
	if (!nn->lockowner_ino_hashtbl)
		goto err_lockowner_ino;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
	for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->lockowner_ino_hashtbl[i]);
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->lockowner_ino_hashtbl);
err_lockowner_ino:
	kfree(nn->ownerstr_hashtbl);
err_ownerstr:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
5073
/*
 * Tear down the per-network-namespace state created by
 * nfs4_state_create_net(): destroy all confirmed and unconfirmed
 * clients, free the hash tables, and drop the net reference.
 */
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->lockowner_ino_hashtbl);
	kfree(nn->ownerstr_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}
5102
/*
 * Per-net startup: create the state tables, initialize client tracking,
 * record boot time, start the grace period, and schedule the first
 * laundromat run for when the grace period ends.
 */
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	nfsd4_client_tracking_init(net);
	nn->boot_time = get_seconds();
	locks_start_grace(net, &nn->nfsd4_manager);
	nn->grace_ended = false;
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
	       nn->nfsd4_grace, net);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}
5121
/* initialization to perform when the nfsd service is started: */

/*
 * Global (not per-net) startup: set up the callback credential, the
 * laundromat workqueue, the callback queue, and the delegation limit.
 */
int
nfs4_state_start(void)
{
	int ret;

	/*
	 * NOTE(review): a set_callback_cred() failure is reported as
	 * -ENOMEM rather than its own return value — presumably that is
	 * the only way it can fail; confirm before changing.
	 */
	ret = set_callback_cred();
	if (ret)
		return -ENOMEM;
	laundry_wq = create_singlethread_workqueue("nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out_recovery;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();

	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out_recovery:
	return ret;
}
5150
/*
 * Per-net shutdown: stop the laundromat and grace period, destroy all
 * outstanding delegations (gathered under recall_lock, destroyed after
 * dropping it), then tear down client tracking and the state tables.
 */
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	nfs4_lock_state();
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		destroy_delegation(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
	nfs4_unlock_state();
}
5178
/* Global shutdown counterpart of nfs4_state_start(). */
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}
5185
/*
 * Substitute the compound's saved current stateid for the special
 * "current stateid" value, if one has been recorded.
 */
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}
5192
/*
 * Record @stateid as the compound's current stateid.  Only meaningful
 * for v4.1+, where the "current stateid" mechanism exists.
 */
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}
5201
/* Invalidate the compound's recorded current stateid. */
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
5207
5208/*
5209 * functions to set current state id
5210 */
5211void
5212nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
5213{
5214 put_stateid(cstate, &odp->od_stateid);
5215}
5216
/* OPEN: record its result stateid as the current stateid. */
void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}
5222
/* CLOSE: record its result stateid as the current stateid. */
void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}
5228
/* LOCK: record its result stateid as the current stateid. */
void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}
5234
5235/*
5236 * functions to consume current state id
5237 */
5238
5239void
5240nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
5241{
5242 get_stateid(cstate, &odp->od_stateid);
5243}
5244
/* DELEGRETURN: substitute the current stateid into the request. */
void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}
5250
/* FREE_STATEID: substitute the current stateid into the request. */
void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}
5256
/* SETATTR: substitute the current stateid into the request. */
void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}
5262
/* CLOSE: substitute the current stateid into the request. */
void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}
5268
/* LOCKU: substitute the current stateid into the request. */
void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	get_stateid(cstate, &locku->lu_stateid);
}
5274
/* READ: substitute the current stateid into the request. */
void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	get_stateid(cstate, &read->rd_stateid);
}
5280
/* WRITE: substitute the current stateid into the request. */
void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	get_stateid(cstate, &write->wr_stateid);
}