fs/nfsd/nfs4state.c (as of merge tag 'nfs-for-6.12-1', git://git.linux-nfs.org/projects/anna/linux-nfs)
1/*
2* Copyright (c) 2001 The Regents of the University of Michigan.
3* All rights reserved.
4*
5* Kendrick Smith <kmsmith@umich.edu>
6* Andy Adamson <kandros@umich.edu>
7*
8* Redistribution and use in source and binary forms, with or without
9* modification, are permitted provided that the following conditions
10* are met:
11*
12* 1. Redistributions of source code must retain the above copyright
13* notice, this list of conditions and the following disclaimer.
14* 2. Redistributions in binary form must reproduce the above copyright
15* notice, this list of conditions and the following disclaimer in the
16* documentation and/or other materials provided with the distribution.
17* 3. Neither the name of the University nor the names of its
18* contributors may be used to endorse or promote products derived
19* from this software without specific prior written permission.
20*
21* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32*
33*/
34
35#include <linux/file.h>
36#include <linux/fs.h>
37#include <linux/slab.h>
38#include <linux/namei.h>
39#include <linux/swap.h>
40#include <linux/pagemap.h>
41#include <linux/ratelimit.h>
42#include <linux/sunrpc/svcauth_gss.h>
43#include <linux/sunrpc/addr.h>
44#include <linux/jhash.h>
45#include <linux/string_helpers.h>
46#include <linux/fsnotify.h>
47#include <linux/rhashtable.h>
48#include <linux/nfs_ssc.h>
49
50#include "xdr4.h"
51#include "xdr4cb.h"
52#include "vfs.h"
53#include "current_stateid.h"
54
55#include "netns.h"
56#include "pnfs.h"
57#include "filecache.h"
58#include "trace.h"
59
60#define NFSDDBG_FACILITY NFSDDBG_PROC
61
62#define all_ones {{ ~0, ~0}, ~0}
63static const stateid_t one_stateid = {
64 .si_generation = ~0,
65 .si_opaque = all_ones,
66};
67static const stateid_t zero_stateid = {
68 /* all fields zero */
69};
70static const stateid_t currentstateid = {
71 .si_generation = 1,
72};
73static const stateid_t close_stateid = {
74 .si_generation = 0xffffffffU,
75};
76
77static u64 current_sessionid = 1;
78
79#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
80#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
81#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
82#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
83
84/* forward declarations */
85static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
86static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
87void nfsd4_end_grace(struct nfsd_net *nn);
88static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
89static void nfsd4_file_hash_remove(struct nfs4_file *fi);
90static void deleg_reaper(struct nfsd_net *nn);
91
92/* Locking: */
93
94/*
95 * Currently used for the del_recall_lru and file hash table. In an
96 * effort to decrease the scope of the client_mutex, this spinlock may
97 * eventually cover more:
98 */
99static DEFINE_SPINLOCK(state_lock);
100
101enum nfsd4_st_mutex_lock_subclass {
102 OPEN_STATEID_MUTEX = 0,
103 LOCK_STATEID_MUTEX = 1,
104};
105
106/*
107 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
108 * the refcount on the open stateid to drop.
109 */
110static DECLARE_WAIT_QUEUE_HEAD(close_wq);
111
112/*
113 * A waitqueue where a writer to clients/#/ctl destroying a client can
114 * wait for cl_rpc_users to drop to 0 and then for the client to be
115 * unhashed.
116 */
117static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
118
119static struct kmem_cache *client_slab;
120static struct kmem_cache *openowner_slab;
121static struct kmem_cache *lockowner_slab;
122static struct kmem_cache *file_slab;
123static struct kmem_cache *stateid_slab;
124static struct kmem_cache *deleg_slab;
125static struct kmem_cache *odstate_slab;
126
127static void free_session(struct nfsd4_session *);
128
129static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
130static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
131static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops;
132
133static struct workqueue_struct *laundry_wq;
134
135int nfsd4_create_laundry_wq(void)
136{
137 int rc = 0;
138
139 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
140 if (laundry_wq == NULL)
141 rc = -ENOMEM;
142 return rc;
143}
144
145void nfsd4_destroy_laundry_wq(void)
146{
147 destroy_workqueue(laundry_wq);
148}
149
150static bool is_session_dead(struct nfsd4_session *ses)
151{
152 return ses->se_flags & NFS4_SESSION_DEAD;
153}
154
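/*
 * Mark the session dead so it will be freed on the final reference drop.
 * Fails with nfserr_jukebox if anyone other than the caller still holds
 * a reference.
 */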
155static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
156{
157 if (atomic_read(&ses->se_ref) > ref_held_by_me)
158 return nfserr_jukebox;
159 ses->se_flags |= NFS4_SESSION_DEAD;
160 return nfs_ok;
161}
162
163static bool is_client_expired(struct nfs4_client *clp)
164{
165 return clp->cl_time == 0;
166}
167
168static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
169 struct nfs4_client *clp)
170{
171 if (clp->cl_state != NFSD4_ACTIVE)
172 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
173}
174
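/*
 * Take an rpc_users reference on the client and mark it active (clearing
 * any courtesy-client accounting). Fails if the client has already expired.
 */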
175static __be32 get_client_locked(struct nfs4_client *clp)
176{
177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
178
179 lockdep_assert_held(&nn->client_lock);
180
181 if (is_client_expired(clp))
182 return nfserr_expired;
183 atomic_inc(&clp->cl_rpc_users);
184 nfsd4_dec_courtesy_client_count(nn, clp);
185 clp->cl_state = NFSD4_ACTIVE;
186 return nfs_ok;
187}
188
189/* must be called under the client_lock */
190static inline void
191renew_client_locked(struct nfs4_client *clp)
192{
193 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
194
195 if (is_client_expired(clp)) {
196 WARN_ON(1);
197 printk("%s: client (clientid %08x/%08x) already expired\n",
198 __func__,
199 clp->cl_clientid.cl_boot,
200 clp->cl_clientid.cl_id);
201 return;
202 }
203
204 list_move_tail(&clp->cl_lru, &nn->client_lru);
205 clp->cl_time = ktime_get_boottime_seconds();
206 nfsd4_dec_courtesy_client_count(nn, clp);
207 clp->cl_state = NFSD4_ACTIVE;
208}
209
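/*
 * Drop an rpc_users reference. On the final put, renew the client's lease
 * if it is still live, otherwise wake up anyone waiting to destroy the
 * expired client.
 */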
210static void put_client_renew_locked(struct nfs4_client *clp)
211{
212 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
213
214 lockdep_assert_held(&nn->client_lock);
215
216 if (!atomic_dec_and_test(&clp->cl_rpc_users))
217 return;
218 if (!is_client_expired(clp))
219 renew_client_locked(clp);
220 else
221 wake_up_all(&expiry_wq);
222}
223
224static void put_client_renew(struct nfs4_client *clp)
225{
226 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
227
228 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
229 return;
230 if (!is_client_expired(clp))
231 renew_client_locked(clp);
232 else
233 wake_up_all(&expiry_wq);
234 spin_unlock(&nn->client_lock);
235}
236
237static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
238{
239 __be32 status;
240
241 if (is_session_dead(ses))
242 return nfserr_badsession;
243 status = get_client_locked(ses->se_client);
244 if (status)
245 return status;
246 atomic_inc(&ses->se_ref);
247 return nfs_ok;
248}
249
250static void nfsd4_put_session_locked(struct nfsd4_session *ses)
251{
252 struct nfs4_client *clp = ses->se_client;
253 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
254
255 lockdep_assert_held(&nn->client_lock);
256
257 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
258 free_session(ses);
259 put_client_renew_locked(clp);
260}
261
262static void nfsd4_put_session(struct nfsd4_session *ses)
263{
264 struct nfs4_client *clp = ses->se_client;
265 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
266
267 spin_lock(&nn->client_lock);
268 nfsd4_put_session_locked(ses);
269 spin_unlock(&nn->client_lock);
270}
271
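/*
 * Find and dequeue a pending blocked lock for this lockowner and filehandle,
 * cancelling its file_lock block before handing it back to the caller.
 */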
272static struct nfsd4_blocked_lock *
273find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
274 struct nfsd_net *nn)
275{
276 struct nfsd4_blocked_lock *cur, *found = NULL;
277
278 spin_lock(&nn->blocked_locks_lock);
279 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
280 if (fh_match(fh, &cur->nbl_fh)) {
281 list_del_init(&cur->nbl_list);
282 WARN_ON(list_empty(&cur->nbl_lru));
283 list_del_init(&cur->nbl_lru);
284 found = cur;
285 break;
286 }
287 }
288 spin_unlock(&nn->blocked_locks_lock);
289 if (found)
290 locks_delete_block(&found->nbl_lock);
291 return found;
292}
293
294static struct nfsd4_blocked_lock *
295find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
296 struct nfsd_net *nn)
297{
298 struct nfsd4_blocked_lock *nbl;
299
300 nbl = find_blocked_lock(lo, fh, nn);
301 if (!nbl) {
302 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
303 if (nbl) {
304 INIT_LIST_HEAD(&nbl->nbl_list);
305 INIT_LIST_HEAD(&nbl->nbl_lru);
306 fh_copy_shallow(&nbl->nbl_fh, fh);
307 locks_init_lock(&nbl->nbl_lock);
308 kref_init(&nbl->nbl_kref);
309 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
310 &nfsd4_cb_notify_lock_ops,
311 NFSPROC4_CLNT_CB_NOTIFY_LOCK);
312 }
313 }
314 return nbl;
315}
316
317static void
318free_nbl(struct kref *kref)
319{
320 struct nfsd4_blocked_lock *nbl;
321
322 nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
323 locks_release_private(&nbl->nbl_lock);
324 kfree(nbl);
325}
326
327static void
328free_blocked_lock(struct nfsd4_blocked_lock *nbl)
329{
330 locks_delete_block(&nbl->nbl_lock);
331 kref_put(&nbl->nbl_kref, free_nbl);
332}
333
334static void
335remove_blocked_locks(struct nfs4_lockowner *lo)
336{
337 struct nfs4_client *clp = lo->lo_owner.so_client;
338 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
339 struct nfsd4_blocked_lock *nbl;
340 LIST_HEAD(reaplist);
341
342 /* Dequeue all blocked locks */
343 spin_lock(&nn->blocked_locks_lock);
344 while (!list_empty(&lo->lo_blocked)) {
345 nbl = list_first_entry(&lo->lo_blocked,
346 struct nfsd4_blocked_lock,
347 nbl_list);
348 list_del_init(&nbl->nbl_list);
349 WARN_ON(list_empty(&nbl->nbl_lru));
350 list_move(&nbl->nbl_lru, &reaplist);
351 }
352 spin_unlock(&nn->blocked_locks_lock);
353
354 /* Now free them */
355 while (!list_empty(&reaplist)) {
356 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
357 nbl_lru);
358 list_del_init(&nbl->nbl_lru);
359 free_blocked_lock(nbl);
360 }
361}
362
363static void
364nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
365{
366 struct nfsd4_blocked_lock *nbl = container_of(cb,
367 struct nfsd4_blocked_lock, nbl_cb);
368 locks_delete_block(&nbl->nbl_lock);
369}
370
371static int
372nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
373{
374 trace_nfsd_cb_notify_lock_done(&zero_stateid, task);
375
376 /*
377 * Since this is just an optimization, we don't try very hard if it
378 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
379 * just quit trying on anything else.
380 */
381 switch (task->tk_status) {
382 case -NFS4ERR_DELAY:
383 rpc_delay(task, 1 * HZ);
384 return 0;
385 default:
386 return 1;
387 }
388}
389
390static void
391nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
392{
393 struct nfsd4_blocked_lock *nbl = container_of(cb,
394 struct nfsd4_blocked_lock, nbl_cb);
395
396 free_blocked_lock(nbl);
397}
398
399static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
400 .prepare = nfsd4_cb_notify_lock_prepare,
401 .done = nfsd4_cb_notify_lock_done,
402 .release = nfsd4_cb_notify_lock_release,
403 .opcode = OP_CB_NOTIFY_LOCK,
404};
405
406/*
407 * We store the NONE, READ, WRITE, and BOTH bits separately in the
408 * st_{access,deny}_bmap field of the stateid, in order to track not
409 * only what share bits are currently in force, but also what
410 * combinations of share bits previous opens have used. This allows us
411 * to enforce the recommendation in
412 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 413* the server return an error if the client attempts to downgrade to a
414 * combination of share bits not explicable by closing some of its
415 * previous opens.
416 *
417 * This enforcement is arguably incomplete, since we don't keep
418 * track of access/deny bit combinations; so, e.g., we allow:
419 *
420 * OPEN allow read, deny write
421 * OPEN allow both, deny none
422 * DOWNGRADE allow read, deny none
423 *
424 * which we should reject.
425 *
426 * But you could also argue that our current code is already overkill,
427 * since it only exists to return NFS4ERR_INVAL on incorrect client
428 * behavior.
429 */
430static unsigned int
431bmap_to_share_mode(unsigned long bmap)
432{
433 int i;
434 unsigned int access = 0;
435
436 for (i = 1; i < 4; i++) {
437 if (test_bit(i, &bmap))
438 access |= i;
439 }
440 return access;
441}
442
443/* set share access for a given stateid */
444static inline void
445set_access(u32 access, struct nfs4_ol_stateid *stp)
446{
447 unsigned char mask = 1 << access;
448
449 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
450 stp->st_access_bmap |= mask;
451}
452
453/* clear share access for a given stateid */
454static inline void
455clear_access(u32 access, struct nfs4_ol_stateid *stp)
456{
457 unsigned char mask = 1 << access;
458
459 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
460 stp->st_access_bmap &= ~mask;
461}
462
463/* test whether a given stateid has access */
464static inline bool
465test_access(u32 access, struct nfs4_ol_stateid *stp)
466{
467 unsigned char mask = 1 << access;
468
469 return (bool)(stp->st_access_bmap & mask);
470}
471
472/* set share deny for a given stateid */
473static inline void
474set_deny(u32 deny, struct nfs4_ol_stateid *stp)
475{
476 unsigned char mask = 1 << deny;
477
478 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
479 stp->st_deny_bmap |= mask;
480}
481
482/* clear share deny for a given stateid */
483static inline void
484clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
485{
486 unsigned char mask = 1 << deny;
487
488 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
489 stp->st_deny_bmap &= ~mask;
490}
491
492/* test whether a given stateid is denying specific access */
493static inline bool
494test_deny(u32 deny, struct nfs4_ol_stateid *stp)
495{
496 unsigned char mask = 1 << deny;
497
498 return (bool)(stp->st_deny_bmap & mask);
499}
500
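/* Map NFSv4 share-access bits onto the corresponding open(2) access mode. */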
501static int nfs4_access_to_omode(u32 access)
502{
503 switch (access & NFS4_SHARE_ACCESS_BOTH) {
504 case NFS4_SHARE_ACCESS_READ:
505 return O_RDONLY;
506 case NFS4_SHARE_ACCESS_WRITE:
507 return O_WRONLY;
508 case NFS4_SHARE_ACCESS_BOTH:
509 return O_RDWR;
510 }
511 WARN_ON_ONCE(1);
512 return O_RDONLY;
513}
514
515static inline int
516access_permit_read(struct nfs4_ol_stateid *stp)
517{
518 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
519 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
520 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
521}
522
523static inline int
524access_permit_write(struct nfs4_ol_stateid *stp)
525{
526 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
527 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
528}
529
530static inline struct nfs4_stateowner *
531nfs4_get_stateowner(struct nfs4_stateowner *sop)
532{
533 atomic_inc(&sop->so_count);
534 return sop;
535}
536
537static int
538same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
539{
540 return (sop->so_owner.len == owner->len) &&
541 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
542}
543
544static struct nfs4_openowner *
545find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
546 struct nfs4_client *clp)
547{
548 struct nfs4_stateowner *so;
549
550 lockdep_assert_held(&clp->cl_lock);
551
552 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
553 so_strhash) {
554 if (!so->so_is_open_owner)
555 continue;
556 if (same_owner_str(so, &open->op_owner))
557 return openowner(nfs4_get_stateowner(so));
558 }
559 return NULL;
560}
561
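/* Simple multiply-by-37 string hash over an opaque blob (e.g. an owner name). */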
562static inline u32
563opaque_hashval(const void *ptr, int nbytes)
564{
565 unsigned char *cptr = (unsigned char *) ptr;
566
567 u32 x = 0;
568 while (nbytes--) {
569 x *= 37;
570 x += *cptr++;
571 }
572 return x;
573}
574
575static void nfsd4_free_file_rcu(struct rcu_head *rcu)
576{
577 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
578
579 kmem_cache_free(file_slab, fp);
580}
581
582void
583put_nfs4_file(struct nfs4_file *fi)
584{
585 if (refcount_dec_and_test(&fi->fi_ref)) {
586 nfsd4_file_hash_remove(fi);
587 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
588 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
589 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
590 }
591}
592
593static struct nfsd_file *
594find_writeable_file_locked(struct nfs4_file *f)
595{
596 struct nfsd_file *ret;
597
598 lockdep_assert_held(&f->fi_lock);
599
600 ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
601 if (!ret)
602 ret = nfsd_file_get(f->fi_fds[O_RDWR]);
603 return ret;
604}
605
606static struct nfsd_file *
607find_writeable_file(struct nfs4_file *f)
608{
609 struct nfsd_file *ret;
610
611 spin_lock(&f->fi_lock);
612 ret = find_writeable_file_locked(f);
613 spin_unlock(&f->fi_lock);
614
615 return ret;
616}
617
618static struct nfsd_file *
619find_readable_file_locked(struct nfs4_file *f)
620{
621 struct nfsd_file *ret;
622
623 lockdep_assert_held(&f->fi_lock);
624
625 ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
626 if (!ret)
627 ret = nfsd_file_get(f->fi_fds[O_RDWR]);
628 return ret;
629}
630
631static struct nfsd_file *
632find_readable_file(struct nfs4_file *f)
633{
634 struct nfsd_file *ret;
635
636 spin_lock(&f->fi_lock);
637 ret = find_readable_file_locked(f);
638 spin_unlock(&f->fi_lock);
639
640 return ret;
641}
642
643static struct nfsd_file *
644find_rw_file(struct nfs4_file *f)
645{
646 struct nfsd_file *ret;
647
648 spin_lock(&f->fi_lock);
649 ret = nfsd_file_get(f->fi_fds[O_RDWR]);
650 spin_unlock(&f->fi_lock);
651
652 return ret;
653}
654
655struct nfsd_file *
656find_any_file(struct nfs4_file *f)
657{
658 struct nfsd_file *ret;
659
660 if (!f)
661 return NULL;
662 spin_lock(&f->fi_lock);
663 ret = nfsd_file_get(f->fi_fds[O_RDWR]);
664 if (!ret) {
665 ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
666 if (!ret)
667 ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
668 }
669 spin_unlock(&f->fi_lock);
670 return ret;
671}
672
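/*
 * Like find_any_file(), but returns the nfsd_file without taking a
 * reference; the caller must hold fi_lock for the result to stay valid.
 */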
673static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
674{
675 lockdep_assert_held(&f->fi_lock);
676
677 if (f->fi_fds[O_RDWR])
678 return f->fi_fds[O_RDWR];
679 if (f->fi_fds[O_WRONLY])
680 return f->fi_fds[O_WRONLY];
681 if (f->fi_fds[O_RDONLY])
682 return f->fi_fds[O_RDONLY];
683 return NULL;
684}
685
686static atomic_long_t num_delegations;
687unsigned long max_delegations;
688
689/*
690 * Open owner state (share locks)
691 */
692
693/* hash tables for lock and open owners */
694#define OWNER_HASH_BITS 8
695#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
696#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
697
698static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
699{
700 unsigned int ret;
701
702 ret = opaque_hashval(ownername->data, ownername->len);
703 return ret & OWNER_HASH_MASK;
704}
705
706static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;
707
708static const struct rhashtable_params nfs4_file_rhash_params = {
709 .key_len = sizeof_field(struct nfs4_file, fi_inode),
710 .key_offset = offsetof(struct nfs4_file, fi_inode),
711 .head_offset = offsetof(struct nfs4_file, fi_rlist),
712
713 /*
714 * Start with a single page hash table to reduce resizing churn
715 * on light workloads.
716 */
717 .min_size = 256,
718 .automatic_shrinking = true,
719};
720
721/*
722 * Check if courtesy clients have conflicting access and resolve it if possible
723 *
724 * access: is op_share_access if share_access is true.
725 * Check if access mode, op_share_access, would conflict with
726 * the current deny mode of the file 'fp'.
727 * access: is op_share_deny if share_access is false.
728 * Check if the deny mode, op_share_deny, would conflict with
729 * current access of the file 'fp'.
730 * stp: skip checking this entry.
731 * new_stp: normal open, not open upgrade.
732 *
733 * Function returns:
734 * false - access/deny mode conflict with normal client.
735 * true - no conflict or conflict with courtesy client(s) is resolved.
736 */
737static bool
738nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
739 struct nfs4_ol_stateid *stp, u32 access, bool share_access)
740{
741 struct nfs4_ol_stateid *st;
742 bool resolvable = true;
743 unsigned char bmap;
744 struct nfsd_net *nn;
745 struct nfs4_client *clp;
746
747 lockdep_assert_held(&fp->fi_lock);
748 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
749 /* ignore lock stateid */
750 if (st->st_openstp)
751 continue;
752 if (st == stp && new_stp)
753 continue;
754 /* check file access against deny mode or vice versa */
755 bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
756 if (!(access & bmap_to_share_mode(bmap)))
757 continue;
758 clp = st->st_stid.sc_client;
759 if (try_to_expire_client(clp))
760 continue;
761 resolvable = false;
762 break;
763 }
764 if (resolvable) {
765 clp = stp->st_stid.sc_client;
766 nn = net_generic(clp->net, nfsd_net_id);
767 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
768 }
769 return resolvable;
770}
771
772static void
773__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
774{
775 lockdep_assert_held(&fp->fi_lock);
776
777 if (access & NFS4_SHARE_ACCESS_WRITE)
778 atomic_inc(&fp->fi_access[O_WRONLY]);
779 if (access & NFS4_SHARE_ACCESS_READ)
780 atomic_inc(&fp->fi_access[O_RDONLY]);
781}
782
783static __be32
784nfs4_file_get_access(struct nfs4_file *fp, u32 access)
785{
786 lockdep_assert_held(&fp->fi_lock);
787
788 /* Does this access mode make sense? */
789 if (access & ~NFS4_SHARE_ACCESS_BOTH)
790 return nfserr_inval;
791
792 /* Does it conflict with a deny mode already set? */
793 if ((access & fp->fi_share_deny) != 0)
794 return nfserr_share_denied;
795
796 __nfs4_file_get_access(fp, access);
797 return nfs_ok;
798}
799
800static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
801{
802 /* Common case is that there is no deny mode. */
803 if (deny) {
804 /* Does this deny mode make sense? */
805 if (deny & ~NFS4_SHARE_DENY_BOTH)
806 return nfserr_inval;
807
808 if ((deny & NFS4_SHARE_DENY_READ) &&
809 atomic_read(&fp->fi_access[O_RDONLY]))
810 return nfserr_share_denied;
811
812 if ((deny & NFS4_SHARE_DENY_WRITE) &&
813 atomic_read(&fp->fi_access[O_WRONLY]))
814 return nfserr_share_denied;
815 }
816 return nfs_ok;
817}
818
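/*
 * Drop one reference on the given open mode. When the count reaches zero,
 * detach and put the per-mode nfsd_file, and also the O_RDWR file if the
 * opposite mode is no longer in use either.
 */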
819static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
820{
821 might_lock(&fp->fi_lock);
822
823 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
824 struct nfsd_file *f1 = NULL;
825 struct nfsd_file *f2 = NULL;
826
827 swap(f1, fp->fi_fds[oflag]);
828 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
829 swap(f2, fp->fi_fds[O_RDWR]);
830 spin_unlock(&fp->fi_lock);
831 if (f1)
832 nfsd_file_put(f1);
833 if (f2)
834 nfsd_file_put(f2);
835 }
836}
837
838static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
839{
840 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
841
842 if (access & NFS4_SHARE_ACCESS_WRITE)
843 __nfs4_file_put_access(fp, O_WRONLY);
844 if (access & NFS4_SHARE_ACCESS_READ)
845 __nfs4_file_put_access(fp, O_RDONLY);
846}
847
848/*
849 * Allocate a new open/delegation state counter. This is needed for
850 * pNFS for proper return on close semantics.
851 *
852 * Note that we only allocate it for pNFS-enabled exports, otherwise
853 * all pointers to struct nfs4_clnt_odstate are always NULL.
854 */
855static struct nfs4_clnt_odstate *
856alloc_clnt_odstate(struct nfs4_client *clp)
857{
858 struct nfs4_clnt_odstate *co;
859
860 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
861 if (co) {
862 co->co_client = clp;
863 refcount_set(&co->co_odcount, 1);
864 }
865 return co;
866}
867
868static void
869hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
870{
871 struct nfs4_file *fp = co->co_file;
872
873 lockdep_assert_held(&fp->fi_lock);
874 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
875}
876
877static inline void
878get_clnt_odstate(struct nfs4_clnt_odstate *co)
879{
880 if (co)
881 refcount_inc(&co->co_odcount);
882}
883
884static void
885put_clnt_odstate(struct nfs4_clnt_odstate *co)
886{
887 struct nfs4_file *fp;
888
889 if (!co)
890 return;
891
892 fp = co->co_file;
893 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
894 list_del(&co->co_perfile);
895 spin_unlock(&fp->fi_lock);
896
897 nfsd4_return_all_file_layouts(co->co_client, fp);
898 kmem_cache_free(odstate_slab, co);
899 }
900}
901
902static struct nfs4_clnt_odstate *
903find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
904{
905 struct nfs4_clnt_odstate *co;
906 struct nfs4_client *cl;
907
908 if (!new)
909 return NULL;
910
911 cl = new->co_client;
912
913 spin_lock(&fp->fi_lock);
914 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
915 if (co->co_client == cl) {
916 get_clnt_odstate(co);
917 goto out;
918 }
919 }
920 co = new;
921 co->co_file = fp;
922 hash_clnt_odstate_locked(new);
923out:
924 spin_unlock(&fp->fi_lock);
925 return co;
926}
927
928struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
929 void (*sc_free)(struct nfs4_stid *))
930{
931 struct nfs4_stid *stid;
932 int new_id;
933
934 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
935 if (!stid)
936 return NULL;
937
938 idr_preload(GFP_KERNEL);
939 spin_lock(&cl->cl_lock);
940 /* Reserving 0 for start of file in nfsdfs "states" file: */
941 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
942 spin_unlock(&cl->cl_lock);
943 idr_preload_end();
944 if (new_id < 0)
945 goto out_free;
946
947 stid->sc_free = sc_free;
948 stid->sc_client = cl;
949 stid->sc_stateid.si_opaque.so_id = new_id;
950 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
951 /* Will be incremented before return to client: */
952 refcount_set(&stid->sc_count, 1);
953 spin_lock_init(&stid->sc_lock);
954 INIT_LIST_HEAD(&stid->sc_cp_list);
955
956 /*
957 * It shouldn't be a problem to reuse an opaque stateid value.
958 * I don't think it is for 4.1. But with 4.0 I worry that, for
959 * example, a stray write retransmission could be accepted by
960 * the server when it should have been rejected. Therefore,
961 * adopt a trick from the sctp code to attempt to maximize the
962 * amount of time until an id is reused, by ensuring they always
963 * "increase" (mod INT_MAX):
964 */
965 return stid;
966out_free:
967 kmem_cache_free(slab, stid);
968 return NULL;
969}
970
971/*
972 * Create a unique stateid_t to represent each COPY.
973 */
974static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
975 unsigned char cs_type)
976{
977 int new_id;
978
979 stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
980 stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
981
982 idr_preload(GFP_KERNEL);
983 spin_lock(&nn->s2s_cp_lock);
984 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
985 stid->cs_stid.si_opaque.so_id = new_id;
986 stid->cs_stid.si_generation = 1;
987 spin_unlock(&nn->s2s_cp_lock);
988 idr_preload_end();
989 if (new_id < 0)
990 return 0;
991 stid->cs_type = cs_type;
992 return 1;
993}
994
995int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
996{
997 return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
998}
999
1000struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
1001 struct nfs4_stid *p_stid)
1002{
1003 struct nfs4_cpntf_state *cps;
1004
1005 cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
1006 if (!cps)
1007 return NULL;
1008 cps->cpntf_time = ktime_get_boottime_seconds();
1009 refcount_set(&cps->cp_stateid.cs_count, 1);
1010 if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
1011 goto out_free;
1012 spin_lock(&nn->s2s_cp_lock);
1013 list_add(&cps->cp_list, &p_stid->sc_cp_list);
1014 spin_unlock(&nn->s2s_cp_lock);
1015 return cps;
1016out_free:
1017 kfree(cps);
1018 return NULL;
1019}
1020
1021void nfs4_free_copy_state(struct nfsd4_copy *copy)
1022{
1023 struct nfsd_net *nn;
1024
1025 if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
1026 return;
1027 nn = net_generic(copy->cp_clp->net, nfsd_net_id);
1028 spin_lock(&nn->s2s_cp_lock);
1029 idr_remove(&nn->s2s_cp_stateids,
1030 copy->cp_stateid.cs_stid.si_opaque.so_id);
1031 spin_unlock(&nn->s2s_cp_lock);
1032}
1033
1034static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
1035{
1036 struct nfs4_cpntf_state *cps;
1037 struct nfsd_net *nn;
1038
1039 nn = net_generic(net, nfsd_net_id);
1040 spin_lock(&nn->s2s_cp_lock);
1041 while (!list_empty(&stid->sc_cp_list)) {
1042 cps = list_first_entry(&stid->sc_cp_list,
1043 struct nfs4_cpntf_state, cp_list);
1044 _free_cpntf_state_locked(nn, cps);
1045 }
1046 spin_unlock(&nn->s2s_cp_lock);
1047}
1048
1049static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
1050{
1051 struct nfs4_stid *stid;
1052
1053 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
1054 if (!stid)
1055 return NULL;
1056
1057 return openlockstateid(stid);
1058}
1059
1060static void nfs4_free_deleg(struct nfs4_stid *stid)
1061{
1062 struct nfs4_delegation *dp = delegstateid(stid);
1063
1064 WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
1065 WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
1066 WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
1067 WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
1068 kmem_cache_free(deleg_slab, stid);
1069 atomic_long_dec(&num_delegations);
1070}
1071
1072/*
1073 * When we recall a delegation, we should be careful not to hand it
1074 * out again straight away.
1075 * To ensure this we keep a pair of bloom filters ('new' and 'old')
1076 * in which the filehandles of recalled delegations are "stored".
 1077 * If a filehandle appears in either filter, a delegation is blocked.
1078 * When a delegation is recalled, the filehandle is stored in the "new"
1079 * filter.
1080 * Every 30 seconds we swap the filters and clear the "new" one,
1081 * unless both are empty of course. This results in delegations for a
1082 * given filehandle being blocked for between 30 and 60 seconds.
1083 *
1084 * Each filter is 256 bits. We hash the filehandle to 32bit and use the
1085 * low 3 bytes as hash-table indices.
1086 *
1087 * 'blocked_delegations_lock', which is always taken in block_delegations(),
1088 * is used to manage concurrent access. Testing does not need the lock
1089 * except when swapping the two filters.
1090 */
1091static DEFINE_SPINLOCK(blocked_delegations_lock);
1092static struct bloom_pair {
1093 int entries, old_entries;
1094 time64_t swap_time;
1095 int new; /* index into 'set' */
1096 DECLARE_BITMAP(set[2], 256);
1097} blocked_delegations;
1098
1099static int delegation_blocked(struct knfsd_fh *fh)
1100{
1101 u32 hash;
1102 struct bloom_pair *bd = &blocked_delegations;
1103
1104 if (bd->entries == 0)
1105 return 0;
1106 if (ktime_get_seconds() - bd->swap_time > 30) {
1107 spin_lock(&blocked_delegations_lock);
1108 if (ktime_get_seconds() - bd->swap_time > 30) {
1109 bd->entries -= bd->old_entries;
1110 bd->old_entries = bd->entries;
1111 bd->new = 1-bd->new;
1112 memset(bd->set[bd->new], 0,
1113 sizeof(bd->set[0]));
1114 bd->swap_time = ktime_get_seconds();
1115 }
1116 spin_unlock(&blocked_delegations_lock);
1117 }
1118 hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1119 if (test_bit(hash&255, bd->set[0]) &&
1120 test_bit((hash>>8)&255, bd->set[0]) &&
1121 test_bit((hash>>16)&255, bd->set[0]))
1122 return 1;
1123
1124 if (test_bit(hash&255, bd->set[1]) &&
1125 test_bit((hash>>8)&255, bd->set[1]) &&
1126 test_bit((hash>>16)&255, bd->set[1]))
1127 return 1;
1128
1129 return 0;
1130}
1131
1132static void block_delegations(struct knfsd_fh *fh)
1133{
1134 u32 hash;
1135 struct bloom_pair *bd = &blocked_delegations;
1136
1137 hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1138
1139 spin_lock(&blocked_delegations_lock);
1140 __set_bit(hash&255, bd->set[bd->new]);
1141 __set_bit((hash>>8)&255, bd->set[bd->new]);
1142 __set_bit((hash>>16)&255, bd->set[bd->new]);
1143 if (bd->entries == 0)
1144 bd->swap_time = ktime_get_seconds();
1145 bd->entries += 1;
1146 spin_unlock(&blocked_delegations_lock);
1147}
1148
1149static struct nfs4_delegation *
1150alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
1151 struct nfs4_clnt_odstate *odstate, u32 dl_type)
1152{
1153 struct nfs4_delegation *dp;
1154 struct nfs4_stid *stid;
1155 long n;
1156
1157 dprintk("NFSD alloc_init_deleg\n");
1158 n = atomic_long_inc_return(&num_delegations);
1159 if (n < 0 || n > max_delegations)
1160 goto out_dec;
1161 if (delegation_blocked(&fp->fi_fhandle))
1162 goto out_dec;
1163 stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg);
1164 if (stid == NULL)
1165 goto out_dec;
1166 dp = delegstateid(stid);
1167
1168 /*
 1169 * delegation seqids are never incremented. The 4.1 special
1170 * meaning of seqid 0 isn't meaningful, really, but let's avoid
1171 * 0 anyway just for consistency and use 1:
1172 */
1173 dp->dl_stid.sc_stateid.si_generation = 1;
1174 INIT_LIST_HEAD(&dp->dl_perfile);
1175 INIT_LIST_HEAD(&dp->dl_perclnt);
1176 INIT_LIST_HEAD(&dp->dl_recall_lru);
1177 dp->dl_clnt_odstate = odstate;
1178 get_clnt_odstate(odstate);
1179 dp->dl_type = dl_type;
1180 dp->dl_retries = 1;
1181 dp->dl_recalled = false;
1182 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
1183 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
1184 nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client,
1185 &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR);
1186 dp->dl_cb_fattr.ncf_file_modified = false;
1187 dp->dl_cb_fattr.ncf_cb_bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE;
1188 get_nfs4_file(fp);
1189 dp->dl_stid.sc_file = fp;
1190 return dp;
1191out_dec:
1192 atomic_long_dec(&num_delegations);
1193 return NULL;
1194}
1195
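/*
 * Release a reference on a generic stateid. The final put removes it from
 * the client's stateid IDR, frees any attached copy-notify state and calls
 * the type-specific sc_free; otherwise CLOSE waiters are woken so they can
 * re-check the refcount.
 */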
1196void
1197nfs4_put_stid(struct nfs4_stid *s)
1198{
1199 struct nfs4_file *fp = s->sc_file;
1200 struct nfs4_client *clp = s->sc_client;
1201
1202 might_lock(&clp->cl_lock);
1203
1204 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
1205 wake_up_all(&close_wq);
1206 return;
1207 }
1208 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1209 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
1210 atomic_dec(&s->sc_client->cl_admin_revoked);
1211 nfs4_free_cpntf_statelist(clp->net, s);
1212 spin_unlock(&clp->cl_lock);
1213 s->sc_free(s);
1214 if (fp)
1215 put_nfs4_file(fp);
1216}
1217
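/*
 * Bump the stateid's generation (skipping the special value 0 on wrap) and
 * copy the result into @dst for return to the client.
 */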
1218void
1219nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
1220{
1221 stateid_t *src = &stid->sc_stateid;
1222
1223 spin_lock(&stid->sc_lock);
1224 if (unlikely(++src->si_generation == 0))
1225 src->si_generation = 1;
1226 memcpy(dst, src, sizeof(*dst));
1227 spin_unlock(&stid->sc_lock);
1228}
1229
1230static void put_deleg_file(struct nfs4_file *fp)
1231{
1232 struct nfsd_file *nf = NULL;
1233
1234 spin_lock(&fp->fi_lock);
1235 if (--fp->fi_delegees == 0)
1236 swap(nf, fp->fi_deleg_file);
1237 spin_unlock(&fp->fi_lock);
1238
1239 if (nf)
1240 nfsd_file_put(nf);
1241}
1242
1243static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1244{
1245 struct nfs4_file *fp = dp->dl_stid.sc_file;
1246 struct nfsd_file *nf = fp->fi_deleg_file;
1247
1248 WARN_ON_ONCE(!fp->fi_delegees);
1249
1250 kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1251 put_deleg_file(fp);
1252}
1253
1254static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1255{
1256 put_clnt_odstate(dp->dl_clnt_odstate);
1257 nfs4_unlock_deleg_lease(dp);
1258 nfs4_put_stid(&dp->dl_stid);
1259}
1260
1261/**
1262 * nfs4_delegation_exists - Discover if this delegation already exists
1263 * @clp: a pointer to the nfs4_client we're granting a delegation to
1264 * @fp: a pointer to the nfs4_file we're granting a delegation on
1265 *
1266 * Return:
1267 * On success: true iff an existing delegation is found
1268 */
1269
1270static bool
1271nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1272{
1273 struct nfs4_delegation *searchdp = NULL;
1274 struct nfs4_client *searchclp = NULL;
1275
1276 lockdep_assert_held(&state_lock);
1277 lockdep_assert_held(&fp->fi_lock);
1278
1279 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1280 searchclp = searchdp->dl_stid.sc_client;
1281 if (clp == searchclp) {
1282 return true;
1283 }
1284 }
1285 return false;
1286}
1287
1288/**
1289 * hash_delegation_locked - Add a delegation to the appropriate lists
1290 * @dp: a pointer to the nfs4_delegation we are adding.
1291 * @fp: a pointer to the nfs4_file we're granting a delegation on
1292 *
1293 * Return:
1294 * On success: NULL if the delegation was successfully hashed.
1295 *
1296 * On error: -EAGAIN if one was previously granted to this
1297 * nfs4_client for this nfs4_file. Delegation is not hashed.
1298 *
1299 */
1300
1301static int
1302hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1303{
1304 struct nfs4_client *clp = dp->dl_stid.sc_client;
1305
1306 lockdep_assert_held(&state_lock);
1307 lockdep_assert_held(&fp->fi_lock);
1308 lockdep_assert_held(&clp->cl_lock);
1309
1310 if (nfs4_delegation_exists(clp, fp))
1311 return -EAGAIN;
1312 refcount_inc(&dp->dl_stid.sc_count);
1313 dp->dl_stid.sc_type = SC_TYPE_DELEG;
1314 list_add(&dp->dl_perfile, &fp->fi_delegations);
1315 list_add(&dp->dl_perclnt, &clp->cl_delegations);
1316 return 0;
1317}
1318
1319static bool delegation_hashed(struct nfs4_delegation *dp)
1320{
1321 return !(list_empty(&dp->dl_perfile));
1322}
1323
1324static bool
1325unhash_delegation_locked(struct nfs4_delegation *dp, unsigned short statusmask)
1326{
1327 struct nfs4_file *fp = dp->dl_stid.sc_file;
1328
1329 lockdep_assert_held(&state_lock);
1330
1331 if (!delegation_hashed(dp))
1332 return false;
1333
1334 if (statusmask == SC_STATUS_REVOKED &&
1335 dp->dl_stid.sc_client->cl_minorversion == 0)
1336 statusmask = SC_STATUS_CLOSED;
1337 dp->dl_stid.sc_status |= statusmask;
1338 if (statusmask & SC_STATUS_ADMIN_REVOKED)
1339 atomic_inc(&dp->dl_stid.sc_client->cl_admin_revoked);
1340
1341 /* Ensure that deleg break won't try to requeue it */
1342 ++dp->dl_time;
1343 spin_lock(&fp->fi_lock);
1344 list_del_init(&dp->dl_perclnt);
1345 list_del_init(&dp->dl_recall_lru);
1346 list_del_init(&dp->dl_perfile);
1347 spin_unlock(&fp->fi_lock);
1348 return true;
1349}
1350
1351static void destroy_delegation(struct nfs4_delegation *dp)
1352{
1353 bool unhashed;
1354
1355 spin_lock(&state_lock);
1356 unhashed = unhash_delegation_locked(dp, SC_STATUS_CLOSED);
1357 spin_unlock(&state_lock);
1358 if (unhashed)
1359 destroy_unhashed_deleg(dp);
1360}
1361
1362static void revoke_delegation(struct nfs4_delegation *dp)
1363{
1364 struct nfs4_client *clp = dp->dl_stid.sc_client;
1365
1366 WARN_ON(!list_empty(&dp->dl_recall_lru));
1367
1368 trace_nfsd_stid_revoke(&dp->dl_stid);
1369
1370 if (dp->dl_stid.sc_status &
1371 (SC_STATUS_REVOKED | SC_STATUS_ADMIN_REVOKED)) {
1372 spin_lock(&clp->cl_lock);
1373 refcount_inc(&dp->dl_stid.sc_count);
1374 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1375 spin_unlock(&clp->cl_lock);
1376 }
1377 destroy_unhashed_deleg(dp);
1378}
1379
1380/*
1381 * SETCLIENTID state
1382 */
1383
1384static unsigned int clientid_hashval(u32 id)
1385{
1386 return id & CLIENT_HASH_MASK;
1387}
1388
1389static unsigned int clientstr_hashval(struct xdr_netobj name)
1390{
1391 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1392}
1393
1394/*
1395 * A stateid that had a deny mode associated with it is being released
1396 * or downgraded. Recalculate the deny mode on the file.
1397 */
1398static void
1399recalculate_deny_mode(struct nfs4_file *fp)
1400{
1401 struct nfs4_ol_stateid *stp;
1402 u32 old_deny;
1403
1404 spin_lock(&fp->fi_lock);
1405 old_deny = fp->fi_share_deny;
1406 fp->fi_share_deny = 0;
1407 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
1408 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1409 if (fp->fi_share_deny == old_deny)
1410 break;
1411 }
1412 spin_unlock(&fp->fi_lock);
1413}
1414
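/*
 * Clear any deny bits on the stateid that are not covered by the new deny
 * mode, and recompute the file's aggregate deny mode if anything changed.
 */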
1415static void
1416reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1417{
1418 int i;
1419 bool change = false;
1420
1421 for (i = 1; i < 4; i++) {
1422 if ((i & deny) != i) {
1423 change = true;
1424 clear_deny(i, stp);
1425 }
1426 }
1427
1428 /* Recalculate per-file deny mode if there was a change */
1429 if (change)
1430 recalculate_deny_mode(stp->st_stid.sc_file);
1431}
1432
1433/* release all access and file references for a given stateid */
1434static void
1435release_all_access(struct nfs4_ol_stateid *stp)
1436{
1437 int i;
1438 struct nfs4_file *fp = stp->st_stid.sc_file;
1439
1440 if (fp && stp->st_deny_bmap != 0)
1441 recalculate_deny_mode(fp);
1442
1443 for (i = 1; i < 4; i++) {
1444 if (test_access(i, stp))
1445 nfs4_file_put_access(stp->st_stid.sc_file, i);
1446 clear_access(i, stp);
1447 }
1448}
1449
1450static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1451{
1452 kfree(sop->so_owner.data);
1453 sop->so_ops->so_free(sop);
1454}
1455
1456static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1457{
1458 struct nfs4_client *clp = sop->so_client;
1459
1460 might_lock(&clp->cl_lock);
1461
1462 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1463 return;
1464 sop->so_ops->so_unhash(sop);
1465 spin_unlock(&clp->cl_lock);
1466 nfs4_free_stateowner(sop);
1467}
1468
1469static bool
1470nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1471{
1472 return list_empty(&stp->st_perfile);
1473}
1474
1475static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1476{
1477 struct nfs4_file *fp = stp->st_stid.sc_file;
1478
1479 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1480
1481 if (list_empty(&stp->st_perfile))
1482 return false;
1483
1484 spin_lock(&fp->fi_lock);
1485 list_del_init(&stp->st_perfile);
1486 spin_unlock(&fp->fi_lock);
1487 list_del(&stp->st_perstateowner);
1488 return true;
1489}
1490
1491static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1492{
1493 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1494
1495 put_clnt_odstate(stp->st_clnt_odstate);
1496 release_all_access(stp);
1497 if (stp->st_stateowner)
1498 nfs4_put_stateowner(stp->st_stateowner);
1499 WARN_ON(!list_empty(&stid->sc_cp_list));
1500 kmem_cache_free(stateid_slab, stid);
1501}
1502
1503static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1504{
1505 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1506 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1507 struct nfsd_file *nf;
1508
1509 nf = find_any_file(stp->st_stid.sc_file);
1510 if (nf) {
1511 get_file(nf->nf_file);
1512 filp_close(nf->nf_file, (fl_owner_t)lo);
1513 nfsd_file_put(nf);
1514 }
1515 nfs4_free_ol_stateid(stid);
1516}
1517
1518/*
1519 * Put the persistent reference to an already unhashed generic stateid, while
1520 * holding the cl_lock. If it's the last reference, then put it onto the
1521 * reaplist for later destruction.
1522 */
1523static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1524 struct list_head *reaplist)
1525{
1526 struct nfs4_stid *s = &stp->st_stid;
1527 struct nfs4_client *clp = s->sc_client;
1528
1529 lockdep_assert_held(&clp->cl_lock);
1530
1531 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1532
1533 if (!refcount_dec_and_test(&s->sc_count)) {
1534 wake_up_all(&close_wq);
1535 return;
1536 }
1537
1538 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1539 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
1540 atomic_dec(&s->sc_client->cl_admin_revoked);
1541 list_add(&stp->st_locks, reaplist);
1542}
1543
1544static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1545{
1546 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1547
1548 if (!unhash_ol_stateid(stp))
1549 return false;
1550 list_del_init(&stp->st_locks);
1551 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
1552 return true;
1553}
1554
1555static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1556{
1557 struct nfs4_client *clp = stp->st_stid.sc_client;
1558 bool unhashed;
1559
1560 spin_lock(&clp->cl_lock);
1561 unhashed = unhash_lock_stateid(stp);
1562 spin_unlock(&clp->cl_lock);
1563 if (unhashed)
1564 nfs4_put_stid(&stp->st_stid);
1565}
1566
1567static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1568{
1569 struct nfs4_client *clp = lo->lo_owner.so_client;
1570
1571 lockdep_assert_held(&clp->cl_lock);
1572
1573 list_del_init(&lo->lo_owner.so_strhash);
1574}
1575
1576/*
1577 * Free a list of generic stateids that were collected earlier after being
1578 * fully unhashed.
1579 */
1580static void
1581free_ol_stateid_reaplist(struct list_head *reaplist)
1582{
1583 struct nfs4_ol_stateid *stp;
1584 struct nfs4_file *fp;
1585
1586 might_sleep();
1587
1588 while (!list_empty(reaplist)) {
1589 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1590 st_locks);
1591 list_del(&stp->st_locks);
1592 fp = stp->st_stid.sc_file;
1593 stp->st_stid.sc_free(&stp->st_stid);
1594 if (fp)
1595 put_nfs4_file(fp);
1596 }
1597}
1598
1599static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1600 struct list_head *reaplist)
1601{
1602 struct nfs4_ol_stateid *stp;
1603
1604 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1605
1606 while (!list_empty(&open_stp->st_locks)) {
1607 stp = list_entry(open_stp->st_locks.next,
1608 struct nfs4_ol_stateid, st_locks);
1609 unhash_lock_stateid(stp);
1610 put_ol_stateid_locked(stp, reaplist);
1611 }
1612}
1613
1614static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1615 struct list_head *reaplist)
1616{
1617 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1618
1619 if (!unhash_ol_stateid(stp))
1620 return false;
1621 release_open_stateid_locks(stp, reaplist);
1622 return true;
1623}
1624
1625static void release_open_stateid(struct nfs4_ol_stateid *stp)
1626{
1627 LIST_HEAD(reaplist);
1628
1629 spin_lock(&stp->st_stid.sc_client->cl_lock);
1630 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
1631 if (unhash_open_stateid(stp, &reaplist))
1632 put_ol_stateid_locked(stp, &reaplist);
1633 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1634 free_ol_stateid_reaplist(&reaplist);
1635}
1636
1637static void unhash_openowner_locked(struct nfs4_openowner *oo)
1638{
1639 struct nfs4_client *clp = oo->oo_owner.so_client;
1640
1641 lockdep_assert_held(&clp->cl_lock);
1642
1643 list_del_init(&oo->oo_owner.so_strhash);
1644 list_del_init(&oo->oo_perclient);
1645}
1646
1647static void release_last_closed_stateid(struct nfs4_openowner *oo)
1648{
1649 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1650 nfsd_net_id);
1651 struct nfs4_ol_stateid *s;
1652
1653 spin_lock(&nn->client_lock);
1654 s = oo->oo_last_closed_stid;
1655 if (s) {
1656 list_del_init(&oo->oo_close_lru);
1657 oo->oo_last_closed_stid = NULL;
1658 }
1659 spin_unlock(&nn->client_lock);
1660 if (s)
1661 nfs4_put_stid(&s->st_stid);
1662}
1663
1664static void release_openowner(struct nfs4_openowner *oo)
1665{
1666 struct nfs4_ol_stateid *stp;
1667 struct nfs4_client *clp = oo->oo_owner.so_client;
1668 LIST_HEAD(reaplist);
1669
1670 spin_lock(&clp->cl_lock);
1671 unhash_openowner_locked(oo);
1672 while (!list_empty(&oo->oo_owner.so_stateids)) {
1673 stp = list_first_entry(&oo->oo_owner.so_stateids,
1674 struct nfs4_ol_stateid, st_perstateowner);
1675 if (unhash_open_stateid(stp, &reaplist))
1676 put_ol_stateid_locked(stp, &reaplist);
1677 }
1678 spin_unlock(&clp->cl_lock);
1679 free_ol_stateid_reaplist(&reaplist);
1680 release_last_closed_stateid(oo);
1681 nfs4_put_stateowner(&oo->oo_owner);
1682}
1683
1684static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp,
1685 struct super_block *sb,
1686 unsigned int sc_types)
1687{
1688 unsigned long id, tmp;
1689 struct nfs4_stid *stid;
1690
1691 spin_lock(&clp->cl_lock);
1692 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
1693 if ((stid->sc_type & sc_types) &&
1694 stid->sc_status == 0 &&
1695 stid->sc_file->fi_inode->i_sb == sb) {
1696 refcount_inc(&stid->sc_count);
1697 break;
1698 }
1699 spin_unlock(&clp->cl_lock);
1700 return stid;
1701}
1702
1703/**
1704 * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
1705 * @net: used to identify instance of nfsd (there is one per net namespace)
1706 * @sb: super_block used to identify target filesystem
1707 *
1708 * All nfs4 states (open, lock, delegation, layout) held by the server instance
1709 * and associated with a file on the given filesystem will be revoked resulting
1710 * in any files being closed and so all references from nfsd to the filesystem
1711 * being released. Thus nfsd will no longer prevent the filesystem from being
1712 * unmounted.
1713 *
 1714 * The clients which own the states will subsequently be notified that the
1715 * states have been "admin-revoked".
1716 */
1717void nfsd4_revoke_states(struct net *net, struct super_block *sb)
1718{
1719 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1720 unsigned int idhashval;
1721 unsigned int sc_types;
1722
1723 sc_types = SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG | SC_TYPE_LAYOUT;
1724
1725 spin_lock(&nn->client_lock);
1726 for (idhashval = 0; idhashval < CLIENT_HASH_MASK; idhashval++) {
1727 struct list_head *head = &nn->conf_id_hashtbl[idhashval];
1728 struct nfs4_client *clp;
1729 retry:
1730 list_for_each_entry(clp, head, cl_idhash) {
1731 struct nfs4_stid *stid = find_one_sb_stid(clp, sb,
1732 sc_types);
1733 if (stid) {
1734 struct nfs4_ol_stateid *stp;
1735 struct nfs4_delegation *dp;
1736 struct nfs4_layout_stateid *ls;
1737
1738 spin_unlock(&nn->client_lock);
1739 switch (stid->sc_type) {
1740 case SC_TYPE_OPEN:
1741 stp = openlockstateid(stid);
1742 mutex_lock_nested(&stp->st_mutex,
1743 OPEN_STATEID_MUTEX);
1744
1745 spin_lock(&clp->cl_lock);
1746 if (stid->sc_status == 0) {
1747 stid->sc_status |=
1748 SC_STATUS_ADMIN_REVOKED;
1749 atomic_inc(&clp->cl_admin_revoked);
1750 spin_unlock(&clp->cl_lock);
1751 release_all_access(stp);
1752 } else
1753 spin_unlock(&clp->cl_lock);
1754 mutex_unlock(&stp->st_mutex);
1755 break;
1756 case SC_TYPE_LOCK:
1757 stp = openlockstateid(stid);
1758 mutex_lock_nested(&stp->st_mutex,
1759 LOCK_STATEID_MUTEX);
1760 spin_lock(&clp->cl_lock);
1761 if (stid->sc_status == 0) {
1762 struct nfs4_lockowner *lo =
1763 lockowner(stp->st_stateowner);
1764 struct nfsd_file *nf;
1765
1766 stid->sc_status |=
1767 SC_STATUS_ADMIN_REVOKED;
1768 atomic_inc(&clp->cl_admin_revoked);
1769 spin_unlock(&clp->cl_lock);
1770 nf = find_any_file(stp->st_stid.sc_file);
1771 if (nf) {
1772 get_file(nf->nf_file);
1773 filp_close(nf->nf_file,
1774 (fl_owner_t)lo);
1775 nfsd_file_put(nf);
1776 }
1777 release_all_access(stp);
1778 } else
1779 spin_unlock(&clp->cl_lock);
1780 mutex_unlock(&stp->st_mutex);
1781 break;
1782 case SC_TYPE_DELEG:
1783 dp = delegstateid(stid);
1784 spin_lock(&state_lock);
1785 if (!unhash_delegation_locked(
1786 dp, SC_STATUS_ADMIN_REVOKED))
1787 dp = NULL;
1788 spin_unlock(&state_lock);
1789 if (dp)
1790 revoke_delegation(dp);
1791 break;
1792 case SC_TYPE_LAYOUT:
1793 ls = layoutstateid(stid);
1794 nfsd4_close_layout(ls);
1795 break;
1796 }
1797 nfs4_put_stid(stid);
1798 spin_lock(&nn->client_lock);
1799 if (clp->cl_minorversion == 0)
1800 /* Allow cleanup after a lease period.
1801 * store_release ensures cleanup will
1802 * see any newly revoked states if it
1803 * sees the time updated.
1804 */
1805 nn->nfs40_last_revoke =
1806 ktime_get_boottime_seconds();
1807 goto retry;
1808 }
1809 }
1810 }
1811 spin_unlock(&nn->client_lock);
1812}
1813
1814static inline int
1815hash_sessionid(struct nfs4_sessionid *sessionid)
1816{
1817 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1818
1819 return sid->sequence % SESSION_HASH_SIZE;
1820}
1821
1822#ifdef CONFIG_SUNRPC_DEBUG
1823static inline void
1824dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1825{
1826 u32 *ptr = (u32 *)(&sessionid->data[0]);
1827 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1828}
1829#else
1830static inline void
1831dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1832{
1833}
1834#endif
1835
1836/*
1837 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1838 * won't be used for replay.
1839 */
1840void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1841{
1842 struct nfs4_stateowner *so = cstate->replay_owner;
1843
1844 if (nfserr == nfserr_replay_me)
1845 return;
1846
1847 if (!seqid_mutating_err(ntohl(nfserr))) {
1848 nfsd4_cstate_clear_replay(cstate);
1849 return;
1850 }
1851 if (!so)
1852 return;
1853 if (so->so_is_open_owner)
1854 release_last_closed_stateid(openowner(so));
1855 so->so_seqid++;
1856 return;
1857}
1858
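/* Build a session id from the client id plus a globally increasing sequence number. */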
1859static void
1860gen_sessionid(struct nfsd4_session *ses)
1861{
1862 struct nfs4_client *clp = ses->se_client;
1863 struct nfsd4_sessionid *sid;
1864
1865 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1866 sid->clientid = clp->cl_clientid;
1867 sid->sequence = current_sessionid++;
1868 sid->reserved = 0;
1869}
1870
1871/*
 1872 * The protocol defines ca_maxresponsesize_cached to include the size of
1873 * the rpc header, but all we need to cache is the data starting after
1874 * the end of the initial SEQUENCE operation--the rest we regenerate
 1875 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1876 * value that is the number of bytes in our cache plus a few additional
1877 * bytes. In order to stay on the safe side, and not promise more than
1878 * we can cache, those additional bytes must be the minimum possible: 24
1879 * bytes of rpc header (xid through accept state, with AUTH_NULL
1880 * verifier), 12 for the compound header (with zero-length tag), and 44
1881 * for the SEQUENCE op response:
1882 */
1883#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1884
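/* Free each slot's cached RPC credential and then the slot itself. */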
1885static void
1886free_session_slots(struct nfsd4_session *ses)
1887{
1888 int i;
1889
1890 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1891 free_svc_cred(&ses->se_slots[i]->sl_cred);
1892 kfree(ses->se_slots[i]);
1893 }
1894}
1895
1896/*
1897 * We don't actually need to cache the rpc and session headers, so we
1898 * can allocate a little less for each slot:
1899 */
1900static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1901{
1902 u32 size;
1903
1904 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1905 size = 0;
1906 else
1907 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1908 return size + sizeof(struct nfsd4_slot);
1909}
1910
1911/*
1912 * XXX: If we run out of reserved DRC memory we could (up to a point)
1913 * re-negotiate active sessions and reduce their slot usage to make
1914 * room for new connections. For now we just fail the create session.
1915 */
1916static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1917{
1918 u32 slotsize = slot_bytes(ca);
1919 u32 num = ca->maxreqs;
1920 unsigned long avail, total_avail;
1921 unsigned int scale_factor;
1922
1923 spin_lock(&nfsd_drc_lock);
1924 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1925 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1926 else
1927 /* We have handed out more space than we chose in
1928 * set_max_drc() to allow. That isn't really a
1929 * problem as long as that doesn't make us think we
1930 * have lots more due to integer overflow.
1931 */
1932 total_avail = 0;
1933 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1934 /*
1935 * Never use more than a fraction of the remaining memory,
1936 * unless it's the only way to give this client a slot.
1937 * The chosen fraction is either 1/8 or 1/number of threads,
1938 * whichever is smaller. This ensures there are adequate
1939 * slots to support multiple clients per thread.
1940 * Give the client one slot even if that would require
1941 * over-allocation--it is better than failure.
1942 */
1943 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1944
1945 avail = clamp_t(unsigned long, avail, slotsize,
1946 total_avail/scale_factor);
1947 num = min_t(int, num, avail / slotsize);
1948 num = max_t(int, num, 1);
1949 nfsd_drc_mem_used += num * slotsize;
1950 spin_unlock(&nfsd_drc_lock);
1951
1952 return num;
1953}
1954
1955static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1956{
1957 int slotsize = slot_bytes(ca);
1958
1959 spin_lock(&nfsd_drc_lock);
1960 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1961 spin_unlock(&nfsd_drc_lock);
1962}
1963
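/*
 * struct nfsd4_session ends in a flexible array of slot pointers; the
 * BUILD_BUG_ON below ensures that even a maximally sized session header
 * fits in one page. Each slot, together with its inline reply cache, is
 * then allocated separately.
 */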
1964static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1965 struct nfsd4_channel_attrs *battrs)
1966{
1967 int numslots = fattrs->maxreqs;
1968 int slotsize = slot_bytes(fattrs);
1969 struct nfsd4_session *new;
1970 int i;
1971
1972 BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
1973 > PAGE_SIZE);
1974
1975 new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
1976 if (!new)
1977 return NULL;
1978 /* allocate each struct nfsd4_slot and data cache in one piece */
1979 for (i = 0; i < numslots; i++) {
1980 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1981 if (!new->se_slots[i])
1982 goto out_free;
1983 }
1984
1985 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1986 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1987
1988 return new;
1989out_free:
1990 while (i--)
1991 kfree(new->se_slots[i]);
1992 kfree(new);
1993 return NULL;
1994}
1995
1996static void free_conn(struct nfsd4_conn *c)
1997{
1998 svc_xprt_put(c->cn_xprt);
1999 kfree(c);
2000}
2001
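/*
 * Transport-level notification that a connection bound to a session has
 * gone away: unhash it from the session's connection list and re-probe
 * the callback channel, which may no longer be usable.
 */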
2002static void nfsd4_conn_lost(struct svc_xpt_user *u)
2003{
2004 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
2005 struct nfs4_client *clp = c->cn_session->se_client;
2006
2007 trace_nfsd_cb_lost(clp);
2008
2009 spin_lock(&clp->cl_lock);
2010 if (!list_empty(&c->cn_persession)) {
2011 list_del(&c->cn_persession);
2012 free_conn(c);
2013 }
2014 nfsd4_probe_callback(clp);
2015 spin_unlock(&clp->cl_lock);
2016}
2017
2018static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
2019{
2020 struct nfsd4_conn *conn;
2021
2022 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
2023 if (!conn)
2024 return NULL;
2025 svc_xprt_get(rqstp->rq_xprt);
2026 conn->cn_xprt = rqstp->rq_xprt;
2027 conn->cn_flags = flags;
2028 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
2029 return conn;
2030}
2031
2032static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
2033{
2034 conn->cn_session = ses;
2035 list_add(&conn->cn_persession, &ses->se_conns);
2036}
2037
2038static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
2039{
2040 struct nfs4_client *clp = ses->se_client;
2041
2042 spin_lock(&clp->cl_lock);
2043 __nfsd4_hash_conn(conn, ses);
2044 spin_unlock(&clp->cl_lock);
2045}
2046
2047static int nfsd4_register_conn(struct nfsd4_conn *conn)
2048{
2049 conn->cn_xpt_user.callback = nfsd4_conn_lost;
2050 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
2051}
2052
2053static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
2054{
2055 int ret;
2056
2057 nfsd4_hash_conn(conn, ses);
2058 ret = nfsd4_register_conn(conn);
2059 if (ret)
2060 /* oops; xprt is already down: */
2061 nfsd4_conn_lost(&conn->cn_xpt_user);
2062 /* We may have gained or lost a callback channel: */
2063 nfsd4_probe_callback_sync(ses->se_client);
2064}
2065
2066static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
2067{
2068 u32 dir = NFS4_CDFC4_FORE;
2069
2070 if (cses->flags & SESSION4_BACK_CHAN)
2071 dir |= NFS4_CDFC4_BACK;
2072 return alloc_conn(rqstp, dir);
2073}
2074
2075/* must be called under client_lock */
2076static void nfsd4_del_conns(struct nfsd4_session *s)
2077{
2078 struct nfs4_client *clp = s->se_client;
2079 struct nfsd4_conn *c;
2080
2081 spin_lock(&clp->cl_lock);
2082 while (!list_empty(&s->se_conns)) {
2083 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
2084 list_del_init(&c->cn_persession);
2085 spin_unlock(&clp->cl_lock);
2086
2087 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
2088 free_conn(c);
2089
2090 spin_lock(&clp->cl_lock);
2091 }
2092 spin_unlock(&clp->cl_lock);
2093}
2094
2095static void __free_session(struct nfsd4_session *ses)
2096{
2097 free_session_slots(ses);
2098 kfree(ses);
2099}
2100
2101static void free_session(struct nfsd4_session *ses)
2102{
2103 nfsd4_del_conns(ses);
2104 nfsd4_put_drc_mem(&ses->se_fchannel);
2105 __free_session(ses);
2106}
2107
2108static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
2109{
2110 int idx;
2111 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2112
2113 new->se_client = clp;
2114 gen_sessionid(new);
2115
2116 INIT_LIST_HEAD(&new->se_conns);
2117
2118 new->se_cb_seq_nr = 1;
2119 new->se_flags = cses->flags;
2120 new->se_cb_prog = cses->callback_prog;
2121 new->se_cb_sec = cses->cb_sec;
2122 atomic_set(&new->se_ref, 0);
2123 idx = hash_sessionid(&new->se_sessionid);
2124 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
2125 spin_lock(&clp->cl_lock);
2126 list_add(&new->se_perclnt, &clp->cl_sessions);
2127 spin_unlock(&clp->cl_lock);
2128
2129 {
2130 struct sockaddr *sa = svc_addr(rqstp);
2131 /*
2132 * This is a little silly; with sessions there's no real
2133 * use for the callback address. Use the peer address
2134 * as a reasonable default for now, but consider fixing
2135 * the rpc client not to require an address in the
2136 * future:
2137 */
2138 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2139 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2140 }
2141}
2142
2143/* caller must hold client_lock */
2144static struct nfsd4_session *
2145__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
2146{
2147 struct nfsd4_session *elem;
2148 int idx;
2149 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2150
2151 lockdep_assert_held(&nn->client_lock);
2152
2153 dump_sessionid(__func__, sessionid);
2154 idx = hash_sessionid(sessionid);
2155 /* Search in the appropriate list */
2156 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
2157 if (!memcmp(elem->se_sessionid.data, sessionid->data,
2158 NFS4_MAX_SESSIONID_LEN)) {
2159 return elem;
2160 }
2161 }
2162
2163 dprintk("%s: session not found\n", __func__);
2164 return NULL;
2165}
2166
2167static struct nfsd4_session *
2168find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
2169 __be32 *ret)
2170{
2171 struct nfsd4_session *session;
2172 __be32 status = nfserr_badsession;
2173
2174 session = __find_in_sessionid_hashtbl(sessionid, net);
2175 if (!session)
2176 goto out;
2177 status = nfsd4_get_session_locked(session);
2178 if (status)
2179 session = NULL;
2180out:
2181 *ret = status;
2182 return session;
2183}
2184
2185/* caller must hold client_lock */
2186static void
2187unhash_session(struct nfsd4_session *ses)
2188{
2189 struct nfs4_client *clp = ses->se_client;
2190 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2191
2192 lockdep_assert_held(&nn->client_lock);
2193
2194 list_del(&ses->se_hash);
2195 spin_lock(&ses->se_client->cl_lock);
2196 list_del(&ses->se_perclnt);
2197 spin_unlock(&ses->se_client->cl_lock);
2198}
2199
2200/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
2201static int
2202STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
2203{
2204 /*
2205 * We're assuming the clid was not given out from a boot
2206 * precisely 2^32 (about 136 years) before this one. That seems
2207 * a safe assumption:
2208 */
2209 if (clid->cl_boot == (u32)nn->boot_time)
2210 return 0;
2211 trace_nfsd_clid_stale(clid);
2212 return 1;
2213}
2214
2215/*
2216 * XXX Should we use a slab cache?
2217 * This type of memory management is somewhat inefficient, but we use it
2218 * anyway since SETCLIENTID is not a common operation.
2219 */
2220static struct nfs4_client *alloc_client(struct xdr_netobj name,
2221 struct nfsd_net *nn)
2222{
2223 struct nfs4_client *clp;
2224 int i;
2225
2226 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
2227 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
2228 return NULL;
2229 }
2230 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2231 if (clp == NULL)
2232 return NULL;
2233 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2234 if (clp->cl_name.data == NULL)
2235 goto err_no_name;
2236 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2237 sizeof(struct list_head),
2238 GFP_KERNEL);
2239 if (!clp->cl_ownerstr_hashtbl)
2240 goto err_no_hashtbl;
2241 clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
2242 if (!clp->cl_callback_wq)
2243 goto err_no_callback_wq;
2244
2245 for (i = 0; i < OWNER_HASH_SIZE; i++)
2246 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2247 INIT_LIST_HEAD(&clp->cl_sessions);
2248 idr_init(&clp->cl_stateids);
2249 atomic_set(&clp->cl_rpc_users, 0);
2250 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2251 clp->cl_state = NFSD4_ACTIVE;
2252 atomic_inc(&nn->nfs4_client_count);
2253 atomic_set(&clp->cl_delegs_in_recall, 0);
2254 INIT_LIST_HEAD(&clp->cl_idhash);
2255 INIT_LIST_HEAD(&clp->cl_openowners);
2256 INIT_LIST_HEAD(&clp->cl_delegations);
2257 INIT_LIST_HEAD(&clp->cl_lru);
2258 INIT_LIST_HEAD(&clp->cl_revoked);
2259#ifdef CONFIG_NFSD_PNFS
2260 INIT_LIST_HEAD(&clp->cl_lo_states);
2261#endif
2262 INIT_LIST_HEAD(&clp->async_copies);
2263 spin_lock_init(&clp->async_lock);
2264 spin_lock_init(&clp->cl_lock);
2265 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2266 return clp;
2267err_no_callback_wq:
2268 kfree(clp->cl_ownerstr_hashtbl);
2269err_no_hashtbl:
2270 kfree(clp->cl_name.data);
2271err_no_name:
2272 kmem_cache_free(client_slab, clp);
2273 return NULL;
2274}
2275
2276static void __free_client(struct kref *k)
2277{
2278 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2279 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2280
2281 free_svc_cred(&clp->cl_cred);
2282 destroy_workqueue(clp->cl_callback_wq);
2283 kfree(clp->cl_ownerstr_hashtbl);
2284 kfree(clp->cl_name.data);
2285 kfree(clp->cl_nii_domain.data);
2286 kfree(clp->cl_nii_name.data);
2287 idr_destroy(&clp->cl_stateids);
2288 kfree(clp->cl_ra);
2289 kmem_cache_free(client_slab, clp);
2290}
2291
2292static void drop_client(struct nfs4_client *clp)
2293{
2294 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2295}
2296
2297static void
2298free_client(struct nfs4_client *clp)
2299{
2300 while (!list_empty(&clp->cl_sessions)) {
2301 struct nfsd4_session *ses;
2302 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2303 se_perclnt);
2304 list_del(&ses->se_perclnt);
2305 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2306 free_session(ses);
2307 }
2308 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2309 if (clp->cl_nfsd_dentry) {
2310 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2311 clp->cl_nfsd_dentry = NULL;
2312 wake_up_all(&expiry_wq);
2313 }
2314 drop_client(clp);
2315}
2316
2317/* must be called under the client_lock */
2318static void
2319unhash_client_locked(struct nfs4_client *clp)
2320{
2321 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2322 struct nfsd4_session *ses;
2323
2324 lockdep_assert_held(&nn->client_lock);
2325
2326 /* Mark the client as expired! */
2327 clp->cl_time = 0;
2328 /* Make it invisible */
2329 if (!list_empty(&clp->cl_idhash)) {
2330 list_del_init(&clp->cl_idhash);
2331 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2332 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2333 else
2334 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2335 }
2336 list_del_init(&clp->cl_lru);
2337 spin_lock(&clp->cl_lock);
2338 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2339 list_del_init(&ses->se_hash);
2340 spin_unlock(&clp->cl_lock);
2341}
2342
2343static void
2344unhash_client(struct nfs4_client *clp)
2345{
2346 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2347
2348 spin_lock(&nn->client_lock);
2349 unhash_client_locked(clp);
2350 spin_unlock(&nn->client_lock);
2351}
2352
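/*
 * Expire the client only if nothing is using it: if RPCs still hold a
 * reference, return nfserr_jukebox (NFS4ERR_DELAY) so the caller retries
 * later.
 */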
2353static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2354{
2355 int users = atomic_read(&clp->cl_rpc_users);
2356
2357 trace_nfsd_mark_client_expired(clp, users);
2358
2359 if (users)
2360 return nfserr_jukebox;
2361 unhash_client_locked(clp);
2362 return nfs_ok;
2363}
2364
2365static void
2366__destroy_client(struct nfs4_client *clp)
2367{
2368 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2369 int i;
2370 struct nfs4_openowner *oo;
2371 struct nfs4_delegation *dp;
2372 LIST_HEAD(reaplist);
2373
2374 spin_lock(&state_lock);
2375 while (!list_empty(&clp->cl_delegations)) {
2376 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2377 unhash_delegation_locked(dp, SC_STATUS_CLOSED);
2378 list_add(&dp->dl_recall_lru, &reaplist);
2379 }
2380 spin_unlock(&state_lock);
2381 while (!list_empty(&reaplist)) {
2382 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2383 list_del_init(&dp->dl_recall_lru);
2384 destroy_unhashed_deleg(dp);
2385 }
2386 while (!list_empty(&clp->cl_revoked)) {
2387 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2388 list_del_init(&dp->dl_recall_lru);
2389 nfs4_put_stid(&dp->dl_stid);
2390 }
2391 while (!list_empty(&clp->cl_openowners)) {
2392 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2393 nfs4_get_stateowner(&oo->oo_owner);
2394 release_openowner(oo);
2395 }
2396 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2397 struct nfs4_stateowner *so, *tmp;
2398
2399 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2400 so_strhash) {
2401 /* Should be no openowners at this point */
2402 WARN_ON_ONCE(so->so_is_open_owner);
2403 remove_blocked_locks(lockowner(so));
2404 }
2405 }
2406 nfsd4_return_all_client_layouts(clp);
2407 nfsd4_shutdown_copy(clp);
2408 nfsd4_shutdown_callback(clp);
2409 if (clp->cl_cb_conn.cb_xprt)
2410 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2411 atomic_add_unless(&nn->nfs4_client_count, -1, 0);
2412 nfsd4_dec_courtesy_client_count(nn, clp);
2413 free_client(clp);
2414 wake_up_all(&expiry_wq);
2415}
2416
2417static void
2418destroy_client(struct nfs4_client *clp)
2419{
2420 unhash_client(clp);
2421 __destroy_client(clp);
2422}
2423
2424static void inc_reclaim_complete(struct nfs4_client *clp)
2425{
2426 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2427
2428 if (!nn->track_reclaim_completes)
2429 return;
2430 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2431 return;
2432 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2433 nn->reclaim_str_hashtbl_size) {
2434 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2435 clp->net->ns.inum);
2436 nfsd4_end_grace(nn);
2437 }
2438}
2439
2440static void expire_client(struct nfs4_client *clp)
2441{
2442 unhash_client(clp);
2443 nfsd4_client_record_remove(clp);
2444 __destroy_client(clp);
2445}
2446
2447static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2448{
2449 memcpy(target->cl_verifier.data, source->data,
2450 sizeof(target->cl_verifier.data));
2451}
2452
2453static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2454{
2455 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2456 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2457}
2458
2459static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2460{
2461 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2462 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2463 GFP_KERNEL);
2464 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2465 if ((source->cr_principal && !target->cr_principal) ||
2466 (source->cr_raw_principal && !target->cr_raw_principal) ||
2467 (source->cr_targ_princ && !target->cr_targ_princ))
2468 return -ENOMEM;
2469
2470 target->cr_flavor = source->cr_flavor;
2471 target->cr_uid = source->cr_uid;
2472 target->cr_gid = source->cr_gid;
2473 target->cr_group_info = source->cr_group_info;
2474 get_group_info(target->cr_group_info);
2475 target->cr_gss_mech = source->cr_gss_mech;
2476 if (source->cr_gss_mech)
2477 gss_mech_get(source->cr_gss_mech);
2478 return 0;
2479}
2480
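/*
 * Order two opaque blobs by length first, then by content; this keys the
 * per-net client-name rbtrees.
 */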
2481static int
2482compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2483{
2484 if (o1->len < o2->len)
2485 return -1;
2486 if (o1->len > o2->len)
2487 return 1;
2488 return memcmp(o1->data, o2->data, o1->len);
2489}
2490
2491static int
2492same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2493{
2494 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2495}
2496
2497static int
2498same_clid(clientid_t *cl1, clientid_t *cl2)
2499{
2500 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2501}
2502
2503static bool groups_equal(struct group_info *g1, struct group_info *g2)
2504{
2505 int i;
2506
2507 if (g1->ngroups != g2->ngroups)
2508 return false;
2509 for (i=0; i<g1->ngroups; i++)
2510 if (!gid_eq(g1->gid[i], g2->gid[i]))
2511 return false;
2512 return true;
2513}
2514
2515/*
2516 * RFC 3530 language requires clid_inuse be returned when the
2517 * "principal" associated with a request differs from that previously
2518 * used. We use the uid, gids, and gss principal string as our best
2519 * approximation. We also don't want to allow non-gss use of a client
2520 * established using gss: in theory cr_principal should catch that
2521 * change, but in practice cr_principal can be null even in the gss case
2522 * since gssd doesn't always pass down a principal string.
2523 */
2524static bool is_gss_cred(struct svc_cred *cr)
2525{
2526 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2527 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2528}
2529
2530
2531static bool
2532same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2533{
2534 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2535 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2536 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2537 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2538 return false;
2539 /* XXX: check that cr_targ_princ fields match ? */
2540 if (cr1->cr_principal == cr2->cr_principal)
2541 return true;
2542 if (!cr1->cr_principal || !cr2->cr_principal)
2543 return false;
2544 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2545}
2546
2547static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2548{
2549 struct svc_cred *cr = &rqstp->rq_cred;
2550 u32 service;
2551
2552 if (!cr->cr_gss_mech)
2553 return false;
2554 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2555 return service == RPC_GSS_SVC_INTEGRITY ||
2556 service == RPC_GSS_SVC_PRIVACY;
2557}
2558
2559bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2560{
2561 struct svc_cred *cr = &rqstp->rq_cred;
2562
2563 if (!cl->cl_mach_cred)
2564 return true;
2565 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2566 return false;
2567 if (!svc_rqst_integrity_protected(rqstp))
2568 return false;
2569 if (cl->cl_cred.cr_raw_principal)
2570 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2571 cr->cr_raw_principal);
2572 if (!cr->cr_principal)
2573 return false;
2574 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2575}
2576
2577static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2578{
2579 __be32 verf[2];
2580
2581 /*
2582 * This is opaque to client, so no need to byte-swap. Use
2583 * __force to keep sparse happy
2584 */
2585 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2586 verf[1] = (__force __be32)nn->clverifier_counter++;
2587 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2588}
2589
2590static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2591{
2592 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2593 clp->cl_clientid.cl_id = nn->clientid_counter++;
2594 gen_confirm(clp, nn);
2595}
2596
2597static struct nfs4_stid *
2598find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2599{
2600 struct nfs4_stid *ret;
2601
2602 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2603 if (!ret || !ret->sc_type)
2604 return NULL;
2605 return ret;
2606}
2607
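/*
 * Look up a stateid and take a reference only if its type is in @typemask
 * and it carries no status bits outside @ok_states; otherwise return NULL.
 */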
2608static struct nfs4_stid *
2609find_stateid_by_type(struct nfs4_client *cl, stateid_t *t,
2610 unsigned short typemask, unsigned short ok_states)
2611{
2612 struct nfs4_stid *s;
2613
2614 spin_lock(&cl->cl_lock);
2615 s = find_stateid_locked(cl, t);
2616 if (s != NULL) {
2617 if ((s->sc_status & ~ok_states) == 0 &&
2618 (typemask & s->sc_type))
2619 refcount_inc(&s->sc_count);
2620 else
2621 s = NULL;
2622 }
2623 spin_unlock(&cl->cl_lock);
2624 return s;
2625}
2626
2627static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2628{
2629 struct nfsdfs_client *nc;
2630 nc = get_nfsdfs_client(inode);
2631 if (!nc)
2632 return NULL;
2633 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2634}
2635
2636static void seq_quote_mem(struct seq_file *m, char *data, int len)
2637{
2638 seq_puts(m, "\"");
2639 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2640 seq_puts(m, "\"");
2641}
2642
2643static const char *cb_state2str(int state)
2644{
2645 switch (state) {
2646 case NFSD4_CB_UP:
2647 return "UP";
2648 case NFSD4_CB_UNKNOWN:
2649 return "UNKNOWN";
2650 case NFSD4_CB_DOWN:
2651 return "DOWN";
2652 case NFSD4_CB_FAULT:
2653 return "FAULT";
2654 }
2655 return "UNDEFINED";
2656}
2657
2658static int client_info_show(struct seq_file *m, void *v)
2659{
2660 struct inode *inode = file_inode(m->file);
2661 struct nfs4_client *clp;
2662 u64 clid;
2663
2664 clp = get_nfsdfs_clp(inode);
2665 if (!clp)
2666 return -ENXIO;
2667 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2668 seq_printf(m, "clientid: 0x%llx\n", clid);
2669 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2670
2671 if (clp->cl_state == NFSD4_COURTESY)
2672 seq_puts(m, "status: courtesy\n");
2673 else if (clp->cl_state == NFSD4_EXPIRABLE)
2674 seq_puts(m, "status: expirable\n");
2675 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2676 seq_puts(m, "status: confirmed\n");
2677 else
2678 seq_puts(m, "status: unconfirmed\n");
2679 seq_printf(m, "seconds from last renew: %lld\n",
2680 ktime_get_boottime_seconds() - clp->cl_time);
2681 seq_puts(m, "name: ");
2682 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2683 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2684 if (clp->cl_nii_domain.data) {
2685 seq_puts(m, "Implementation domain: ");
2686 seq_quote_mem(m, clp->cl_nii_domain.data,
2687 clp->cl_nii_domain.len);
2688 seq_puts(m, "\nImplementation name: ");
2689 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2690 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2691 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2692 }
2693 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2694 seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr);
2695 seq_printf(m, "admin-revoked states: %d\n",
2696 atomic_read(&clp->cl_admin_revoked));
2697 drop_client(clp);
2698
2699 return 0;
2700}
2701
2702DEFINE_SHOW_ATTRIBUTE(client_info);
2703
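/*
 * The "states" seq_file iterates the client's stateid IDR under cl_lock,
 * which is taken in ->start and released in ->stop.
 */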
2704static void *states_start(struct seq_file *s, loff_t *pos)
2705 __acquires(&clp->cl_lock)
2706{
2707 struct nfs4_client *clp = s->private;
2708 unsigned long id = *pos;
2709 void *ret;
2710
2711 spin_lock(&clp->cl_lock);
2712 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2713 *pos = id;
2714 return ret;
2715}
2716
2717static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2718{
2719 struct nfs4_client *clp = s->private;
2720 unsigned long id = *pos;
2721 void *ret;
2722
2723 id = *pos;
2724 id++;
2725 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2726 *pos = id;
2727 return ret;
2728}
2729
2730static void states_stop(struct seq_file *s, void *v)
2731 __releases(&clp->cl_lock)
2732{
2733 struct nfs4_client *clp = s->private;
2734
2735 spin_unlock(&clp->cl_lock);
2736}
2737
2738static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2739{
2740 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2741}
2742
2743static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2744{
2745 struct inode *inode = file_inode(f->nf_file);
2746
2747 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2748 MAJOR(inode->i_sb->s_dev),
2749 MINOR(inode->i_sb->s_dev),
2750 inode->i_ino);
2751}
2752
2753static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2754{
2755 seq_puts(s, "owner: ");
2756 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2757}
2758
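/* Print a stateid as its 8-hex-digit generation followed by the 12 opaque bytes */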
2759static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2760{
2761 seq_printf(s, "0x%.8x", stid->si_generation);
2762 seq_printf(s, "%12phN", &stid->si_opaque);
2763}
2764
2765static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2766{
2767 struct nfs4_ol_stateid *ols;
2768 struct nfs4_file *nf;
2769 struct nfsd_file *file;
2770 struct nfs4_stateowner *oo;
2771 unsigned int access, deny;
2772
2773 ols = openlockstateid(st);
2774 oo = ols->st_stateowner;
2775 nf = st->sc_file;
2776
2777 seq_puts(s, "- ");
2778 nfs4_show_stateid(s, &st->sc_stateid);
2779 seq_puts(s, ": { type: open, ");
2780
2781 access = bmap_to_share_mode(ols->st_access_bmap);
2782 deny = bmap_to_share_mode(ols->st_deny_bmap);
2783
2784 seq_printf(s, "access: %s%s, ",
2785 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2786 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2787 seq_printf(s, "deny: %s%s, ",
2788 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2789 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2790
2791 if (nf) {
2792 spin_lock(&nf->fi_lock);
2793 file = find_any_file_locked(nf);
2794 if (file) {
2795 nfs4_show_superblock(s, file);
2796 seq_puts(s, ", ");
2797 nfs4_show_fname(s, file);
2798 seq_puts(s, ", ");
2799 }
2800 spin_unlock(&nf->fi_lock);
2801 } else
2802 seq_puts(s, "closed, ");
2803 nfs4_show_owner(s, oo);
2804 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2805 seq_puts(s, ", admin-revoked");
2806 seq_puts(s, " }\n");
2807 return 0;
2808}
2809
2810static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2811{
2812 struct nfs4_ol_stateid *ols;
2813 struct nfs4_file *nf;
2814 struct nfsd_file *file;
2815 struct nfs4_stateowner *oo;
2816
2817 ols = openlockstateid(st);
2818 oo = ols->st_stateowner;
2819 nf = st->sc_file;
2820
2821 seq_puts(s, "- ");
2822 nfs4_show_stateid(s, &st->sc_stateid);
2823 seq_puts(s, ": { type: lock, ");
2824
2825 spin_lock(&nf->fi_lock);
2826 file = find_any_file_locked(nf);
2827 if (file) {
2828 /*
2829 * Note: a lock stateid isn't really the same thing as a lock,
2830 * it's the locking state held by one owner on a file, and there
2831 * may be multiple (or no) lock ranges associated with it.
2832 * (The same is true of open stateids.)
2833 */
2834
2835 nfs4_show_superblock(s, file);
2836 /* XXX: open stateid? */
2837 seq_puts(s, ", ");
2838 nfs4_show_fname(s, file);
2839 seq_puts(s, ", ");
2840 }
2841 nfs4_show_owner(s, oo);
2842 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2843 seq_puts(s, ", admin-revoked");
2844 seq_puts(s, " }\n");
2845 spin_unlock(&nf->fi_lock);
2846 return 0;
2847}
2848
2849static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2850{
2851 struct nfs4_delegation *ds;
2852 struct nfs4_file *nf;
2853 struct nfsd_file *file;
2854
2855 ds = delegstateid(st);
2856 nf = st->sc_file;
2857
2858 seq_puts(s, "- ");
2859 nfs4_show_stateid(s, &st->sc_stateid);
2860 seq_puts(s, ": { type: deleg, ");
2861
2862 seq_printf(s, "access: %s",
2863 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2864
2865 /* XXX: lease time, whether it's being recalled. */
2866
2867 spin_lock(&nf->fi_lock);
2868 file = nf->fi_deleg_file;
2869 if (file) {
2870 seq_puts(s, ", ");
2871 nfs4_show_superblock(s, file);
2872 seq_puts(s, ", ");
2873 nfs4_show_fname(s, file);
2874 }
2875 spin_unlock(&nf->fi_lock);
2876 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2877 seq_puts(s, ", admin-revoked");
2878 seq_puts(s, " }\n");
2879 return 0;
2880}
2881
2882static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2883{
2884 struct nfs4_layout_stateid *ls;
2885 struct nfsd_file *file;
2886
2887 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2888
2889 seq_puts(s, "- ");
2890 nfs4_show_stateid(s, &st->sc_stateid);
2891 seq_puts(s, ": { type: layout");
2892
2893 /* XXX: What else would be useful? */
2894
2895 spin_lock(&ls->ls_stid.sc_file->fi_lock);
2896 file = ls->ls_file;
2897 if (file) {
2898 seq_puts(s, ", ");
2899 nfs4_show_superblock(s, file);
2900 seq_puts(s, ", ");
2901 nfs4_show_fname(s, file);
2902 }
2903 spin_unlock(&ls->ls_stid.sc_file->fi_lock);
2904 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2905 seq_puts(s, ", admin-revoked");
2906 seq_puts(s, " }\n");
2907
2908 return 0;
2909}
2910
2911static int states_show(struct seq_file *s, void *v)
2912{
2913 struct nfs4_stid *st = v;
2914
2915 switch (st->sc_type) {
2916 case SC_TYPE_OPEN:
2917 return nfs4_show_open(s, st);
2918 case SC_TYPE_LOCK:
2919 return nfs4_show_lock(s, st);
2920 case SC_TYPE_DELEG:
2921 return nfs4_show_deleg(s, st);
2922 case SC_TYPE_LAYOUT:
2923 return nfs4_show_layout(s, st);
2924 default:
2925 return 0; /* XXX: or SEQ_SKIP? */
2926 }
2927 /* XXX: copy stateids? */
2928}
2929
2930static struct seq_operations states_seq_ops = {
2931 .start = states_start,
2932 .next = states_next,
2933 .stop = states_stop,
2934 .show = states_show
2935};
2936
2937static int client_states_open(struct inode *inode, struct file *file)
2938{
2939 struct seq_file *s;
2940 struct nfs4_client *clp;
2941 int ret;
2942
2943 clp = get_nfsdfs_clp(inode);
2944 if (!clp)
2945 return -ENXIO;
2946
2947 ret = seq_open(file, &states_seq_ops);
2948 if (ret)
2949 return ret;
2950 s = file->private_data;
2951 s->private = clp;
2952 return 0;
2953}
2954
2955static int client_opens_release(struct inode *inode, struct file *file)
2956{
2957 struct seq_file *m = file->private_data;
2958 struct nfs4_client *clp = m->private;
2959
2960 /* XXX: alternatively, we could get/drop in seq start/stop */
2961 drop_client(clp);
2962 return seq_release(inode, file);
2963}
2964
2965static const struct file_operations client_states_fops = {
2966 .open = client_states_open,
2967 .read = seq_read,
2968 .llseek = seq_lseek,
2969 .release = client_opens_release,
2970};
2971
2972/*
2973 * Normally we refuse to destroy clients that are in use, but here the
2974 * administrator is telling us to just do it. We also want to wait
2975 * so the caller has a guarantee that the client's locks are gone by
2976 * the time the write returns:
2977 */
2978static void force_expire_client(struct nfs4_client *clp)
2979{
2980 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2981 bool already_expired;
2982
2983 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2984
2985 spin_lock(&nn->client_lock);
2986 clp->cl_time = 0;
2987 spin_unlock(&nn->client_lock);
2988
2989 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2990 spin_lock(&nn->client_lock);
2991 already_expired = list_empty(&clp->cl_lru);
2992 if (!already_expired)
2993 unhash_client_locked(clp);
2994 spin_unlock(&nn->client_lock);
2995
2996 if (!already_expired)
2997 expire_client(clp);
2998 else
2999 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
3000}
3001
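/*
 * Writing the literal string "expire\n" to a client's "ctl" file forces
 * that client to be expired; any other write is rejected with -EINVAL.
 */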
3002static ssize_t client_ctl_write(struct file *file, const char __user *buf,
3003 size_t size, loff_t *pos)
3004{
3005 char *data;
3006 struct nfs4_client *clp;
3007
3008 data = simple_transaction_get(file, buf, size);
3009 if (IS_ERR(data))
3010 return PTR_ERR(data);
3011 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
3012 return -EINVAL;
3013 clp = get_nfsdfs_clp(file_inode(file));
3014 if (!clp)
3015 return -ENXIO;
3016 force_expire_client(clp);
3017 drop_client(clp);
3018 return 7;
3019}
3020
3021static const struct file_operations client_ctl_fops = {
3022 .write = client_ctl_write,
3023 .release = simple_transaction_release,
3024};
3025
3026static const struct tree_descr client_files[] = {
3027 [0] = {"info", &client_info_fops, S_IRUSR},
3028 [1] = {"states", &client_states_fops, S_IRUSR},
3029 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
3030 [3] = {""},
3031};
3032
3033static int
3034nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
3035 struct rpc_task *task)
3036{
3037 trace_nfsd_cb_recall_any_done(cb, task);
3038 switch (task->tk_status) {
3039 case -NFS4ERR_DELAY:
3040 rpc_delay(task, 2 * HZ);
3041 return 0;
3042 default:
3043 return 1;
3044 }
3045}
3046
3047static void
3048nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
3049{
3050 struct nfs4_client *clp = cb->cb_clp;
3051
3052 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
3053 drop_client(clp);
3054}
3055
3056static int
3057nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
3058{
3059 struct nfs4_cb_fattr *ncf =
3060 container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
3061 struct nfs4_delegation *dp =
3062 container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
3063
3064 trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task);
3065 ncf->ncf_cb_status = task->tk_status;
3066 switch (task->tk_status) {
3067 case -NFS4ERR_DELAY:
3068 rpc_delay(task, 2 * HZ);
3069 return 0;
3070 default:
3071 return 1;
3072 }
3073}
3074
3075static void
3076nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
3077{
3078 struct nfs4_cb_fattr *ncf =
3079 container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
3080 struct nfs4_delegation *dp =
3081 container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
3082
3083 clear_and_wake_up_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
3084 nfs4_put_stid(&dp->dl_stid);
3085}
3086
3087static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
3088 .done = nfsd4_cb_recall_any_done,
3089 .release = nfsd4_cb_recall_any_release,
3090 .opcode = OP_CB_RECALL_ANY,
3091};
3092
3093static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
3094 .done = nfsd4_cb_getattr_done,
3095 .release = nfsd4_cb_getattr_release,
3096 .opcode = OP_CB_GETATTR,
3097};
3098
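/*
 * Queue a CB_GETATTR to the client holding this delegation unless one is
 * already in flight (CB_GETATTR_BUSY); the stateid reference taken here
 * is dropped in nfsd4_cb_getattr_release().
 */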
3099static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
3100{
3101 struct nfs4_delegation *dp =
3102 container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
3103
3104 if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
3105 return;
3106 /* set to proper status when nfsd4_cb_getattr_done runs */
3107 ncf->ncf_cb_status = NFS4ERR_IO;
3108
3109 refcount_inc(&dp->dl_stid.sc_count);
3110 nfsd4_run_cb(&ncf->ncf_getattr);
3111}
3112
3113static struct nfs4_client *create_client(struct xdr_netobj name,
3114 struct svc_rqst *rqstp, nfs4_verifier *verf)
3115{
3116 struct nfs4_client *clp;
3117 struct sockaddr *sa = svc_addr(rqstp);
3118 int ret;
3119 struct net *net = SVC_NET(rqstp);
3120 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3121 struct dentry *dentries[ARRAY_SIZE(client_files)];
3122
3123 clp = alloc_client(name, nn);
3124 if (clp == NULL)
3125 return NULL;
3126
3127 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
3128 if (ret) {
3129 free_client(clp);
3130 return NULL;
3131 }
3132 gen_clid(clp, nn);
3133 kref_init(&clp->cl_nfsdfs.cl_ref);
3134 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
3135 clp->cl_time = ktime_get_boottime_seconds();
3136 clear_bit(0, &clp->cl_cb_slot_busy);
3137 copy_verf(clp, verf);
3138 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
3139 clp->cl_cb_session = NULL;
3140 clp->net = net;
3141 clp->cl_nfsd_dentry = nfsd_client_mkdir(
3142 nn, &clp->cl_nfsdfs,
3143 clp->cl_clientid.cl_id - nn->clientid_base,
3144 client_files, dentries);
3145 clp->cl_nfsd_info_dentry = dentries[0];
3146 if (!clp->cl_nfsd_dentry) {
3147 free_client(clp);
3148 return NULL;
3149 }
3150 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
3151 if (!clp->cl_ra) {
3152 free_client(clp);
3153 return NULL;
3154 }
3155 clp->cl_ra_time = 0;
3156 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
3157 NFSPROC4_CLNT_CB_RECALL_ANY);
3158 return clp;
3159}
3160
3161static void
3162add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
3163{
3164 struct rb_node **new = &(root->rb_node), *parent = NULL;
3165 struct nfs4_client *clp;
3166
3167 while (*new) {
3168 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
3169 parent = *new;
3170
3171 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
3172 new = &((*new)->rb_left);
3173 else
3174 new = &((*new)->rb_right);
3175 }
3176
3177 rb_link_node(&new_clp->cl_namenode, parent, new);
3178 rb_insert_color(&new_clp->cl_namenode, root);
3179}
3180
3181static struct nfs4_client *
3182find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
3183{
3184 int cmp;
3185 struct rb_node *node = root->rb_node;
3186 struct nfs4_client *clp;
3187
3188 while (node) {
3189 clp = rb_entry(node, struct nfs4_client, cl_namenode);
3190 cmp = compare_blob(&clp->cl_name, name);
3191 if (cmp > 0)
3192 node = node->rb_left;
3193 else if (cmp < 0)
3194 node = node->rb_right;
3195 else
3196 return clp;
3197 }
3198 return NULL;
3199}
3200
3201static void
3202add_to_unconfirmed(struct nfs4_client *clp)
3203{
3204 unsigned int idhashval;
3205 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3206
3207 lockdep_assert_held(&nn->client_lock);
3208
3209 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3210 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
3211 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3212 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
3213 renew_client_locked(clp);
3214}
3215
3216static void
3217move_to_confirmed(struct nfs4_client *clp)
3218{
3219 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3220 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3221
3222 lockdep_assert_held(&nn->client_lock);
3223
3224 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
3225 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
3226 add_clp_to_name_tree(clp, &nn->conf_name_tree);
3227 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3228 trace_nfsd_clid_confirmed(&clp->cl_clientid);
3229 renew_client_locked(clp);
3230}
3231
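/*
 * Look up a client by shorthand clientid in the given hash table; a hit
 * also renews the client's lease. The minor-version check keeps NFSv4.0
 * clients and session-based (4.1+) clients from aliasing each other.
 */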
3232static struct nfs4_client *
3233find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
3234{
3235 struct nfs4_client *clp;
3236 unsigned int idhashval = clientid_hashval(clid->cl_id);
3237
3238 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
3239 if (same_clid(&clp->cl_clientid, clid)) {
3240 if ((bool)clp->cl_minorversion != sessions)
3241 return NULL;
3242 renew_client_locked(clp);
3243 return clp;
3244 }
3245 }
3246 return NULL;
3247}
3248
3249static struct nfs4_client *
3250find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3251{
3252 struct list_head *tbl = nn->conf_id_hashtbl;
3253
3254 lockdep_assert_held(&nn->client_lock);
3255 return find_client_in_id_table(tbl, clid, sessions);
3256}
3257
3258static struct nfs4_client *
3259find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3260{
3261 struct list_head *tbl = nn->unconf_id_hashtbl;
3262
3263 lockdep_assert_held(&nn->client_lock);
3264 return find_client_in_id_table(tbl, clid, sessions);
3265}
3266
3267static bool clp_used_exchangeid(struct nfs4_client *clp)
3268{
3269 return clp->cl_exchange_flags != 0;
3270}
3271
3272static struct nfs4_client *
3273find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3274{
3275 lockdep_assert_held(&nn->client_lock);
3276 return find_clp_in_name_tree(name, &nn->conf_name_tree);
3277}
3278
3279static struct nfs4_client *
3280find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3281{
3282 lockdep_assert_held(&nn->client_lock);
3283 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
3284}
3285
3286static void
3287gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3288{
3289 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3290 struct sockaddr *sa = svc_addr(rqstp);
3291 u32 scopeid = rpc_get_scope_id(sa);
3292 unsigned short expected_family;
3293
3294 /* Currently, we only support tcp and tcp6 for the callback channel */
3295 if (se->se_callback_netid_len == 3 &&
3296 !memcmp(se->se_callback_netid_val, "tcp", 3))
3297 expected_family = AF_INET;
3298 else if (se->se_callback_netid_len == 4 &&
3299 !memcmp(se->se_callback_netid_val, "tcp6", 4))
3300 expected_family = AF_INET6;
3301 else
3302 goto out_err;
3303
3304 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3305 se->se_callback_addr_len,
3306 (struct sockaddr *)&conn->cb_addr,
3307 sizeof(conn->cb_addr));
3308
3309 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
3310 goto out_err;
3311
3312 if (conn->cb_addr.ss_family == AF_INET6)
3313 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
3314
3315 conn->cb_prog = se->se_callback_prog;
3316 conn->cb_ident = se->se_callback_ident;
3317 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
3318 trace_nfsd_cb_args(clp, conn);
3319 return;
3320out_err:
3321 conn->cb_addr.ss_family = AF_UNSPEC;
3322 conn->cb_addrlen = 0;
3323 trace_nfsd_cb_nodelegs(clp);
3324 return;
3325}
3326
3327/*
3328 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
3329 */
3330static void
3331nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
3332{
3333 struct xdr_buf *buf = resp->xdr->buf;
3334 struct nfsd4_slot *slot = resp->cstate.slot;
3335 unsigned int base;
3336
3337 dprintk("--> %s slot %p\n", __func__, slot);
3338
3339 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
3340 slot->sl_opcnt = resp->opcnt;
3341 slot->sl_status = resp->cstate.status;
3342 free_svc_cred(&slot->sl_cred);
3343 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
3344
3345 if (!nfsd4_cache_this(resp)) {
3346 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
3347 return;
3348 }
3349 slot->sl_flags |= NFSD4_SLOT_CACHED;
3350
3351 base = resp->cstate.data_offset;
3352 slot->sl_datalen = buf->len - base;
3353 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
3354 WARN(1, "%s: sessions DRC could not cache compound\n",
3355 __func__);
3356 return;
3357}
3358
3359/*
3360 * Encode the replay sequence operation from the slot values.
3361 * If cachethis is FALSE, encode the uncached-reply error on the next
3362 * operation, which sets resp->p and increments resp->opcnt for
3363 * nfs4svc_encode_compoundres.
3364 *
3365 */
3366static __be32
3367nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
3368 struct nfsd4_compoundres *resp)
3369{
3370 struct nfsd4_op *op;
3371 struct nfsd4_slot *slot = resp->cstate.slot;
3372
3373 /* Encode the replayed sequence operation */
3374 op = &args->ops[resp->opcnt - 1];
3375 nfsd4_encode_operation(resp, op);
3376
3377 if (slot->sl_flags & NFSD4_SLOT_CACHED)
3378 return op->status;
3379 if (args->opcnt == 1) {
3380 /*
3381 * The original operation wasn't a solo sequence--we
3382 * always cache those--so this retry must not match the
3383 * original:
3384 */
3385 op->status = nfserr_seq_false_retry;
3386 } else {
3387 op = &args->ops[resp->opcnt++];
3388 op->status = nfserr_retry_uncached_rep;
3389 nfsd4_encode_operation(resp, op);
3390 }
3391 return op->status;
3392}
3393
3394/*
3395 * The sequence operation is not cached because we can use the slot and
3396 * session values.
3397 */
3398static __be32
3399nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3400 struct nfsd4_sequence *seq)
3401{
3402 struct nfsd4_slot *slot = resp->cstate.slot;
3403 struct xdr_stream *xdr = resp->xdr;
3404 __be32 *p;
3405 __be32 status;
3406
3407 dprintk("--> %s slot %p\n", __func__, slot);
3408
3409 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3410 if (status)
3411 return status;
3412
3413 p = xdr_reserve_space(xdr, slot->sl_datalen);
3414 if (!p) {
3415 WARN_ON_ONCE(1);
3416 return nfserr_serverfault;
3417 }
3418 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3419 xdr_commit_encode(xdr);
3420
3421 resp->opcnt = slot->sl_opcnt;
3422 return slot->sl_status;
3423}
3424
3425/*
3426 * Set the exchange_id flags returned by the server.
3427 */
3428static void
3429nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3430{
3431#ifdef CONFIG_NFSD_PNFS
3432 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3433#else
3434 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3435#endif
3436
3437 /* Referrals are supported, Migration is not. */
3438 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3439
3440 /* set the wire flags to return to client. */
3441 clid->flags = new->cl_exchange_flags;
3442}
3443
3444static bool client_has_openowners(struct nfs4_client *clp)
3445{
3446 struct nfs4_openowner *oo;
3447
3448 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3449 if (!list_empty(&oo->oo_owner.so_stateids))
3450 return true;
3451 }
3452 return false;
3453}
3454
3455static bool client_has_state(struct nfs4_client *clp)
3456{
3457 return client_has_openowners(clp)
3458#ifdef CONFIG_NFSD_PNFS
3459 || !list_empty(&clp->cl_lo_states)
3460#endif
3461 || !list_empty(&clp->cl_delegations)
3462 || !list_empty(&clp->cl_sessions)
3463 || !list_empty(&clp->async_copies);
3464}
3465
3466static __be32 copy_impl_id(struct nfs4_client *clp,
3467 struct nfsd4_exchange_id *exid)
3468{
3469 if (!exid->nii_domain.data)
3470 return 0;
3471 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3472 if (!clp->cl_nii_domain.data)
3473 return nfserr_jukebox;
3474 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3475 if (!clp->cl_nii_name.data)
3476 return nfserr_jukebox;
3477 clp->cl_nii_time = exid->nii_time;
3478 return 0;
3479}
3480
3481__be32
3482nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3483 union nfsd4_op_u *u)
3484{
3485 struct nfsd4_exchange_id *exid = &u->exchange_id;
3486 struct nfs4_client *conf, *new;
3487 struct nfs4_client *unconf = NULL;
3488 __be32 status;
3489 char addr_str[INET6_ADDRSTRLEN];
3490 nfs4_verifier verf = exid->verifier;
3491 struct sockaddr *sa = svc_addr(rqstp);
3492 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3493 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3494
3495 rpc_ntop(sa, addr_str, sizeof(addr_str));
3496 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3497 "ip_addr=%s flags %x, spa_how %u\n",
3498 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3499 addr_str, exid->flags, exid->spa_how);
3500
3501 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3502 return nfserr_inval;
3503
3504 new = create_client(exid->clname, rqstp, &verf);
3505 if (new == NULL)
3506 return nfserr_jukebox;
3507 status = copy_impl_id(new, exid);
3508 if (status)
3509 goto out_nolock;
3510
3511 switch (exid->spa_how) {
3512 case SP4_MACH_CRED:
3513 exid->spo_must_enforce[0] = 0;
3514 exid->spo_must_enforce[1] = (
3515 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3516 1 << (OP_EXCHANGE_ID - 32) |
3517 1 << (OP_CREATE_SESSION - 32) |
3518 1 << (OP_DESTROY_SESSION - 32) |
3519 1 << (OP_DESTROY_CLIENTID - 32));
3520
3521 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3522 1 << (OP_OPEN_DOWNGRADE) |
3523 1 << (OP_LOCKU) |
3524 1 << (OP_DELEGRETURN));
3525
3526 exid->spo_must_allow[1] &= (
3527 1 << (OP_TEST_STATEID - 32) |
3528 1 << (OP_FREE_STATEID - 32));
3529 if (!svc_rqst_integrity_protected(rqstp)) {
3530 status = nfserr_inval;
3531 goto out_nolock;
3532 }
3533 /*
3534 * Sometimes userspace doesn't give us a principal.
3535 * Which is a bug, really. Anyway, we can't enforce
3536 * MACH_CRED in that case, better to give up now:
3537 */
3538 if (!new->cl_cred.cr_principal &&
3539 !new->cl_cred.cr_raw_principal) {
3540 status = nfserr_serverfault;
3541 goto out_nolock;
3542 }
3543 new->cl_mach_cred = true;
3544 break;
3545 case SP4_NONE:
3546 break;
3547 default: /* checked by xdr code */
3548 WARN_ON_ONCE(1);
3549 fallthrough;
3550 case SP4_SSV:
3551 status = nfserr_encr_alg_unsupp;
3552 goto out_nolock;
3553 }
3554
3555 /* Cases below refer to rfc 5661 section 18.35.4: */
3556 spin_lock(&nn->client_lock);
3557 conf = find_confirmed_client_by_name(&exid->clname, nn);
3558 if (conf) {
3559 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3560 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3561
3562 if (update) {
3563 if (!clp_used_exchangeid(conf)) { /* buggy client */
3564 status = nfserr_inval;
3565 goto out;
3566 }
3567 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3568 status = nfserr_wrong_cred;
3569 goto out;
3570 }
3571 if (!creds_match) { /* case 9 */
3572 status = nfserr_perm;
3573 goto out;
3574 }
3575 if (!verfs_match) { /* case 8 */
3576 status = nfserr_not_same;
3577 goto out;
3578 }
3579 /* case 6 */
3580 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3581 trace_nfsd_clid_confirmed_r(conf);
3582 goto out_copy;
3583 }
3584 if (!creds_match) { /* case 3 */
3585 if (client_has_state(conf)) {
3586 status = nfserr_clid_inuse;
3587 trace_nfsd_clid_cred_mismatch(conf, rqstp);
3588 goto out;
3589 }
3590 goto out_new;
3591 }
3592 if (verfs_match) { /* case 2 */
3593 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3594 trace_nfsd_clid_confirmed_r(conf);
3595 goto out_copy;
3596 }
3597 /* case 5, client reboot */
3598 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3599 conf = NULL;
3600 goto out_new;
3601 }
3602
3603 if (update) { /* case 7 */
3604 status = nfserr_noent;
3605 goto out;
3606 }
3607
3608 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3609 if (unconf) /* case 4, possible retry or client restart */
3610 unhash_client_locked(unconf);
3611
3612 /* case 1, new owner ID */
3613 trace_nfsd_clid_fresh(new);
3614
3615out_new:
3616 if (conf) {
3617 status = mark_client_expired_locked(conf);
3618 if (status)
3619 goto out;
3620 trace_nfsd_clid_replaced(&conf->cl_clientid);
3621 }
3622 new->cl_minorversion = cstate->minorversion;
3623 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3624 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3625
3626 /* Contrived initial CREATE_SESSION response */
3627 new->cl_cs_slot.sl_status = nfserr_seq_misordered;
3628
3629 add_to_unconfirmed(new);
3630 swap(new, conf);
3631out_copy:
3632 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3633 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3634
3635 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3636 nfsd4_set_ex_flags(conf, exid);
3637
3638 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3639 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3640 status = nfs_ok;
3641
3642out:
3643 spin_unlock(&nn->client_lock);
3644out_nolock:
3645 if (new)
3646 expire_client(new);
3647 if (unconf) {
3648 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3649 expire_client(unconf);
3650 }
3651 return status;
3652}
3653
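/*
 * Compare an incoming sequence ID against the slot's recorded one: a
 * retransmission of the request currently being processed is told to
 * retry later (NFS4ERR_DELAY), the next seqid is accepted, a repeat of
 * the last completed seqid is answered from the replay cache, and
 * anything else is a misordered request.
 */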
3654static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
3655{
3656 /* The slot is in use, and no response has been sent. */
3657 if (slot_inuse) {
3658 if (seqid == slot_seqid)
3659 return nfserr_jukebox;
3660 else
3661 return nfserr_seq_misordered;
3662 }
3663 /* Note unsigned 32-bit arithmetic handles wraparound: */
3664 if (likely(seqid == slot_seqid + 1))
3665 return nfs_ok;
3666 if (seqid == slot_seqid)
3667 return nfserr_replay_cache;
3668 return nfserr_seq_misordered;
3669}
3670
3671/*
3672 * Cache the create session result into the create session single DRC
3673 * slot cache by saving the xdr structure. sl_seqid has been set.
3674 * Do this for solo or embedded create session operations.
3675 */
3676static void
3677nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3678 struct nfsd4_clid_slot *slot, __be32 nfserr)
3679{
3680 slot->sl_status = nfserr;
3681 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3682}
3683
3684static __be32
3685nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3686 struct nfsd4_clid_slot *slot)
3687{
3688 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3689 return slot->sl_status;
3690}
3691
3692#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3693 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3694 1 + /* minimal tag: zero length, so only the length word */ \
3695 3 + /* version, opcount, opcode */ \
3696 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3697 /* seqid, slotID, slotID, cache */ \
3698 4 ) * sizeof(__be32))
3699
3700#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3701 2 + /* verifier: AUTH_NULL, length 0 */\
3702 1 + /* status */ \
3703 1 + /* minimal tag: zero length, so only the length word */ \
3704 3 + /* opcount, opcode, opstatus*/ \
3705 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3706 /* seqid, slotID, slotID, slotID, status */ \
3707 5 ) * sizeof(__be32))
3708
3709static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3710{
3711 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3712
3713 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3714 return nfserr_toosmall;
3715 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3716 return nfserr_toosmall;
3717 ca->headerpadsz = 0;
3718 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3719 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3720 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3721 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3722 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3723 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3724 /*
3725 * Note that decreasing the slot size below the client's request may make
3726 * it difficult for the client to function correctly, whereas decreasing
3727 * the number of slots will (just?) affect performance. When short on
3728 * memory we therefore prefer to decrease the number of slots rather than
3729 * their size. Clients that request larger slots than they need will get
3730 * poor results.
3731 * Note that we always allow at least one slot, because our
3732 * accounting is soft and provides no guarantees either way.
3733 */
3734 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3735
3736 return nfs_ok;
3737}
3738
3739/*
3740 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3741 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3742 */
3743#define RPC_MAX_HEADER_WITH_AUTH_SYS \
3744 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3745
3746#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3747 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3748
3749#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3750 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3751#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3752 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3753 sizeof(__be32))
3754
3755static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3756{
3757 ca->headerpadsz = 0;
3758
3759 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3760 return nfserr_toosmall;
3761 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3762 return nfserr_toosmall;
3763 ca->maxresp_cached = 0;
3764 if (ca->maxops < 2)
3765 return nfserr_toosmall;
3766
3767 return nfs_ok;
3768}
3769
3770static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3771{
3772 switch (cbs->flavor) {
3773 case RPC_AUTH_NULL:
3774 case RPC_AUTH_UNIX:
3775 return nfs_ok;
3776 default:
3777 /*
3778 * GSS case: the spec doesn't allow us to return this
3779 * error. But it also doesn't allow us not to support
3780 * GSS.
3781 * I'd rather this fail hard than return some error the
3782 * client might think it can already handle:
3783 */
3784 return nfserr_encr_alg_unsupp;
3785 }
3786}
3787
3788__be32
3789nfsd4_create_session(struct svc_rqst *rqstp,
3790 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3791{
3792 struct nfsd4_create_session *cr_ses = &u->create_session;
3793 struct sockaddr *sa = svc_addr(rqstp);
3794 struct nfs4_client *conf, *unconf;
3795 struct nfsd4_clid_slot *cs_slot;
3796 struct nfs4_client *old = NULL;
3797 struct nfsd4_session *new;
3798 struct nfsd4_conn *conn;
3799 __be32 status = 0;
3800 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3801
3802 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3803 return nfserr_inval;
3804 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3805 if (status)
3806 return status;
3807 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3808 if (status)
3809 return status;
3810 status = check_backchannel_attrs(&cr_ses->back_channel);
3811 if (status)
3812 goto out_release_drc_mem;
3813 status = nfserr_jukebox;
3814 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3815 if (!new)
3816 goto out_release_drc_mem;
3817 conn = alloc_conn_from_crses(rqstp, cr_ses);
3818 if (!conn)
3819 goto out_free_session;
3820
3821 spin_lock(&nn->client_lock);
3822
3823 /* RFC 8881 Section 18.36.4 Phase 1: Client record look-up. */
3824 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3825 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3826 if (!conf && !unconf) {
3827 status = nfserr_stale_clientid;
3828 goto out_free_conn;
3829 }
3830
3831 /* RFC 8881 Section 18.36.4 Phase 2: Sequence ID processing. */
3832 if (conf) {
3833 cs_slot = &conf->cl_cs_slot;
3834 trace_nfsd_slot_seqid_conf(conf, cr_ses);
3835 } else {
3836 cs_slot = &unconf->cl_cs_slot;
3837 trace_nfsd_slot_seqid_unconf(unconf, cr_ses);
3838 }
3839 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3840 switch (status) {
3841 case nfs_ok:
3842 cs_slot->sl_seqid++;
3843 cr_ses->seqid = cs_slot->sl_seqid;
3844 break;
3845 case nfserr_replay_cache:
3846 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3847 fallthrough;
3848 case nfserr_jukebox:
3849 /* The server MUST NOT cache NFS4ERR_DELAY */
3850 goto out_free_conn;
3851 default:
3852 goto out_cache_error;
3853 }
3854
3855 /* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. */
3856 if (conf) {
3857 status = nfserr_wrong_cred;
3858 if (!nfsd4_mach_creds_match(conf, rqstp))
3859 goto out_cache_error;
3860 } else {
3861 status = nfserr_clid_inuse;
3862 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3863 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3864 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3865 goto out_cache_error;
3866 }
3867 status = nfserr_wrong_cred;
3868 if (!nfsd4_mach_creds_match(unconf, rqstp))
3869 goto out_cache_error;
3870 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3871 if (old) {
3872 status = mark_client_expired_locked(old);
3873 if (status)
3874 goto out_expired_error;
3875 trace_nfsd_clid_replaced(&old->cl_clientid);
3876 }
3877 move_to_confirmed(unconf);
3878 conf = unconf;
3879 }
3880
3881 /* RFC 8881 Section 18.36.4 Phase 4: Session creation. */
3882 status = nfs_ok;
3883 /* Persistent sessions are not supported */
3884 cr_ses->flags &= ~SESSION4_PERSIST;
3885 /* Upshifting from TCP to RDMA is not supported */
3886 cr_ses->flags &= ~SESSION4_RDMA;
3887
3888 init_session(rqstp, new, conf, cr_ses);
3889 nfsd4_get_session_locked(new);
3890
3891 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3892 NFS4_MAX_SESSIONID_LEN);
3893
3894 /* cache solo and embedded create sessions under the client_lock */
3895 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3896 spin_unlock(&nn->client_lock);
3897 if (conf == unconf)
3898 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3899 /* init connection and backchannel */
3900 nfsd4_init_conn(rqstp, conn, new);
3901 nfsd4_put_session(new);
3902 if (old)
3903 expire_client(old);
3904 return status;
3905
3906out_expired_error:
3907 old = NULL;
3908 /*
3909 * Revert the slot seq_nr change so the server will process
3910 * the client's resend instead of returning a cached response.
3911 */
3912 if (status == nfserr_jukebox) {
3913 cs_slot->sl_seqid--;
3914 cr_ses->seqid = cs_slot->sl_seqid;
3915 goto out_free_conn;
3916 }
3917out_cache_error:
3918 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3919out_free_conn:
3920 spin_unlock(&nn->client_lock);
3921 free_conn(conn);
3922 if (old)
3923 expire_client(old);
3924out_free_session:
3925 __free_session(new);
3926out_release_drc_mem:
3927 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3928 return status;
3929}
3930
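/*
 * Normalize a BIND_CONN_TO_SESSION direction: the *_OR_BOTH variants are
 * collapsed to NFS4_CDFC4_BOTH; anything unrecognized is rejected.
 */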
3931static __be32 nfsd4_map_bcts_dir(u32 *dir)
3932{
3933 switch (*dir) {
3934 case NFS4_CDFC4_FORE:
3935 case NFS4_CDFC4_BACK:
3936 return nfs_ok;
3937 case NFS4_CDFC4_FORE_OR_BOTH:
3938 case NFS4_CDFC4_BACK_OR_BOTH:
3939 *dir = NFS4_CDFC4_BOTH;
3940 return nfs_ok;
3941 }
3942 return nfserr_inval;
3943}
3944
3945__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3946 struct nfsd4_compound_state *cstate,
3947 union nfsd4_op_u *u)
3948{
3949 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3950 struct nfsd4_session *session = cstate->session;
3951 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3952 __be32 status;
3953
3954 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3955 if (status)
3956 return status;
3957 spin_lock(&nn->client_lock);
3958 session->se_cb_prog = bc->bc_cb_program;
3959 session->se_cb_sec = bc->bc_cb_sec;
3960 spin_unlock(&nn->client_lock);
3961
3962 nfsd4_probe_callback(session->se_client);
3963
3964 return nfs_ok;
3965}
3966
3967static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3968{
3969 struct nfsd4_conn *c;
3970
3971 list_for_each_entry(c, &s->se_conns, cn_persession) {
3972 if (c->cn_xprt == xpt) {
3973 return c;
3974 }
3975 }
3976 return NULL;
3977}
3978
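/*
 * Check whether the requested direction @req is compatible with a
 * connection that is already bound to @session; if so, return that
 * connection in @conn.
 */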
3979static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3980 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3981{
3982 struct nfs4_client *clp = session->se_client;
3983 struct svc_xprt *xpt = rqst->rq_xprt;
3984 struct nfsd4_conn *c;
3985 __be32 status;
3986
3987 /* Following the last paragraph of RFC 5661 Section 18.34.3: */
3988 spin_lock(&clp->cl_lock);
3989 c = __nfsd4_find_conn(xpt, session);
3990 if (!c)
3991 status = nfserr_noent;
3992 else if (req == c->cn_flags)
3993 status = nfs_ok;
3994 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3995 c->cn_flags != NFS4_CDFC4_BACK)
3996 status = nfs_ok;
3997 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3998 c->cn_flags != NFS4_CDFC4_FORE)
3999 status = nfs_ok;
4000 else
4001 status = nfserr_inval;
4002 spin_unlock(&clp->cl_lock);
4003 if (status == nfs_ok && conn)
4004 *conn = c;
4005 return status;
4006}
4007
4008__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
4009 struct nfsd4_compound_state *cstate,
4010 union nfsd4_op_u *u)
4011{
4012 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
4013 __be32 status;
4014 struct nfsd4_conn *conn;
4015 struct nfsd4_session *session;
4016 struct net *net = SVC_NET(rqstp);
4017 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4018
4019 if (!nfsd4_last_compound_op(rqstp))
4020 return nfserr_not_only_op;
4021 spin_lock(&nn->client_lock);
4022 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
4023 spin_unlock(&nn->client_lock);
4024 if (!session)
4025 goto out_no_session;
4026 status = nfserr_wrong_cred;
4027 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
4028 goto out;
4029 status = nfsd4_match_existing_connection(rqstp, session,
4030 bcts->dir, &conn);
4031 if (status == nfs_ok) {
4032 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
4033 bcts->dir == NFS4_CDFC4_BACK)
4034 conn->cn_flags |= NFS4_CDFC4_BACK;
4035 nfsd4_probe_callback(session->se_client);
4036 goto out;
4037 }
4038 if (status == nfserr_inval)
4039 goto out;
4040 status = nfsd4_map_bcts_dir(&bcts->dir);
4041 if (status)
4042 goto out;
4043 conn = alloc_conn(rqstp, bcts->dir);
4044 status = nfserr_jukebox;
4045 if (!conn)
4046 goto out;
4047 nfsd4_init_conn(rqstp, conn, session);
4048 status = nfs_ok;
4049out:
4050 nfsd4_put_session(session);
4051out_no_session:
4052 return status;
4053}
4054
4055static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
4056{
4057 if (!cstate->session)
4058 return false;
4059 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
4060}
4061
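/*
 * DESTROY_SESSION: after a credential check, the session is marked dead
 * and unhashed under nn->client_lock, the client's backchannel is probed
 * synchronously, and the reference taken by the sessionid lookup is
 * dropped.
 */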
4062__be32
4063nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
4064 union nfsd4_op_u *u)
4065{
4066 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
4067 struct nfsd4_session *ses;
4068 __be32 status;
4069 int ref_held_by_me = 0;
4070 struct net *net = SVC_NET(r);
4071 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4072
4073 status = nfserr_not_only_op;
4074 if (nfsd4_compound_in_session(cstate, sessionid)) {
4075 if (!nfsd4_last_compound_op(r))
4076 goto out;
4077 ref_held_by_me++;
4078 }
4079 dump_sessionid(__func__, sessionid);
4080 spin_lock(&nn->client_lock);
4081 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
4082 if (!ses)
4083 goto out_client_lock;
4084 status = nfserr_wrong_cred;
4085 if (!nfsd4_mach_creds_match(ses->se_client, r))
4086 goto out_put_session;
4087 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
4088 if (status)
4089 goto out_put_session;
4090 unhash_session(ses);
4091 spin_unlock(&nn->client_lock);
4092
4093 nfsd4_probe_callback_sync(ses->se_client);
4094
4095 spin_lock(&nn->client_lock);
4096 status = nfs_ok;
4097out_put_session:
4098 nfsd4_put_session_locked(ses);
4099out_client_lock:
4100 spin_unlock(&nn->client_lock);
4101out:
4102 return status;
4103}
4104
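/*
 * Bind the connection a SEQUENCE request arrived on to the session, unless
 * an equivalent binding already exists (the new connection is then freed)
 * or the client requires explicit binding via SP4_MACH_CRED (in which case
 * nfserr_conn_not_bound_to_session is returned).
 */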
4105static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
4106{
4107 struct nfs4_client *clp = ses->se_client;
4108 struct nfsd4_conn *c;
4109 __be32 status = nfs_ok;
4110 int ret;
4111
4112 spin_lock(&clp->cl_lock);
4113 c = __nfsd4_find_conn(new->cn_xprt, ses);
4114 if (c)
4115 goto out_free;
4116 status = nfserr_conn_not_bound_to_session;
4117 if (clp->cl_mach_cred)
4118 goto out_free;
4119 __nfsd4_hash_conn(new, ses);
4120 spin_unlock(&clp->cl_lock);
4121 ret = nfsd4_register_conn(new);
4122 if (ret)
4123 /* oops; xprt is already down: */
4124 nfsd4_conn_lost(&new->cn_xpt_user);
4125 return nfs_ok;
4126out_free:
4127 spin_unlock(&clp->cl_lock);
4128 free_conn(new);
4129 return status;
4130}
4131
4132static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
4133{
4134 struct nfsd4_compoundargs *args = rqstp->rq_argp;
4135
4136 return args->opcnt > session->se_fchannel.maxops;
4137}
4138
4139static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
4140 struct nfsd4_session *session)
4141{
4142 struct xdr_buf *xb = &rqstp->rq_arg;
4143
4144 return xb->len > session->se_fchannel.maxreq_sz;
4145}
4146
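/*
 * Sanity-check that a presumed SEQUENCE replay really matches the reply
 * cached in @slot: same cachethis setting, a plausible op count, and the
 * same credential as the original request.
 */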
4147static bool replay_matches_cache(struct svc_rqst *rqstp,
4148 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
4149{
4150 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
4151
4152 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
4153 (bool)seq->cachethis)
4154 return false;
4155 /*
4156 * If there's an error then the reply can have fewer ops than
4157 * the call.
4158 */
4159 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
4160 return false;
4161 /*
4162 * But if we cached a reply with *more* ops than the call you're
4163 * sending us now, then this new call is clearly not really a
4164 * replay of the old one:
4165 */
4166 if (slot->sl_opcnt > argp->opcnt)
4167 return false;
4168 /* This is the only check explicitly called by spec: */
4169 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
4170 return false;
4171 /*
4172 * There may be more comparisons we could actually do, but the
4173 * spec doesn't require us to catch every case where the calls
4174 * don't match (that would require caching the call as well as
4175 * the reply), so we don't bother.
4176 */
4177 return true;
4178}
4179
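/*
 * SEQUENCE: look up the session and slot, detect replays, bind the
 * connection to the session if necessary, and reserve reply buffer space.
 * On success the slot is marked NFSD4_SLOT_INUSE until
 * nfsd4_sequence_done() releases it.
 */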
4180__be32
4181nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4182 union nfsd4_op_u *u)
4183{
4184 struct nfsd4_sequence *seq = &u->sequence;
4185 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4186 struct xdr_stream *xdr = resp->xdr;
4187 struct nfsd4_session *session;
4188 struct nfs4_client *clp;
4189 struct nfsd4_slot *slot;
4190 struct nfsd4_conn *conn;
4191 __be32 status;
4192 int buflen;
4193 struct net *net = SVC_NET(rqstp);
4194 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4195
4196 if (resp->opcnt != 1)
4197 return nfserr_sequence_pos;
4198
4199 /*
4200 * Will be either used or freed by nfsd4_sequence_check_conn
4201 * below.
4202 */
4203 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
4204 if (!conn)
4205 return nfserr_jukebox;
4206
4207 spin_lock(&nn->client_lock);
4208 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
4209 if (!session)
4210 goto out_no_session;
4211 clp = session->se_client;
4212
4213 status = nfserr_too_many_ops;
4214 if (nfsd4_session_too_many_ops(rqstp, session))
4215 goto out_put_session;
4216
4217 status = nfserr_req_too_big;
4218 if (nfsd4_request_too_big(rqstp, session))
4219 goto out_put_session;
4220
4221 status = nfserr_badslot;
4222 if (seq->slotid >= session->se_fchannel.maxreqs)
4223 goto out_put_session;
4224
4225 slot = session->se_slots[seq->slotid];
4226 dprintk("%s: slotid %d\n", __func__, seq->slotid);
4227
4228	/* We do not negotiate the number of slots yet, so set the
4229	 * maxslots to the session maxreqs, which is used to encode
4230	 * sr_highest_slotid and sr_target_slotid to maxslots */
4231 seq->maxslots = session->se_fchannel.maxreqs;
4232
4233 trace_nfsd_slot_seqid_sequence(clp, seq, slot);
4234 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
4235 slot->sl_flags & NFSD4_SLOT_INUSE);
4236 if (status == nfserr_replay_cache) {
4237 status = nfserr_seq_misordered;
4238 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
4239 goto out_put_session;
4240 status = nfserr_seq_false_retry;
4241 if (!replay_matches_cache(rqstp, seq, slot))
4242 goto out_put_session;
4243 cstate->slot = slot;
4244 cstate->session = session;
4245 cstate->clp = clp;
4246 /* Return the cached reply status and set cstate->status
4247 * for nfsd4_proc_compound processing */
4248 status = nfsd4_replay_cache_entry(resp, seq);
4249 cstate->status = nfserr_replay_cache;
4250 goto out;
4251 }
4252 if (status)
4253 goto out_put_session;
4254
4255 status = nfsd4_sequence_check_conn(conn, session);
4256 conn = NULL;
4257 if (status)
4258 goto out_put_session;
4259
4260 buflen = (seq->cachethis) ?
4261 session->se_fchannel.maxresp_cached :
4262 session->se_fchannel.maxresp_sz;
4263 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
4264 nfserr_rep_too_big;
4265 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
4266 goto out_put_session;
4267 svc_reserve(rqstp, buflen);
4268
4269 status = nfs_ok;
4270 /* Success! bump slot seqid */
4271 slot->sl_seqid = seq->seqid;
4272 slot->sl_flags |= NFSD4_SLOT_INUSE;
4273 if (seq->cachethis)
4274 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
4275 else
4276 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
4277
4278 cstate->slot = slot;
4279 cstate->session = session;
4280 cstate->clp = clp;
4281
4282out:
4283 switch (clp->cl_cb_state) {
4284 case NFSD4_CB_DOWN:
4285 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
4286 break;
4287 case NFSD4_CB_FAULT:
4288 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
4289 break;
4290 default:
4291 seq->status_flags = 0;
4292 }
4293 if (!list_empty(&clp->cl_revoked))
4294 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
4295 if (atomic_read(&clp->cl_admin_revoked))
4296 seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
4297 trace_nfsd_seq4_status(rqstp, seq);
4298out_no_session:
4299 if (conn)
4300 free_conn(conn);
4301 spin_unlock(&nn->client_lock);
4302 return status;
4303out_put_session:
4304 nfsd4_put_session_locked(session);
4305 goto out_no_session;
4306}
4307
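/*
 * Called once the compound has been processed: cache the reply in the slot
 * (unless this was itself a replay), release the slot, and drop the session
 * or client reference taken when the compound began.
 */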
4308void
4309nfsd4_sequence_done(struct nfsd4_compoundres *resp)
4310{
4311 struct nfsd4_compound_state *cs = &resp->cstate;
4312
4313 if (nfsd4_has_session(cs)) {
4314 if (cs->status != nfserr_replay_cache) {
4315 nfsd4_store_cache_entry(resp);
4316 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
4317 }
4318 /* Drop session reference that was taken in nfsd4_sequence() */
4319 nfsd4_put_session(cs->session);
4320 } else if (cs->clp)
4321 put_client_renew(cs->clp);
4322}
4323
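/*
 * DESTROY_CLIENTID: a confirmed client can only be destroyed once all of
 * its state (opens, locks, delegations, sessions) is gone; an unconfirmed
 * client is simply expired.
 */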
4324__be32
4325nfsd4_destroy_clientid(struct svc_rqst *rqstp,
4326 struct nfsd4_compound_state *cstate,
4327 union nfsd4_op_u *u)
4328{
4329 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
4330 struct nfs4_client *conf, *unconf;
4331 struct nfs4_client *clp = NULL;
4332 __be32 status = 0;
4333 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4334
4335 spin_lock(&nn->client_lock);
4336 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
4337 conf = find_confirmed_client(&dc->clientid, true, nn);
4338 WARN_ON_ONCE(conf && unconf);
4339
4340 if (conf) {
4341 if (client_has_state(conf)) {
4342 status = nfserr_clientid_busy;
4343 goto out;
4344 }
4345 status = mark_client_expired_locked(conf);
4346 if (status)
4347 goto out;
4348 clp = conf;
4349 } else if (unconf)
4350 clp = unconf;
4351 else {
4352 status = nfserr_stale_clientid;
4353 goto out;
4354 }
4355 if (!nfsd4_mach_creds_match(clp, rqstp)) {
4356 clp = NULL;
4357 status = nfserr_wrong_cred;
4358 goto out;
4359 }
4360 trace_nfsd_clid_destroyed(&clp->cl_clientid);
4361 unhash_client_locked(clp);
4362out:
4363 spin_unlock(&nn->client_lock);
4364 if (clp)
4365 expire_client(clp);
4366 return status;
4367}
4368
4369__be32
4370nfsd4_reclaim_complete(struct svc_rqst *rqstp,
4371 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4372{
4373 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
4374 struct nfs4_client *clp = cstate->clp;
4375 __be32 status = 0;
4376
4377 if (rc->rca_one_fs) {
4378 if (!cstate->current_fh.fh_dentry)
4379 return nfserr_nofilehandle;
4380 /*
4381 * We don't take advantage of the rca_one_fs case.
4382 * That's OK, it's optional, we can safely ignore it.
4383 */
4384 return nfs_ok;
4385 }
4386
4387 status = nfserr_complete_already;
4388 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4389 goto out;
4390
4391 status = nfserr_stale_clientid;
4392 if (is_client_expired(clp))
4393 /*
4394 * The following error isn't really legal.
4395	 * But we only get here if the client has just explicitly
4396	 * destroyed itself. Surely it no longer cares what
4397 * error it gets back on an operation for the dead
4398 * client.
4399 */
4400 goto out;
4401
4402 status = nfs_ok;
4403 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4404 nfsd4_client_record_create(clp);
4405 inc_reclaim_complete(clp);
4406out:
4407 return status;
4408}
4409
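/*
 * SETCLIENTID (NFSv4.0): record the client's name, verifier and callback
 * information as an unconfirmed client, replacing any previous unconfirmed
 * record, and return the clientid and confirmation verifier to be used in
 * the subsequent SETCLIENTID_CONFIRM.
 */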
4410__be32
4411nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4412 union nfsd4_op_u *u)
4413{
4414 struct nfsd4_setclientid *setclid = &u->setclientid;
4415 struct xdr_netobj clname = setclid->se_name;
4416 nfs4_verifier clverifier = setclid->se_verf;
4417 struct nfs4_client *conf, *new;
4418 struct nfs4_client *unconf = NULL;
4419 __be32 status;
4420 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4421
4422 new = create_client(clname, rqstp, &clverifier);
4423 if (new == NULL)
4424 return nfserr_jukebox;
4425 spin_lock(&nn->client_lock);
4426 conf = find_confirmed_client_by_name(&clname, nn);
4427 if (conf && client_has_state(conf)) {
4428 status = nfserr_clid_inuse;
4429 if (clp_used_exchangeid(conf))
4430 goto out;
4431 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4432 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4433 goto out;
4434 }
4435 }
4436 unconf = find_unconfirmed_client_by_name(&clname, nn);
4437 if (unconf)
4438 unhash_client_locked(unconf);
4439 if (conf) {
4440 if (same_verf(&conf->cl_verifier, &clverifier)) {
4441 copy_clid(new, conf);
4442 gen_confirm(new, nn);
4443 } else
4444 trace_nfsd_clid_verf_mismatch(conf, rqstp,
4445 &clverifier);
4446 } else
4447 trace_nfsd_clid_fresh(new);
4448 new->cl_minorversion = 0;
4449 gen_callback(new, setclid, rqstp);
4450 add_to_unconfirmed(new);
4451 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4452 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4453 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4454 new = NULL;
4455 status = nfs_ok;
4456out:
4457 spin_unlock(&nn->client_lock);
4458 if (new)
4459 free_client(new);
4460 if (unconf) {
4461 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4462 expire_client(unconf);
4463 }
4464 return status;
4465}
4466
4467__be32
4468nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4469 struct nfsd4_compound_state *cstate,
4470 union nfsd4_op_u *u)
4471{
4472 struct nfsd4_setclientid_confirm *setclientid_confirm =
4473 &u->setclientid_confirm;
4474 struct nfs4_client *conf, *unconf;
4475 struct nfs4_client *old = NULL;
4476 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4477 clientid_t * clid = &setclientid_confirm->sc_clientid;
4478 __be32 status;
4479 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4480
4481 if (STALE_CLIENTID(clid, nn))
4482 return nfserr_stale_clientid;
4483
4484 spin_lock(&nn->client_lock);
4485 conf = find_confirmed_client(clid, false, nn);
4486 unconf = find_unconfirmed_client(clid, false, nn);
4487 /*
4488	 * We try hard to give out unique clientids, so if we get an
4489 * attempt to confirm the same clientid with a different cred,
4490 * the client may be buggy; this should never happen.
4491 *
4492 * Nevertheless, RFC 7530 recommends INUSE for this case:
4493 */
4494 status = nfserr_clid_inuse;
4495 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4496 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4497 goto out;
4498 }
4499 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4500 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4501 goto out;
4502 }
4503 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4504 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4505 status = nfs_ok;
4506 } else
4507 status = nfserr_stale_clientid;
4508 goto out;
4509 }
4510 status = nfs_ok;
4511 if (conf) {
4512 old = unconf;
4513 unhash_client_locked(old);
4514 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4515 } else {
4516 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4517 if (old) {
4518 status = nfserr_clid_inuse;
4519 if (client_has_state(old)
4520 && !same_creds(&unconf->cl_cred,
4521 &old->cl_cred)) {
4522 old = NULL;
4523 goto out;
4524 }
4525 status = mark_client_expired_locked(old);
4526 if (status) {
4527 old = NULL;
4528 goto out;
4529 }
4530 trace_nfsd_clid_replaced(&old->cl_clientid);
4531 }
4532 move_to_confirmed(unconf);
4533 conf = unconf;
4534 }
4535 get_client_locked(conf);
4536 spin_unlock(&nn->client_lock);
4537 if (conf == unconf)
4538 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4539 nfsd4_probe_callback(conf);
4540 spin_lock(&nn->client_lock);
4541 put_client_renew_locked(conf);
4542out:
4543 spin_unlock(&nn->client_lock);
4544 if (old)
4545 expire_client(old);
4546 return status;
4547}
4548
4549static struct nfs4_file *nfsd4_alloc_file(void)
4550{
4551 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4552}
4553
4554/* OPEN Share state helper functions */
4555
4556static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
4557{
4558 refcount_set(&fp->fi_ref, 1);
4559 spin_lock_init(&fp->fi_lock);
4560 INIT_LIST_HEAD(&fp->fi_stateids);
4561 INIT_LIST_HEAD(&fp->fi_delegations);
4562 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4563 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4564 fp->fi_deleg_file = NULL;
4565 fp->fi_had_conflict = false;
4566 fp->fi_share_deny = 0;
4567 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4568 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4569 fp->fi_aliased = false;
4570 fp->fi_inode = d_inode(fh->fh_dentry);
4571#ifdef CONFIG_NFSD_PNFS
4572 INIT_LIST_HEAD(&fp->fi_lo_states);
4573 atomic_set(&fp->fi_lo_recalls, 0);
4574#endif
4575}
4576
4577void
4578nfsd4_free_slabs(void)
4579{
4580 kmem_cache_destroy(client_slab);
4581 kmem_cache_destroy(openowner_slab);
4582 kmem_cache_destroy(lockowner_slab);
4583 kmem_cache_destroy(file_slab);
4584 kmem_cache_destroy(stateid_slab);
4585 kmem_cache_destroy(deleg_slab);
4586 kmem_cache_destroy(odstate_slab);
4587}
4588
4589int
4590nfsd4_init_slabs(void)
4591{
4592 client_slab = KMEM_CACHE(nfs4_client, 0);
4593 if (client_slab == NULL)
4594 goto out;
4595 openowner_slab = KMEM_CACHE(nfs4_openowner, 0);
4596 if (openowner_slab == NULL)
4597 goto out_free_client_slab;
4598 lockowner_slab = KMEM_CACHE(nfs4_lockowner, 0);
4599 if (lockowner_slab == NULL)
4600 goto out_free_openowner_slab;
4601 file_slab = KMEM_CACHE(nfs4_file, 0);
4602 if (file_slab == NULL)
4603 goto out_free_lockowner_slab;
4604 stateid_slab = KMEM_CACHE(nfs4_ol_stateid, 0);
4605 if (stateid_slab == NULL)
4606 goto out_free_file_slab;
4607 deleg_slab = KMEM_CACHE(nfs4_delegation, 0);
4608 if (deleg_slab == NULL)
4609 goto out_free_stateid_slab;
4610 odstate_slab = KMEM_CACHE(nfs4_clnt_odstate, 0);
4611 if (odstate_slab == NULL)
4612 goto out_free_deleg_slab;
4613 return 0;
4614
4615out_free_deleg_slab:
4616 kmem_cache_destroy(deleg_slab);
4617out_free_stateid_slab:
4618 kmem_cache_destroy(stateid_slab);
4619out_free_file_slab:
4620 kmem_cache_destroy(file_slab);
4621out_free_lockowner_slab:
4622 kmem_cache_destroy(lockowner_slab);
4623out_free_openowner_slab:
4624 kmem_cache_destroy(openowner_slab);
4625out_free_client_slab:
4626 kmem_cache_destroy(client_slab);
4627out:
4628 return -ENOMEM;
4629}
4630
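/*
 * State shrinker: the count callback reports courtesy clients (or, failing
 * that, outstanding delegations) and queues nfsd_shrinker_work to do the
 * actual reclaim, so the scan callback simply returns SHRINK_STOP.
 */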
4631static unsigned long
4632nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
4633{
4634 int count;
4635 struct nfsd_net *nn = shrink->private_data;
4636
4637 count = atomic_read(&nn->nfsd_courtesy_clients);
4638 if (!count)
4639 count = atomic_long_read(&num_delegations);
4640 if (count)
4641 queue_work(laundry_wq, &nn->nfsd_shrinker_work);
4642 return (unsigned long)count;
4643}
4644
4645static unsigned long
4646nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
4647{
4648 return SHRINK_STOP;
4649}
4650
4651void
4652nfsd4_init_leases_net(struct nfsd_net *nn)
4653{
4654 struct sysinfo si;
4655 u64 max_clients;
4656
4657 nn->nfsd4_lease = 90; /* default lease time */
4658 nn->nfsd4_grace = 90;
4659 nn->somebody_reclaimed = false;
4660 nn->track_reclaim_completes = false;
4661 nn->clverifier_counter = get_random_u32();
4662 nn->clientid_base = get_random_u32();
4663 nn->clientid_counter = nn->clientid_base + 1;
4664 nn->s2s_cp_cl_id = nn->clientid_counter++;
4665
4666 atomic_set(&nn->nfs4_client_count, 0);
4667 si_meminfo(&si);
4668 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
4669 max_clients *= NFS4_CLIENTS_PER_GB;
4670 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
4671
4672 atomic_set(&nn->nfsd_courtesy_clients, 0);
4673}
4674
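/*
 * rp_locked serializes use of a stateowner's replay buffer for NFSv4.0
 * requests. RP_UNHASHED tells waiters that the owner is being torn down
 * (e.g. moved to the close LRU) and that they must retry their lookup.
 */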
4675enum rp_lock {
4676 RP_UNLOCKED,
4677 RP_LOCKED,
4678 RP_UNHASHED,
4679};
4680
4681static void init_nfs4_replay(struct nfs4_replay *rp)
4682{
4683 rp->rp_status = nfserr_serverfault;
4684 rp->rp_buflen = 0;
4685 rp->rp_buf = rp->rp_ibuf;
4686 atomic_set(&rp->rp_locked, RP_UNLOCKED);
4687}
4688
4689static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4690 struct nfs4_stateowner *so)
4691{
4692 if (!nfsd4_has_session(cstate)) {
4693 wait_var_event(&so->so_replay.rp_locked,
4694 atomic_cmpxchg(&so->so_replay.rp_locked,
4695 RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
4696 if (atomic_read(&so->so_replay.rp_locked) == RP_UNHASHED)
4697 return -EAGAIN;
4698 cstate->replay_owner = nfs4_get_stateowner(so);
4699 }
4700 return 0;
4701}
4702
4703void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4704{
4705 struct nfs4_stateowner *so = cstate->replay_owner;
4706
4707 if (so != NULL) {
4708 cstate->replay_owner = NULL;
4709 atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED);
4710 smp_mb__after_atomic();
4711 wake_up_var(&so->so_replay.rp_locked);
4712 nfs4_put_stateowner(so);
4713 }
4714}
4715
4716static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4717{
4718 struct nfs4_stateowner *sop;
4719
4720 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4721 if (!sop)
4722 return NULL;
4723
4724 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4725 if (!sop->so_owner.data) {
4726 kmem_cache_free(slab, sop);
4727 return NULL;
4728 }
4729
4730 INIT_LIST_HEAD(&sop->so_stateids);
4731 sop->so_client = clp;
4732 init_nfs4_replay(&sop->so_replay);
4733 atomic_set(&sop->so_count, 1);
4734 return sop;
4735}
4736
4737static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4738{
4739 lockdep_assert_held(&clp->cl_lock);
4740
4741 list_add(&oo->oo_owner.so_strhash,
4742 &clp->cl_ownerstr_hashtbl[strhashval]);
4743 list_add(&oo->oo_perclient, &clp->cl_openowners);
4744}
4745
4746static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4747{
4748 unhash_openowner_locked(openowner(so));
4749}
4750
4751static void nfs4_free_openowner(struct nfs4_stateowner *so)
4752{
4753 struct nfs4_openowner *oo = openowner(so);
4754
4755 kmem_cache_free(openowner_slab, oo);
4756}
4757
4758static const struct nfs4_stateowner_operations openowner_ops = {
4759 .so_unhash = nfs4_unhash_openowner,
4760 .so_free = nfs4_free_openowner,
4761};
4762
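/*
 * Find an existing OPEN stateid for this openowner on @fp. The caller must
 * hold fp->fi_lock; a reference is taken on any stateid returned.
 */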
4763static struct nfs4_ol_stateid *
4764nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4765{
4766 struct nfs4_ol_stateid *local, *ret = NULL;
4767 struct nfs4_openowner *oo = open->op_openowner;
4768
4769 lockdep_assert_held(&fp->fi_lock);
4770
4771 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4772 /* ignore lock owners */
4773 if (local->st_stateowner->so_is_open_owner == 0)
4774 continue;
4775 if (local->st_stateowner != &oo->oo_owner)
4776 continue;
4777 if (local->st_stid.sc_type == SC_TYPE_OPEN &&
4778 !local->st_stid.sc_status) {
4779 ret = local;
4780 refcount_inc(&ret->st_stid.sc_count);
4781 break;
4782 }
4783 }
4784 return ret;
4785}
4786
4787static void nfsd4_drop_revoked_stid(struct nfs4_stid *s)
4788 __releases(&s->sc_client->cl_lock)
4789{
4790 struct nfs4_client *cl = s->sc_client;
4791 LIST_HEAD(reaplist);
4792 struct nfs4_ol_stateid *stp;
4793 struct nfs4_delegation *dp;
4794 bool unhashed;
4795
4796 switch (s->sc_type) {
4797 case SC_TYPE_OPEN:
4798 stp = openlockstateid(s);
4799 if (unhash_open_stateid(stp, &reaplist))
4800 put_ol_stateid_locked(stp, &reaplist);
4801 spin_unlock(&cl->cl_lock);
4802 free_ol_stateid_reaplist(&reaplist);
4803 break;
4804 case SC_TYPE_LOCK:
4805 stp = openlockstateid(s);
4806 unhashed = unhash_lock_stateid(stp);
4807 spin_unlock(&cl->cl_lock);
4808 if (unhashed)
4809 nfs4_put_stid(s);
4810 break;
4811 case SC_TYPE_DELEG:
4812 dp = delegstateid(s);
4813 list_del_init(&dp->dl_recall_lru);
4814 spin_unlock(&cl->cl_lock);
4815 nfs4_put_stid(s);
4816 break;
4817 default:
4818 spin_unlock(&cl->cl_lock);
4819 }
4820}
4821
4822static void nfsd40_drop_revoked_stid(struct nfs4_client *cl,
4823 stateid_t *stid)
4824{
4825 /* NFSv4.0 has no way for the client to tell the server
4826 * that it can forget an admin-revoked stateid.
4827 * So we keep it around until the first time that the
4828 * client uses it, and drop it the first time
4829 * nfserr_admin_revoked is returned.
4830 * For v4.1 and later we wait until explicitly told
4831 * to free the stateid.
4832 */
4833 if (cl->cl_minorversion == 0) {
4834 struct nfs4_stid *st;
4835
4836 spin_lock(&cl->cl_lock);
4837 st = find_stateid_locked(cl, stid);
4838 if (st)
4839 nfsd4_drop_revoked_stid(st);
4840 else
4841 spin_unlock(&cl->cl_lock);
4842 }
4843}
4844
4845static __be32
4846nfsd4_verify_open_stid(struct nfs4_stid *s)
4847{
4848 __be32 ret = nfs_ok;
4849
4850 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
4851 ret = nfserr_admin_revoked;
4852 else if (s->sc_status & SC_STATUS_REVOKED)
4853 ret = nfserr_deleg_revoked;
4854 else if (s->sc_status & SC_STATUS_CLOSED)
4855 ret = nfserr_bad_stateid;
4856 return ret;
4857}
4858
4859/* Lock the stateid st_mutex, and deal with races with CLOSE */
4860static __be32
4861nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4862{
4863 __be32 ret;
4864
4865 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4866 ret = nfsd4_verify_open_stid(&stp->st_stid);
4867 if (ret == nfserr_admin_revoked)
4868 nfsd40_drop_revoked_stid(stp->st_stid.sc_client,
4869 &stp->st_stid.sc_stateid);
4870
4871 if (ret != nfs_ok)
4872 mutex_unlock(&stp->st_mutex);
4873 return ret;
4874}
4875
4876static struct nfs4_ol_stateid *
4877nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4878{
4879 struct nfs4_ol_stateid *stp;
4880 for (;;) {
4881 spin_lock(&fp->fi_lock);
4882 stp = nfsd4_find_existing_open(fp, open);
4883 spin_unlock(&fp->fi_lock);
4884 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4885 break;
4886 nfs4_put_stid(&stp->st_stid);
4887 }
4888 return stp;
4889}
4890
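/*
 * Look up the openowner for this OPEN under cl_lock, allocating and hashing
 * a new one if none exists. An unconfirmed owner found here is released and
 * replaced without any replay checking.
 */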
4891static struct nfs4_openowner *
4892find_or_alloc_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4893 struct nfsd4_compound_state *cstate)
4894{
4895 struct nfs4_client *clp = cstate->clp;
4896 struct nfs4_openowner *oo, *new = NULL;
4897
4898retry:
4899 spin_lock(&clp->cl_lock);
4900 oo = find_openstateowner_str(strhashval, open, clp);
4901 if (!oo && new) {
4902 hash_openowner(new, clp, strhashval);
4903 spin_unlock(&clp->cl_lock);
4904 return new;
4905 }
4906 spin_unlock(&clp->cl_lock);
4907
4908 if (oo && !(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4909 /* Replace unconfirmed owners without checking for replay. */
4910 release_openowner(oo);
4911 oo = NULL;
4912 }
4913 if (oo) {
4914 if (new)
4915 nfs4_free_stateowner(&new->oo_owner);
4916 return oo;
4917 }
4918
4919 new = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4920 if (!new)
4921 return NULL;
4922 new->oo_owner.so_ops = &openowner_ops;
4923 new->oo_owner.so_is_open_owner = 1;
4924 new->oo_owner.so_seqid = open->op_seqid;
4925 new->oo_flags = 0;
4926 if (nfsd4_has_session(cstate))
4927 new->oo_flags |= NFS4_OO_CONFIRMED;
4928 new->oo_time = 0;
4929 new->oo_last_closed_stid = NULL;
4930 INIT_LIST_HEAD(&new->oo_close_lru);
4931 goto retry;
4932}
4933
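/*
 * Hash a new OPEN stateid for @fp, or return an existing one if another
 * thread raced us; either way the returned stateid has st_mutex held.
 */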
4934static struct nfs4_ol_stateid *
4935init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4936{
4937
4938 struct nfs4_openowner *oo = open->op_openowner;
4939 struct nfs4_ol_stateid *retstp = NULL;
4940 struct nfs4_ol_stateid *stp;
4941
4942 stp = open->op_stp;
4943	/* Initialize and lock st_mutex outside the spinlocks: mutex_lock() may sleep */
4944 mutex_init(&stp->st_mutex);
4945 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4946
4947retry:
4948 spin_lock(&oo->oo_owner.so_client->cl_lock);
4949 spin_lock(&fp->fi_lock);
4950
4951 retstp = nfsd4_find_existing_open(fp, open);
4952 if (retstp)
4953 goto out_unlock;
4954
4955 open->op_stp = NULL;
4956 refcount_inc(&stp->st_stid.sc_count);
4957 stp->st_stid.sc_type = SC_TYPE_OPEN;
4958 INIT_LIST_HEAD(&stp->st_locks);
4959 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4960 get_nfs4_file(fp);
4961 stp->st_stid.sc_file = fp;
4962 stp->st_access_bmap = 0;
4963 stp->st_deny_bmap = 0;
4964 stp->st_openstp = NULL;
4965 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4966 list_add(&stp->st_perfile, &fp->fi_stateids);
4967
4968out_unlock:
4969 spin_unlock(&fp->fi_lock);
4970 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4971 if (retstp) {
4972 /* Handle races with CLOSE */
4973 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4974 nfs4_put_stid(&retstp->st_stid);
4975 goto retry;
4976 }
4977 /* To keep mutex tracking happy */
4978 mutex_unlock(&stp->st_mutex);
4979 stp = retstp;
4980 }
4981 return stp;
4982}
4983
4984/*
4985 * In the 4.0 case we need to keep the owners around a little while to handle
4986 * CLOSE replay. We still need to release any file access that is held by
4987 * them before returning, however.
4988 */
4989static void
4990move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4991{
4992 struct nfs4_ol_stateid *last;
4993 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4994 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4995 nfsd_net_id);
4996
4997 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4998
4999 /*
5000 * We know that we hold one reference via nfsd4_close, and another
5001 * "persistent" reference for the client. If the refcount is higher
5002 * than 2, then there are still calls in progress that are using this
5003 * stateid. We can't put the sc_file reference until they are finished.
5004 * Wait for the refcount to drop to 2. Since it has been unhashed,
5005 * there should be no danger of the refcount going back up again at
5006 * this point.
5007 * Some threads with a reference might be waiting for rp_locked,
5008 * so tell them to stop waiting.
5009 */
5010 atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
5011 smp_mb__after_atomic();
5012 wake_up_var(&oo->oo_owner.so_replay.rp_locked);
5013 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
5014
5015 release_all_access(s);
5016 if (s->st_stid.sc_file) {
5017 put_nfs4_file(s->st_stid.sc_file);
5018 s->st_stid.sc_file = NULL;
5019 }
5020
5021 spin_lock(&nn->client_lock);
5022 last = oo->oo_last_closed_stid;
5023 oo->oo_last_closed_stid = s;
5024 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
5025 oo->oo_time = ktime_get_boottime_seconds();
5026 spin_unlock(&nn->client_lock);
5027 if (last)
5028 nfs4_put_stid(&last->st_stid);
5029}
5030
5031static noinline_for_stack struct nfs4_file *
5032nfsd4_file_hash_lookup(const struct svc_fh *fhp)
5033{
5034 struct inode *inode = d_inode(fhp->fh_dentry);
5035 struct rhlist_head *tmp, *list;
5036 struct nfs4_file *fi;
5037
5038 rcu_read_lock();
5039 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
5040 nfs4_file_rhash_params);
5041 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
5042 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
5043 if (refcount_inc_not_zero(&fi->fi_ref)) {
5044 rcu_read_unlock();
5045 return fi;
5046 }
5047 }
5048 }
5049 rcu_read_unlock();
5050 return NULL;
5051}
5052
5053/*
5054 * On hash insertion, identify entries with the same inode but
5055 * distinct filehandles. They will all be on the list returned
5056 * by rhltable_lookup().
5057 *
5058 * inode->i_lock prevents racing insertions from adding an entry
5059 * for the same inode/fhp pair twice.
5060 */
5061static noinline_for_stack struct nfs4_file *
5062nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
5063{
5064 struct inode *inode = d_inode(fhp->fh_dentry);
5065 struct rhlist_head *tmp, *list;
5066 struct nfs4_file *ret = NULL;
5067 bool alias_found = false;
5068 struct nfs4_file *fi;
5069 int err;
5070
5071 rcu_read_lock();
5072 spin_lock(&inode->i_lock);
5073
5074 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
5075 nfs4_file_rhash_params);
5076 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
5077 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
5078 if (refcount_inc_not_zero(&fi->fi_ref))
5079 ret = fi;
5080 } else
5081 fi->fi_aliased = alias_found = true;
5082 }
5083 if (ret)
5084 goto out_unlock;
5085
5086 nfsd4_file_init(fhp, new);
5087 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
5088 nfs4_file_rhash_params);
5089 if (err)
5090 goto out_unlock;
5091
5092 new->fi_aliased = alias_found;
5093 ret = new;
5094
5095out_unlock:
5096 spin_unlock(&inode->i_lock);
5097 rcu_read_unlock();
5098 return ret;
5099}
5100
5101static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
5102{
5103 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
5104 nfs4_file_rhash_params);
5105}
5106
5107/*
5108 * Called to check for deny-mode conflicts when a READ arrives with the
5109 * all-zero stateid, or a WRITE with the all-zero or all-ones stateid.
5110 */
5111static __be32
5112nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
5113{
5114 struct nfs4_file *fp;
5115 __be32 ret = nfs_ok;
5116
5117 fp = nfsd4_file_hash_lookup(current_fh);
5118 if (!fp)
5119 return ret;
5120
5121 /* Check for conflicting share reservations */
5122 spin_lock(&fp->fi_lock);
5123 if (fp->fi_share_deny & deny_type)
5124 ret = nfserr_locked;
5125 spin_unlock(&fp->fi_lock);
5126 put_nfs4_file(fp);
5127 return ret;
5128}
5129
5130static bool nfsd4_deleg_present(const struct inode *inode)
5131{
5132 struct file_lock_context *ctx = locks_inode_context(inode);
5133
5134 return ctx && !list_empty_careful(&ctx->flc_lease);
5135}
5136
5137/**
5138 * nfsd_wait_for_delegreturn - wait for delegations to be returned
5139 * @rqstp: the RPC transaction being executed
5140 * @inode: in-core inode of the file being waited for
5141 *
5142 * The timeout prevents deadlock if all nfsd threads happen to be
5143 * tied up waiting for returning delegations.
5144 *
5145 * Return values:
5146 * %true: delegation was returned
5147 * %false: timed out waiting for delegreturn
5148 */
5149bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
5150{
5151 long __maybe_unused timeo;
5152
5153 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
5154 NFSD_DELEGRETURN_TIMEOUT);
5155 trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
5156 return timeo > 0;
5157}
5158
5159static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
5160{
5161 struct nfs4_delegation *dp = cb_to_delegation(cb);
5162 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
5163 nfsd_net_id);
5164
5165 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
5166
5167 /*
5168 * We can't do this in nfsd_break_deleg_cb because it is
5169 * already holding inode->i_lock.
5170 *
5171 * If the dl_time != 0, then we know that it has already been
5172 * queued for a lease break. Don't queue it again.
5173 */
5174 spin_lock(&state_lock);
5175 if (delegation_hashed(dp) && dp->dl_time == 0) {
5176 dp->dl_time = ktime_get_boottime_seconds();
5177 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
5178 }
5179 spin_unlock(&state_lock);
5180}
5181
5182static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
5183 struct rpc_task *task)
5184{
5185 struct nfs4_delegation *dp = cb_to_delegation(cb);
5186
5187 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
5188
5189 if (dp->dl_stid.sc_status)
5190 /* CLOSED or REVOKED */
5191 return 1;
5192
5193 switch (task->tk_status) {
5194 case 0:
5195 return 1;
5196 case -NFS4ERR_DELAY:
5197 rpc_delay(task, 2 * HZ);
5198 return 0;
5199 case -EBADHANDLE:
5200 case -NFS4ERR_BAD_STATEID:
5201 /*
5202 * Race: client probably got cb_recall before open reply
5203 * granting delegation.
5204 */
5205 if (dp->dl_retries--) {
5206 rpc_delay(task, 2 * HZ);
5207 return 0;
5208 }
5209 fallthrough;
5210 default:
5211 return 1;
5212 }
5213}
5214
5215static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
5216{
5217 struct nfs4_delegation *dp = cb_to_delegation(cb);
5218
5219 nfs4_put_stid(&dp->dl_stid);
5220}
5221
5222static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
5223 .prepare = nfsd4_cb_recall_prepare,
5224 .done = nfsd4_cb_recall_done,
5225 .release = nfsd4_cb_recall_release,
5226 .opcode = OP_CB_RECALL,
5227};
5228
5229static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
5230{
5231 /*
5232 * We're assuming the state code never drops its reference
5233 * without first removing the lease. Since we're in this lease
5234 * callback (and since the lease code is serialized by the
5235 * flc_lock) we know the server hasn't removed the lease yet, and
5236 * we know it's safe to take a reference.
5237 */
5238 refcount_inc(&dp->dl_stid.sc_count);
5239 WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
5240}
5241
5242/* Called from break_lease() with flc_lock held. */
5243static bool
5244nfsd_break_deleg_cb(struct file_lease *fl)
5245{
5246 struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner;
5247 struct nfs4_file *fp = dp->dl_stid.sc_file;
5248 struct nfs4_client *clp = dp->dl_stid.sc_client;
5249 struct nfsd_net *nn;
5250
5251 trace_nfsd_cb_recall(&dp->dl_stid);
5252
5253 dp->dl_recalled = true;
5254 atomic_inc(&clp->cl_delegs_in_recall);
5255 if (try_to_expire_client(clp)) {
5256 nn = net_generic(clp->net, nfsd_net_id);
5257 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
5258 }
5259
5260 /*
5261	 * We don't want the locks code to time out the lease for us;
5262	 * we'll remove it ourselves if a delegation isn't returned
5263 * in time:
5264 */
5265 fl->fl_break_time = 0;
5266
5267 fp->fi_had_conflict = true;
5268 nfsd_break_one_deleg(dp);
5269 return false;
5270}
5271
5272/**
5273 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
5274 * @fl: Lock state to check
5275 *
5276 * Return values:
5277 * %true: Lease conflict was resolved
5278 * %false: Lease conflict was not resolved.
5279 */
5280static bool nfsd_breaker_owns_lease(struct file_lease *fl)
5281{
5282 struct nfs4_delegation *dl = fl->c.flc_owner;
5283 struct svc_rqst *rqst;
5284 struct nfs4_client *clp;
5285
5286 rqst = nfsd_current_rqst();
5287 if (!nfsd_v4client(rqst))
5288 return false;
5289 clp = *(rqst->rq_lease_breaker);
5290 return dl->dl_stid.sc_client == clp;
5291}
5292
5293static int
5294nfsd_change_deleg_cb(struct file_lease *onlist, int arg,
5295 struct list_head *dispose)
5296{
5297 struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner;
5298 struct nfs4_client *clp = dp->dl_stid.sc_client;
5299
5300 if (arg & F_UNLCK) {
5301 if (dp->dl_recalled)
5302 atomic_dec(&clp->cl_delegs_in_recall);
5303 return lease_modify(onlist, arg, dispose);
5304 } else
5305 return -EAGAIN;
5306}
5307
5308static const struct lease_manager_operations nfsd_lease_mng_ops = {
5309 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
5310 .lm_break = nfsd_break_deleg_cb,
5311 .lm_change = nfsd_change_deleg_cb,
5312};
5313
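/*
 * NFSv4.0 seqid checking: a seqid one less than the owner's current value
 * indicates a replay, and anything else that doesn't match exactly is a bad
 * seqid. Sessions (v4.1+) make the check unnecessary.
 */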
5314static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
5315{
5316 if (nfsd4_has_session(cstate))
5317 return nfs_ok;
5318 if (seqid == so->so_seqid - 1)
5319 return nfserr_replay_me;
5320 if (seqid == so->so_seqid)
5321 return nfs_ok;
5322 return nfserr_bad_seqid;
5323}
5324
5325static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
5326 struct nfsd_net *nn)
5327{
5328 struct nfs4_client *found;
5329
5330 spin_lock(&nn->client_lock);
5331 found = find_confirmed_client(clid, sessions, nn);
5332 if (found)
5333 atomic_inc(&found->cl_rpc_users);
5334 spin_unlock(&nn->client_lock);
5335 return found;
5336}
5337
5338static __be32 set_client(clientid_t *clid,
5339 struct nfsd4_compound_state *cstate,
5340 struct nfsd_net *nn)
5341{
5342 if (cstate->clp) {
5343 if (!same_clid(&cstate->clp->cl_clientid, clid))
5344 return nfserr_stale_clientid;
5345 return nfs_ok;
5346 }
5347 if (STALE_CLIENTID(clid, nn))
5348 return nfserr_stale_clientid;
5349 /*
5350 * We're in the 4.0 case (otherwise the SEQUENCE op would have
5351	 * set cstate->clp), so sessions = false:
5352 */
5353 cstate->clp = lookup_clientid(clid, false, nn);
5354 if (!cstate->clp)
5355 return nfserr_expired;
5356 return nfs_ok;
5357}
5358
5359__be32
5360nfsd4_process_open1(struct nfsd4_compound_state *cstate,
5361 struct nfsd4_open *open, struct nfsd_net *nn)
5362{
5363 clientid_t *clientid = &open->op_clientid;
5364 struct nfs4_client *clp = NULL;
5365 unsigned int strhashval;
5366 struct nfs4_openowner *oo = NULL;
5367 __be32 status;
5368
5369 /*
5370 * In case we need it later, after we've already created the
5371 * file and don't want to risk a further failure:
5372 */
5373 open->op_file = nfsd4_alloc_file();
5374 if (open->op_file == NULL)
5375 return nfserr_jukebox;
5376
5377 status = set_client(clientid, cstate, nn);
5378 if (status)
5379 return status;
5380 clp = cstate->clp;
5381
5382 strhashval = ownerstr_hashval(&open->op_owner);
5383retry:
5384 oo = find_or_alloc_open_stateowner(strhashval, open, cstate);
5385 open->op_openowner = oo;
5386 if (!oo)
5387 return nfserr_jukebox;
5388 if (nfsd4_cstate_assign_replay(cstate, &oo->oo_owner) == -EAGAIN) {
5389 nfs4_put_stateowner(&oo->oo_owner);
5390 goto retry;
5391 }
5392 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
5393 if (status)
5394 return status;
5395
5396 open->op_stp = nfs4_alloc_open_stateid(clp);
5397 if (!open->op_stp)
5398 return nfserr_jukebox;
5399
5400 if (nfsd4_has_session(cstate) &&
5401 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
5402 open->op_odstate = alloc_clnt_odstate(clp);
5403 if (!open->op_odstate)
5404 return nfserr_jukebox;
5405 }
5406
5407 return nfs_ok;
5408}
5409
5410static inline __be32
5411nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
5412{
5413 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
5414 return nfserr_openmode;
5415 else
5416 return nfs_ok;
5417}
5418
5419static int share_access_to_flags(u32 share_access)
5420{
5421 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
5422}
5423
5424static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl,
5425 stateid_t *s)
5426{
5427 struct nfs4_stid *ret;
5428
5429 ret = find_stateid_by_type(cl, s, SC_TYPE_DELEG, SC_STATUS_REVOKED);
5430 if (!ret)
5431 return NULL;
5432 return delegstateid(ret);
5433}
5434
5435static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
5436{
5437 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
5438 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
5439}
5440
5441static __be32
5442nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
5443 struct nfs4_delegation **dp)
5444{
5445 int flags;
5446 __be32 status = nfserr_bad_stateid;
5447 struct nfs4_delegation *deleg;
5448
5449 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
5450 if (deleg == NULL)
5451 goto out;
5452 if (deleg->dl_stid.sc_status & SC_STATUS_ADMIN_REVOKED) {
5453 nfs4_put_stid(&deleg->dl_stid);
5454 status = nfserr_admin_revoked;
5455 goto out;
5456 }
5457 if (deleg->dl_stid.sc_status & SC_STATUS_REVOKED) {
5458 nfs4_put_stid(&deleg->dl_stid);
5459 nfsd40_drop_revoked_stid(cl, &open->op_delegate_stateid);
5460 status = nfserr_deleg_revoked;
5461 goto out;
5462 }
5463 flags = share_access_to_flags(open->op_share_access);
5464 status = nfs4_check_delegmode(deleg, flags);
5465 if (status) {
5466 nfs4_put_stid(&deleg->dl_stid);
5467 goto out;
5468 }
5469 *dp = deleg;
5470out:
5471 if (!nfsd4_is_deleg_cur(open))
5472 return nfs_ok;
5473 if (status)
5474 return status;
5475 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5476 return nfs_ok;
5477}
5478
5479static inline int nfs4_access_to_access(u32 nfs4_access)
5480{
5481 int flags = 0;
5482
5483 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
5484 flags |= NFSD_MAY_READ;
5485 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
5486 flags |= NFSD_MAY_WRITE;
5487 return flags;
5488}
5489
5490static inline __be32
5491nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
5492 struct nfsd4_open *open)
5493{
5494 struct iattr iattr = {
5495 .ia_valid = ATTR_SIZE,
5496 .ia_size = 0,
5497 };
5498 struct nfsd_attrs attrs = {
5499 .na_iattr = &iattr,
5500 };
5501 if (!open->op_truncate)
5502 return 0;
5503 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
5504 return nfserr_inval;
5505 return nfsd_setattr(rqstp, fh, &attrs, NULL);
5506}
5507
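/*
 * Acquire the VFS-level state that an OPEN needs: check and record the
 * access and deny modes in the stateid and nfs4_file, open an nfsd_file
 * for the requested access if one isn't already cached, and apply any
 * requested truncation.
 */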
5508static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
5509 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5510 struct nfsd4_open *open, bool new_stp)
5511{
5512 struct nfsd_file *nf = NULL;
5513 __be32 status;
5514 int oflag = nfs4_access_to_omode(open->op_share_access);
5515 int access = nfs4_access_to_access(open->op_share_access);
5516 unsigned char old_access_bmap, old_deny_bmap;
5517
5518 spin_lock(&fp->fi_lock);
5519
5520 /*
5521 * Are we trying to set a deny mode that would conflict with
5522 * current access?
5523 */
5524 status = nfs4_file_check_deny(fp, open->op_share_deny);
5525 if (status != nfs_ok) {
5526 if (status != nfserr_share_denied) {
5527 spin_unlock(&fp->fi_lock);
5528 goto out;
5529 }
5530 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5531 stp, open->op_share_deny, false))
5532 status = nfserr_jukebox;
5533 spin_unlock(&fp->fi_lock);
5534 goto out;
5535 }
5536
5537 /* set access to the file */
5538 status = nfs4_file_get_access(fp, open->op_share_access);
5539 if (status != nfs_ok) {
5540 if (status != nfserr_share_denied) {
5541 spin_unlock(&fp->fi_lock);
5542 goto out;
5543 }
5544 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5545 stp, open->op_share_access, true))
5546 status = nfserr_jukebox;
5547 spin_unlock(&fp->fi_lock);
5548 goto out;
5549 }
5550
5551 /* Set access bits in stateid */
5552 old_access_bmap = stp->st_access_bmap;
5553 set_access(open->op_share_access, stp);
5554
5555 /* Set new deny mask */
5556 old_deny_bmap = stp->st_deny_bmap;
5557 set_deny(open->op_share_deny, stp);
5558 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5559
5560 if (!fp->fi_fds[oflag]) {
5561 spin_unlock(&fp->fi_lock);
5562
5563 status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
5564 open->op_filp, &nf);
5565 if (status != nfs_ok)
5566 goto out_put_access;
5567
5568 spin_lock(&fp->fi_lock);
5569 if (!fp->fi_fds[oflag]) {
5570 fp->fi_fds[oflag] = nf;
5571 nf = NULL;
5572 }
5573 }
5574 spin_unlock(&fp->fi_lock);
5575 if (nf)
5576 nfsd_file_put(nf);
5577
5578 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
5579 access));
5580 if (status)
5581 goto out_put_access;
5582
5583 status = nfsd4_truncate(rqstp, cur_fh, open);
5584 if (status)
5585 goto out_put_access;
5586out:
5587 return status;
5588out_put_access:
5589 stp->st_access_bmap = old_access_bmap;
5590 nfs4_file_put_access(fp, open->op_share_access);
5591 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
5592 goto out;
5593}
5594
5595static __be32
5596nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
5597 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5598 struct nfsd4_open *open)
5599{
5600 __be32 status;
5601 unsigned char old_deny_bmap = stp->st_deny_bmap;
5602
5603 if (!test_access(open->op_share_access, stp))
5604 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
5605
5606 /* test and set deny mode */
5607 spin_lock(&fp->fi_lock);
5608 status = nfs4_file_check_deny(fp, open->op_share_deny);
5609 switch (status) {
5610 case nfs_ok:
5611 set_deny(open->op_share_deny, stp);
5612 fp->fi_share_deny |=
5613 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5614 break;
5615 case nfserr_share_denied:
5616 if (nfs4_resolve_deny_conflicts_locked(fp, false,
5617 stp, open->op_share_deny, false))
5618 status = nfserr_jukebox;
5619 break;
5620 }
5621 spin_unlock(&fp->fi_lock);
5622
5623 if (status != nfs_ok)
5624 return status;
5625
5626 status = nfsd4_truncate(rqstp, cur_fh, open);
5627 if (status != nfs_ok)
5628 reset_union_bmap_deny(old_deny_bmap, stp);
5629 return status;
5630}
5631
5632/* Should we give out recallable state?: */
5633static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5634{
5635 if (clp->cl_cb_state == NFSD4_CB_UP)
5636 return true;
5637 /*
5638 * In the sessions case, since we don't have to establish a
5639 * separate connection for callbacks, we assume it's OK
5640 * until we hear otherwise:
5641 */
5642 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5643}
5644
5645static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5646 int flag)
5647{
5648 struct file_lease *fl;
5649
5650 fl = locks_alloc_lease();
5651 if (!fl)
5652 return NULL;
5653 fl->fl_lmops = &nfsd_lease_mng_ops;
5654 fl->c.flc_flags = FL_DELEG;
5655	fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
5656 fl->c.flc_owner = (fl_owner_t)dp;
5657 fl->c.flc_pid = current->tgid;
5658 fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5659 return fl;
5660}
5661
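/*
 * Check whether the file is open for write by anyone we would have to
 * break a delegation for: local writers are detected via i_writecount
 * (discounting our own O_WRONLY/O_RDWR nfsd_files), NFSv4 writers by
 * walking fi_stateids while ignoring @clp's own opens. Returns -EAGAIN
 * on a possible conflict.
 */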
5662static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5663 struct nfs4_file *fp)
5664{
5665 struct nfs4_ol_stateid *st;
5666 struct file *f = fp->fi_deleg_file->nf_file;
5667 struct inode *ino = file_inode(f);
5668 int writes;
5669
5670 writes = atomic_read(&ino->i_writecount);
5671 if (!writes)
5672 return 0;
5673 /*
5674 * There could be multiple filehandles (hence multiple
5675 * nfs4_files) referencing this file, but that's not too
5676 * common; let's just give up in that case rather than
5677 * trying to go look up all the clients using that other
5678 * nfs4_file as well:
5679 */
5680 if (fp->fi_aliased)
5681 return -EAGAIN;
5682 /*
5683 * If there's a close in progress, make sure that we see it
5684 * clear any fi_fds[] entries before we see it decrement
5685 * i_writecount:
5686 */
5687 smp_mb__after_atomic();
5688
5689 if (fp->fi_fds[O_WRONLY])
5690 writes--;
5691 if (fp->fi_fds[O_RDWR])
5692 writes--;
5693 if (writes > 0)
5694 return -EAGAIN; /* There may be non-NFSv4 writers */
5695 /*
5696 * It's possible there are non-NFSv4 write opens in progress,
5697 * but if they haven't incremented i_writecount yet then they
5698 * also haven't called break lease yet; so, they'll break this
5699 * lease soon enough. So, all that's left to check for is NFSv4
5700 * opens:
5701 */
5702 spin_lock(&fp->fi_lock);
5703 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5704 if (st->st_openstp == NULL /* it's an open */ &&
5705 access_permit_write(st) &&
5706 st->st_stid.sc_client != clp) {
5707 spin_unlock(&fp->fi_lock);
5708 return -EAGAIN;
5709 }
5710 }
5711 spin_unlock(&fp->fi_lock);
5712 /*
5713 * There's a small chance that we could be racing with another
5714 * NFSv4 open. However, any open that hasn't added itself to
5715 * the fi_stateids list also hasn't called break_lease yet; so,
5716 * they'll break this lease soon enough.
5717 */
5718 return 0;
5719}
5720
5721/*
5722 * It's possible that, between opening the dentry and setting the delegation,
5723 * it has been renamed or unlinked. Redo the lookup to verify that this
5724 * hasn't happened.
5725 */
5726static int
5727nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
5728 struct svc_fh *parent)
5729{
5730 struct svc_export *exp;
5731 struct dentry *child;
5732 __be32 err;
5733
5734 err = nfsd_lookup_dentry(open->op_rqstp, parent,
5735 open->op_fname, open->op_fnamelen,
5736 &exp, &child);
5737
5738 if (err)
5739 return -EAGAIN;
5740
5741 exp_put(exp);
5742 dput(child);
5743 if (child != file_dentry(fp->fi_deleg_file->nf_file))
5744 return -EAGAIN;
5745
5746 return 0;
5747}
5748
5749/*
5750 * We avoid breaking delegations held by a client due to its own activity, but
5751 * clearing setuid/setgid bits on a write is an implicit activity and the client
5752 * may not notice and continue using the old mode. Avoid giving out a delegation
5753 * on setuid/setgid files when the client is requesting an open for write.
5754 */
5755static int
5756nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
5757{
5758 struct inode *inode = file_inode(nf->nf_file);
5759
5760 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
5761 (inode->i_mode & (S_ISUID|S_ISGID)))
5762 return -EAGAIN;
5763 return 0;
5764}
5765
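/*
 * Try to set up a delegation for this open: pick a write or read
 * delegation based on the requested share access, install the lease via
 * kernel_setlease(), then re-verify that no conflicting open, rename or
 * unlink, or setuid/setgid change raced in before hashing the delegation.
 * Returns the new delegation with a reference held, or an ERR_PTR.
 */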
5766static struct nfs4_delegation *
5767nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5768 struct svc_fh *parent)
5769{
5770 int status = 0;
5771 struct nfs4_client *clp = stp->st_stid.sc_client;
5772 struct nfs4_file *fp = stp->st_stid.sc_file;
5773 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
5774 struct nfs4_delegation *dp;
5775 struct nfsd_file *nf = NULL;
5776 struct file_lease *fl;
5777 u32 dl_type;
5778
5779 /*
5780 * The fi_had_conflict and nfs_get_existing_delegation checks
5781 * here are just optimizations; we'll need to recheck them at
5782 * the end:
5783 */
5784 if (fp->fi_had_conflict)
5785 return ERR_PTR(-EAGAIN);
5786
5787 /*
5788 * Try for a write delegation first. RFC8881 section 10.4 says:
5789 *
5790 * "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
5791 * on its own, all opens."
5792 *
5793 * Furthermore, the client can use a write delegation for most READ
5794 * operations as well, so we require an O_RDWR file here.
5795 *
5796 * Offer a write delegation in the case of a BOTH open, and ensure
5797 * we get the O_RDWR descriptor.
5798 */
5799 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
5800 nf = find_rw_file(fp);
5801 dl_type = NFS4_OPEN_DELEGATE_WRITE;
5802 }
5803
5804 /*
5805 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR
5806 * file for some reason, then try for a read delegation instead.
5807 */
5808 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
5809 nf = find_readable_file(fp);
5810 dl_type = NFS4_OPEN_DELEGATE_READ;
5811 }
5812
5813 if (!nf)
5814 return ERR_PTR(-EAGAIN);
5815
5816 spin_lock(&state_lock);
5817 spin_lock(&fp->fi_lock);
5818 if (nfs4_delegation_exists(clp, fp))
5819 status = -EAGAIN;
5820 else if (nfsd4_verify_setuid_write(open, nf))
5821 status = -EAGAIN;
5822 else if (!fp->fi_deleg_file) {
5823 fp->fi_deleg_file = nf;
5824 /* increment early to prevent fi_deleg_file from being
5825 * cleared */
5826 fp->fi_delegees = 1;
5827 nf = NULL;
5828 } else
5829 fp->fi_delegees++;
5830 spin_unlock(&fp->fi_lock);
5831 spin_unlock(&state_lock);
5832 if (nf)
5833 nfsd_file_put(nf);
5834 if (status)
5835 return ERR_PTR(status);
5836
5837 status = -ENOMEM;
5838 dp = alloc_init_deleg(clp, fp, odstate, dl_type);
5839 if (!dp)
5840 goto out_delegees;
5841
5842 fl = nfs4_alloc_init_lease(dp, dl_type);
5843 if (!fl)
5844 goto out_clnt_odstate;
5845
5846 status = kernel_setlease(fp->fi_deleg_file->nf_file,
5847 fl->c.flc_type, &fl, NULL);
5848 if (fl)
5849 locks_free_lease(fl);
5850 if (status)
5851 goto out_clnt_odstate;
5852
5853 if (parent) {
5854 status = nfsd4_verify_deleg_dentry(open, fp, parent);
5855 if (status)
5856 goto out_unlock;
5857 }
5858
5859 status = nfsd4_check_conflicting_opens(clp, fp);
5860 if (status)
5861 goto out_unlock;
5862
5863 /*
5864 * Now that the deleg is set, check again to ensure that nothing
5865 * raced in and changed the mode while we weren't looking.
5866 */
5867 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
5868 if (status)
5869 goto out_unlock;
5870
5871 status = -EAGAIN;
5872 if (fp->fi_had_conflict)
5873 goto out_unlock;
5874
5875 spin_lock(&state_lock);
5876 spin_lock(&clp->cl_lock);
5877 spin_lock(&fp->fi_lock);
5878 status = hash_delegation_locked(dp, fp);
5879 spin_unlock(&fp->fi_lock);
5880 spin_unlock(&clp->cl_lock);
5881 spin_unlock(&state_lock);
5882
5883 if (status)
5884 goto out_unlock;
5885
5886 return dp;
5887out_unlock:
5888 kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5889out_clnt_odstate:
5890 put_clnt_odstate(dp->dl_clnt_odstate);
5891 nfs4_put_stid(&dp->dl_stid);
5892out_delegees:
5893 put_deleg_file(fp);
5894 return ERR_PTR(status);
5895}
5896
5897static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5898{
5899 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5900 if (status == -EAGAIN)
5901 open->op_why_no_deleg = WND4_CONTENTION;
5902 else {
5903 open->op_why_no_deleg = WND4_RESOURCE;
5904 switch (open->op_deleg_want) {
5905 case NFS4_SHARE_WANT_READ_DELEG:
5906 case NFS4_SHARE_WANT_WRITE_DELEG:
5907 case NFS4_SHARE_WANT_ANY_DELEG:
5908 break;
5909 case NFS4_SHARE_WANT_CANCEL:
5910 open->op_why_no_deleg = WND4_CANCELLED;
5911 break;
5912 case NFS4_SHARE_WANT_NO_DELEG:
5913 WARN_ON_ONCE(1);
5914 }
5915 }
5916}
5917
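/*
 * Sample the current size and change attribute of the file being
 * delegated. The results seed the delegation's cached file attributes so
 * that later GETATTRs which conflict with a write delegation can be
 * handled via CB_GETATTR rather than an immediate recall.
 */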
5918static bool
5919nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
5920 struct kstat *stat)
5921{
5922 struct nfsd_file *nf = find_rw_file(dp->dl_stid.sc_file);
5923 struct path path;
5924 int rc;
5925
5926 if (!nf)
5927 return false;
5928
5929 path.mnt = currentfh->fh_export->ex_path.mnt;
5930 path.dentry = file_dentry(nf->nf_file);
5931
5932 rc = vfs_getattr(&path, stat,
5933 (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
5934 AT_STATX_SYNC_AS_STAT);
5935
5936 nfsd_file_put(nf);
5937 return rc == 0;
5938}
5939
5940/*
5941 * The Linux NFS server does not offer write delegations to NFSv4.0
5942 * clients in order to avoid conflicts between write delegations and
5943 * GETATTRs requesting CHANGE or SIZE attributes.
5944 *
5945 * With NFSv4.1 and later minorversions, the SEQUENCE operation that
5946 * begins each COMPOUND contains a client ID. Delegation recall can
5947 * be avoided when the server recognizes that the client sending a
5948 * GETATTR also holds the write delegation it conflicts with.
5949 *
5950 * However, the NFSv4.0 protocol does not enable a server to
5951 * determine that a GETATTR originated from the client holding the
5952 * conflicting delegation versus coming from some other client. Per
5953 * RFC 7530 Section 16.7.5, the server must recall or send a
5954 * CB_GETATTR even when the GETATTR originates from the client that
5955 * holds the conflicting delegation.
5956 *
5957 * An NFSv4.0 client can trigger a pathological situation if it
5958 * always sends a DELEGRETURN preceded by a conflicting GETATTR in
5959 * the same COMPOUND. COMPOUND execution will always stop at the
5960 * GETATTR and the DELEGRETURN will never get executed. The server
5961 * eventually revokes the delegation, which can result in loss of
5962 * open or lock state.
5963 */
5964static void
5965nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5966 struct svc_fh *currentfh)
5967{
5968 struct nfs4_delegation *dp;
5969 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5970 struct nfs4_client *clp = stp->st_stid.sc_client;
5971 struct svc_fh *parent = NULL;
5972 int cb_up;
5973 int status = 0;
5974 struct kstat stat;
5975
5976 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5977 open->op_recall = false;
5978 switch (open->op_claim_type) {
5979 case NFS4_OPEN_CLAIM_PREVIOUS:
5980 if (!cb_up)
5981 open->op_recall = true;
5982 break;
5983 case NFS4_OPEN_CLAIM_NULL:
5984 parent = currentfh;
5985 fallthrough;
5986 case NFS4_OPEN_CLAIM_FH:
5987 /*
5988 * Let's not give out any delegations till everyone's
5989 * had the chance to reclaim theirs, *and* until
5990 * NLM locks have all been reclaimed:
5991 */
5992 if (locks_in_grace(clp->net))
5993 goto out_no_deleg;
5994 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5995 goto out_no_deleg;
5996 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
5997 !clp->cl_minorversion)
5998 goto out_no_deleg;
5999 break;
6000 default:
6001 goto out_no_deleg;
6002 }
6003 dp = nfs4_set_delegation(open, stp, parent);
6004 if (IS_ERR(dp))
6005 goto out_no_deleg;
6006
6007 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
6008
6009 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
6010 if (!nfs4_delegation_stat(dp, currentfh, &stat)) {
6011 nfs4_put_stid(&dp->dl_stid);
6012 destroy_delegation(dp);
6013 goto out_no_deleg;
6014 }
6015 open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
6016 dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
6017 dp->dl_cb_fattr.ncf_initial_cinfo =
6018 nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry));
6019 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
6020 } else {
6021 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
6022 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
6023 }
6024 nfs4_put_stid(&dp->dl_stid);
6025 return;
6026out_no_deleg:
6027 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
6028 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
6029 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
6030 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
6031 open->op_recall = true;
6032 }
6033
6034 /* 4.1 client asking for a delegation? */
6035 if (open->op_deleg_want)
6036 nfsd4_open_deleg_none_ext(open, status);
6037 return;
6038}
6039
6040static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
6041 struct nfs4_delegation *dp)
6042{
6043 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
6044 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
6045 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
6046 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
6047 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
6048 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
6049 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
6050 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
6051 }
6052	/* Otherwise the client must be confused, wanting a delegation
6053	 * it already has; therefore we don't return
6054	 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
6055 */
6056}
6057
6058/**
6059 * nfsd4_process_open2 - finish open processing
6060 * @rqstp: the RPC transaction being executed
6061 * @current_fh: NFSv4 COMPOUND's current filehandle
6062 * @open: OPEN arguments
6063 *
6064 * If successful, (1) truncate the file if open->op_truncate was
6065 * set, (2) set open->op_stateid, (3) set open->op_delegation.
6066 *
6067 * Returns %nfs_ok on success; otherwise an nfs4stat value in
6068 * network byte order is returned.
6069 */
6070__be32
6071nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
6072{
6073 struct nfsd4_compoundres *resp = rqstp->rq_resp;
6074 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
6075 struct nfs4_file *fp = NULL;
6076 struct nfs4_ol_stateid *stp = NULL;
6077 struct nfs4_delegation *dp = NULL;
6078 __be32 status;
6079 bool new_stp = false;
6080
6081 /*
6082 * Lookup file; if found, lookup stateid and check open request,
6083 * and check for delegations in the process of being recalled.
6084 * If not found, create the nfs4_file struct
6085 */
6086 fp = nfsd4_file_hash_insert(open->op_file, current_fh);
6087 if (unlikely(!fp))
6088 return nfserr_jukebox;
6089 if (fp != open->op_file) {
6090 status = nfs4_check_deleg(cl, open, &dp);
6091 if (status)
6092 goto out;
6093 stp = nfsd4_find_and_lock_existing_open(fp, open);
6094 } else {
6095 open->op_file = NULL;
6096 status = nfserr_bad_stateid;
6097 if (nfsd4_is_deleg_cur(open))
6098 goto out;
6099 }
6100
6101 if (!stp) {
6102 stp = init_open_stateid(fp, open);
6103 if (!open->op_stp)
6104 new_stp = true;
6105 }
6106
6107 /*
6108 * OPEN the file, or upgrade an existing OPEN.
6109 * If truncate fails, the OPEN fails.
6110 *
6111 * stp is already locked.
6112 */
6113 if (!new_stp) {
6114 /* Stateid was found, this is an OPEN upgrade */
6115 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
6116 if (status) {
6117 mutex_unlock(&stp->st_mutex);
6118 goto out;
6119 }
6120 } else {
6121 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
6122 if (status) {
6123 release_open_stateid(stp);
6124 mutex_unlock(&stp->st_mutex);
6125 goto out;
6126 }
6127
6128 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
6129 open->op_odstate);
6130 if (stp->st_clnt_odstate == open->op_odstate)
6131 open->op_odstate = NULL;
6132 }
6133
6134 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
6135 mutex_unlock(&stp->st_mutex);
6136
6137 if (nfsd4_has_session(&resp->cstate)) {
6138 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
6139 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
6140 open->op_why_no_deleg = WND4_NOT_WANTED;
6141 goto nodeleg;
6142 }
6143 }
6144
6145 /*
6146 * Attempt to hand out a delegation. No error return, because the
6147 * OPEN succeeds even if we fail.
6148 */
6149 nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
6150nodeleg:
6151 status = nfs_ok;
6152 trace_nfsd_open(&stp->st_stid.sc_stateid);
6153out:
6154 /* 4.1 client trying to upgrade/downgrade delegation? */
6155 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
6156 open->op_deleg_want)
6157 nfsd4_deleg_xgrade_none_ext(open, dp);
6158
6159 if (fp)
6160 put_nfs4_file(fp);
6161 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
6162 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
6163 /*
6164 * To finish the open response, we just need to set the rflags.
6165 */
6166 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
6167 if (nfsd4_has_session(&resp->cstate))
6168 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
6169 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
6170 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
6171
6172 if (dp)
6173 nfs4_put_stid(&dp->dl_stid);
6174 if (stp)
6175 nfs4_put_stid(&stp->st_stid);
6176
6177 return status;
6178}
6179
6180void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
6181 struct nfsd4_open *open)
6182{
6183 if (open->op_openowner)
6184 nfs4_put_stateowner(&open->op_openowner->oo_owner);
6185 if (open->op_file)
6186 kmem_cache_free(file_slab, open->op_file);
6187 if (open->op_stp)
6188 nfs4_put_stid(&open->op_stp->st_stid);
6189 if (open->op_odstate)
6190 kmem_cache_free(odstate_slab, open->op_odstate);
6191}
6192
6193__be32
6194nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6195 union nfsd4_op_u *u)
6196{
6197 clientid_t *clid = &u->renew;
6198 struct nfs4_client *clp;
6199 __be32 status;
6200 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6201
6202 trace_nfsd_clid_renew(clid);
6203 status = set_client(clid, cstate, nn);
6204 if (status)
6205 return status;
6206 clp = cstate->clp;
6207 if (!list_empty(&clp->cl_delegations)
6208 && clp->cl_cb_state != NFSD4_CB_UP)
6209 return nfserr_cb_path_down;
6210 return nfs_ok;
6211}
6212
6213void
6214nfsd4_end_grace(struct nfsd_net *nn)
6215{
6216 /* do nothing if grace period already ended */
6217 if (nn->grace_ended)
6218 return;
6219
6220 trace_nfsd_grace_complete(nn);
6221 nn->grace_ended = true;
6222 /*
6223 * If the server goes down again right now, an NFSv4
6224 * client will still be allowed to reclaim after it comes back up,
6225 * even if it hasn't yet had a chance to reclaim state this time.
6226 *
6227 */
6228 nfsd4_record_grace_done(nn);
6229 /*
6230 * At this point, NFSv4 clients can still reclaim. But if the
6231 * server crashes, any that have not yet reclaimed will be out
6232 * of luck on the next boot.
6233 *
6234 * (NFSv4.1+ clients are considered to have reclaimed once they
6235 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
6236 * have reclaimed after their first OPEN.)
6237 */
6238 locks_end_grace(&nn->nfsd4_manager);
6239 /*
6240 * At this point, and once lockd and/or any other containers
6241 * exit their grace period, further reclaims will fail and
6242 * regular locking can resume.
6243 */
6244}
6245
6246/*
6247 * If we've waited a lease period but there are still clients trying to
6248 * reclaim, wait a little longer to give them a chance to finish.
6249 */
6250static bool clients_still_reclaiming(struct nfsd_net *nn)
6251{
6252 time64_t double_grace_period_end = nn->boot_time +
6253 2 * nn->nfsd4_lease;
6254
6255 if (nn->track_reclaim_completes &&
6256 atomic_read(&nn->nr_reclaim_complete) ==
6257 nn->reclaim_str_hashtbl_size)
6258 return false;
6259 if (!nn->somebody_reclaimed)
6260 return false;
6261 nn->somebody_reclaimed = false;
6262 /*
6263 * If we've given them *two* lease times to reclaim, and they're
6264 * still not done, give up:
6265 */
6266 if (ktime_get_boottime_seconds() > double_grace_period_end)
6267 return false;
6268 return true;
6269}
6270
6271struct laundry_time {
6272 time64_t cutoff;
6273 time64_t new_timeo;
6274};
6275
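/*
 * Has this piece of state gone unrefreshed for longer than a lease
 * period? If not, shrink the laundromat's next timeout so that it runs
 * again around the time this state would expire.
 */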
6276static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
6277{
6278 time64_t time_remaining;
6279
6280 if (last_refresh < lt->cutoff)
6281 return true;
6282 time_remaining = last_refresh - lt->cutoff;
6283 lt->new_timeo = min(lt->new_timeo, time_remaining);
6284 return false;
6285}
6286
6287#ifdef CONFIG_NFSD_V4_2_INTER_SSC
6288void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
6289{
6290 spin_lock_init(&nn->nfsd_ssc_lock);
6291 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
6292 init_waitqueue_head(&nn->nfsd_ssc_waitq);
6293}
6294
6295/*
6296 * This is called when nfsd is being shut down, after all inter_ssc
6297 * cleanup has been done, to destroy the ssc delayed unmount list.
6298 */
6299static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
6300{
6301 struct nfsd4_ssc_umount_item *ni = NULL;
6302 struct nfsd4_ssc_umount_item *tmp;
6303
6304 spin_lock(&nn->nfsd_ssc_lock);
6305 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
6306 list_del(&ni->nsui_list);
6307 spin_unlock(&nn->nfsd_ssc_lock);
6308 mntput(ni->nsui_vfsmount);
6309 kfree(ni);
6310 spin_lock(&nn->nfsd_ssc_lock);
6311 }
6312 spin_unlock(&nn->nfsd_ssc_lock);
6313}
6314
6315static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
6316{
6317 bool do_wakeup = false;
6318 struct nfsd4_ssc_umount_item *ni = NULL;
6319 struct nfsd4_ssc_umount_item *tmp;
6320
6321 spin_lock(&nn->nfsd_ssc_lock);
6322 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
6323 if (time_after(jiffies, ni->nsui_expire)) {
6324 if (refcount_read(&ni->nsui_refcnt) > 1)
6325 continue;
6326
6327			/* mark as being unmounted */
6328 ni->nsui_busy = true;
6329 spin_unlock(&nn->nfsd_ssc_lock);
6330 mntput(ni->nsui_vfsmount);
6331 spin_lock(&nn->nfsd_ssc_lock);
6332
6333			/* waiters need to start from the beginning of the list */
6334 list_del(&ni->nsui_list);
6335 kfree(ni);
6336
6337 /* wakeup ssc_connect waiters */
6338 do_wakeup = true;
6339 continue;
6340 }
6341 break;
6342 }
6343 if (do_wakeup)
6344 wake_up_all(&nn->nfsd_ssc_waitq);
6345 spin_unlock(&nn->nfsd_ssc_lock);
6346}
6347#endif
6348
6349/* Check if any lock belonging to this lockowner has any blockers */
6350static bool
6351nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
6352{
6353 struct file_lock_context *ctx;
6354 struct nfs4_ol_stateid *stp;
6355 struct nfs4_file *nf;
6356
6357 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
6358 nf = stp->st_stid.sc_file;
6359 ctx = locks_inode_context(nf->fi_inode);
6360 if (!ctx)
6361 continue;
6362 if (locks_owner_has_blockers(ctx, lo))
6363 return true;
6364 }
6365 return false;
6366}
6367
6368static bool
6369nfs4_anylock_blockers(struct nfs4_client *clp)
6370{
6371 int i;
6372 struct nfs4_stateowner *so;
6373 struct nfs4_lockowner *lo;
6374
6375 if (atomic_read(&clp->cl_delegs_in_recall))
6376 return true;
6377 spin_lock(&clp->cl_lock);
6378 for (i = 0; i < OWNER_HASH_SIZE; i++) {
6379 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
6380 so_strhash) {
6381 if (so->so_is_open_owner)
6382 continue;
6383 lo = lockowner(so);
6384 if (nfs4_lockowner_has_blockers(lo)) {
6385 spin_unlock(&clp->cl_lock);
6386 return true;
6387 }
6388 }
6389 }
6390 spin_unlock(&clp->cl_lock);
6391 return false;
6392}
6393
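/*
 * Scan the client LRU for clients whose lease has expired. Clients
 * already marked expirable, and expired clients that hold no state, go
 * straight onto @reaplist; other expired clients become "courtesy"
 * clients and are only reaped when they block another client's lock or
 * when the server is over its client limit (and then at most
 * NFSD_CLIENT_MAX_TRIM_PER_RUN per pass).
 */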
6394static void
6395nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
6396 struct laundry_time *lt)
6397{
6398 unsigned int maxreap, reapcnt = 0;
6399 struct list_head *pos, *next;
6400 struct nfs4_client *clp;
6401
6402 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
6403 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
6404 INIT_LIST_HEAD(reaplist);
6405 spin_lock(&nn->client_lock);
6406 list_for_each_safe(pos, next, &nn->client_lru) {
6407 clp = list_entry(pos, struct nfs4_client, cl_lru);
6408 if (clp->cl_state == NFSD4_EXPIRABLE)
6409 goto exp_client;
6410 if (!state_expired(lt, clp->cl_time))
6411 break;
6412 if (!atomic_read(&clp->cl_rpc_users)) {
6413 if (clp->cl_state == NFSD4_ACTIVE)
6414 atomic_inc(&nn->nfsd_courtesy_clients);
6415 clp->cl_state = NFSD4_COURTESY;
6416 }
6417 if (!client_has_state(clp))
6418 goto exp_client;
6419 if (!nfs4_anylock_blockers(clp))
6420 if (reapcnt >= maxreap)
6421 continue;
6422exp_client:
6423 if (!mark_client_expired_locked(clp)) {
6424 list_add(&clp->cl_lru, reaplist);
6425 reapcnt++;
6426 }
6427 }
6428 spin_unlock(&nn->client_lock);
6429}
6430
6431static void
6432nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
6433 struct list_head *reaplist)
6434{
6435 unsigned int maxreap = 0, reapcnt = 0;
6436 struct list_head *pos, *next;
6437 struct nfs4_client *clp;
6438
6439 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
6440 INIT_LIST_HEAD(reaplist);
6441
6442 spin_lock(&nn->client_lock);
6443 list_for_each_safe(pos, next, &nn->client_lru) {
6444 clp = list_entry(pos, struct nfs4_client, cl_lru);
6445 if (clp->cl_state == NFSD4_ACTIVE)
6446 break;
6447 if (reapcnt >= maxreap)
6448 break;
6449 if (!mark_client_expired_locked(clp)) {
6450 list_add(&clp->cl_lru, reaplist);
6451 reapcnt++;
6452 }
6453 }
6454 spin_unlock(&nn->client_lock);
6455}
6456
6457static void
6458nfs4_process_client_reaplist(struct list_head *reaplist)
6459{
6460 struct list_head *pos, *next;
6461 struct nfs4_client *clp;
6462
6463 list_for_each_safe(pos, next, reaplist) {
6464 clp = list_entry(pos, struct nfs4_client, cl_lru);
6465 trace_nfsd_clid_purged(&clp->cl_clientid);
6466 list_del_init(&clp->cl_lru);
6467 expire_client(clp);
6468 }
6469}
6470
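/*
 * NFSv4.0 clients have no FREE_STATEID with which to acknowledge an
 * administratively revoked stateid, so once a lease period has passed
 * since the last revocation the laundromat drops any remaining
 * admin-revoked stateids on their behalf.
 */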
6471static void nfs40_clean_admin_revoked(struct nfsd_net *nn,
6472 struct laundry_time *lt)
6473{
6474 struct nfs4_client *clp;
6475
6476 spin_lock(&nn->client_lock);
6477 if (nn->nfs40_last_revoke == 0 ||
6478 nn->nfs40_last_revoke > lt->cutoff) {
6479 spin_unlock(&nn->client_lock);
6480 return;
6481 }
6482 nn->nfs40_last_revoke = 0;
6483
6484retry:
6485 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6486 unsigned long id, tmp;
6487 struct nfs4_stid *stid;
6488
6489 if (atomic_read(&clp->cl_admin_revoked) == 0)
6490 continue;
6491
6492 spin_lock(&clp->cl_lock);
6493 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
6494 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) {
6495 refcount_inc(&stid->sc_count);
6496 spin_unlock(&nn->client_lock);
6497 /* this function drops ->cl_lock */
6498 nfsd4_drop_revoked_stid(stid);
6499 nfs4_put_stid(stid);
6500 spin_lock(&nn->client_lock);
6501 goto retry;
6502 }
6503 spin_unlock(&clp->cl_lock);
6504 }
6505 spin_unlock(&nn->client_lock);
6506}
6507
6508static time64_t
6509nfs4_laundromat(struct nfsd_net *nn)
6510{
6511 struct nfs4_openowner *oo;
6512 struct nfs4_delegation *dp;
6513 struct nfs4_ol_stateid *stp;
6514 struct nfsd4_blocked_lock *nbl;
6515 struct list_head *pos, *next, reaplist;
6516 struct laundry_time lt = {
6517 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
6518 .new_timeo = nn->nfsd4_lease
6519 };
6520 struct nfs4_cpntf_state *cps;
6521 copy_stateid_t *cps_t;
6522 int i;
6523
6524 if (clients_still_reclaiming(nn)) {
6525 lt.new_timeo = 0;
6526 goto out;
6527 }
6528 nfsd4_end_grace(nn);
6529
6530 spin_lock(&nn->s2s_cp_lock);
6531 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6532 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6533 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6534 state_expired(&lt, cps->cpntf_time))
6535 _free_cpntf_state_locked(nn, cps);
6536 }
6537 spin_unlock(&nn->s2s_cp_lock);
6538 nfs4_get_client_reaplist(nn, &reaplist, &lt);
6539 nfs4_process_client_reaplist(&reaplist);
6540
6541 nfs40_clean_admin_revoked(nn, &lt);
6542
6543 spin_lock(&state_lock);
6544 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6545 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
6546 if (!state_expired(&lt, dp->dl_time))
6547 break;
6548 unhash_delegation_locked(dp, SC_STATUS_REVOKED);
6549 list_add(&dp->dl_recall_lru, &reaplist);
6550 }
6551 spin_unlock(&state_lock);
6552 while (!list_empty(&reaplist)) {
6553 dp = list_first_entry(&reaplist, struct nfs4_delegation,
6554 dl_recall_lru);
6555 list_del_init(&dp->dl_recall_lru);
6556 revoke_delegation(dp);
6557 }
6558
6559 spin_lock(&nn->client_lock);
6560 while (!list_empty(&nn->close_lru)) {
6561 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6562 oo_close_lru);
6563 if (!state_expired(&lt, oo->oo_time))
6564 break;
6565 list_del_init(&oo->oo_close_lru);
6566 stp = oo->oo_last_closed_stid;
6567 oo->oo_last_closed_stid = NULL;
6568 spin_unlock(&nn->client_lock);
6569 nfs4_put_stid(&stp->st_stid);
6570 spin_lock(&nn->client_lock);
6571 }
6572 spin_unlock(&nn->client_lock);
6573
6574 /*
6575 * It's possible for a client to try and acquire an already held lock
6576 * that is being held for a long time, and then lose interest in it.
6577 * So, we clean out any un-revisited request after a lease period
6578 * under the assumption that the client is no longer interested.
6579 *
6580 * RFC5661, sec. 9.6 states that the client must not rely on getting
6581 * notifications and must continue to poll for locks, even when the
6582 * server supports them. Thus this shouldn't lead to clients blocking
6583 * indefinitely once the lock does become free.
6584 */
6585 BUG_ON(!list_empty(&reaplist));
6586 spin_lock(&nn->blocked_locks_lock);
6587 while (!list_empty(&nn->blocked_locks_lru)) {
6588 nbl = list_first_entry(&nn->blocked_locks_lru,
6589 struct nfsd4_blocked_lock, nbl_lru);
6590 if (!state_expired(&lt, nbl->nbl_time))
6591 break;
6592 list_move(&nbl->nbl_lru, &reaplist);
6593 list_del_init(&nbl->nbl_list);
6594 }
6595 spin_unlock(&nn->blocked_locks_lock);
6596
6597 while (!list_empty(&reaplist)) {
6598 nbl = list_first_entry(&reaplist,
6599 struct nfsd4_blocked_lock, nbl_lru);
6600 list_del_init(&nbl->nbl_lru);
6601 free_blocked_lock(nbl);
6602 }
6603#ifdef CONFIG_NFSD_V4_2_INTER_SSC
6604 /* service the server-to-server copy delayed unmount list */
6605 nfsd4_ssc_expire_umount(nn);
6606#endif
6607 if (atomic_long_read(&num_delegations) >= max_delegations)
6608 deleg_reaper(nn);
6609out:
6610 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6611}
6612
6613static void laundromat_main(struct work_struct *);
6614
6615static void
6616laundromat_main(struct work_struct *laundry)
6617{
6618 time64_t t;
6619 struct delayed_work *dwork = to_delayed_work(laundry);
6620 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6621 laundromat_work);
6622
6623 t = nfs4_laundromat(nn);
6624 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6625}
6626
6627static void
6628courtesy_client_reaper(struct nfsd_net *nn)
6629{
6630 struct list_head reaplist;
6631
6632 nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6633 nfs4_process_client_reaplist(&reaplist);
6634}
6635
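/*
 * Ask clients, via CB_RECALL_ANY, to voluntarily return some of their
 * delegations. Clients that aren't active, hold no delegations, already
 * have recalls or a CB_RECALL_ANY in flight, or were asked within the
 * last 5 seconds are skipped.
 */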
6636static void
6637deleg_reaper(struct nfsd_net *nn)
6638{
6639 struct list_head *pos, *next;
6640 struct nfs4_client *clp;
6641 LIST_HEAD(cblist);
6642
6643 spin_lock(&nn->client_lock);
6644 list_for_each_safe(pos, next, &nn->client_lru) {
6645 clp = list_entry(pos, struct nfs4_client, cl_lru);
6646 if (clp->cl_state != NFSD4_ACTIVE ||
6647 list_empty(&clp->cl_delegations) ||
6648 atomic_read(&clp->cl_delegs_in_recall) ||
6649 test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
6650 (ktime_get_boottime_seconds() -
6651 clp->cl_ra_time < 5)) {
6652 continue;
6653 }
6654 list_add(&clp->cl_ra_cblist, &cblist);
6655
6656 /* release in nfsd4_cb_recall_any_release */
6657 kref_get(&clp->cl_nfsdfs.cl_ref);
6658 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6659 clp->cl_ra_time = ktime_get_boottime_seconds();
6660 }
6661 spin_unlock(&nn->client_lock);
6662
6663 while (!list_empty(&cblist)) {
6664 clp = list_first_entry(&cblist, struct nfs4_client,
6665 cl_ra_cblist);
6666 list_del_init(&clp->cl_ra_cblist);
6667 clp->cl_ra->ra_keep = 0;
6668 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
6669 BIT(RCA4_TYPE_MASK_WDATA_DLG);
6670 trace_nfsd_cb_recall_any(clp->cl_ra);
6671 nfsd4_run_cb(&clp->cl_ra->ra_cb);
6672 }
6673}
6674
6675static void
6676nfsd4_state_shrinker_worker(struct work_struct *work)
6677{
6678 struct nfsd_net *nn = container_of(work, struct nfsd_net,
6679 nfsd_shrinker_work);
6680
6681 courtesy_client_reaper(nn);
6682 deleg_reaper(nn);
6683}
6684
6685static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6686{
6687 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6688 return nfserr_bad_stateid;
6689 return nfs_ok;
6690}
6691
6692static
6693__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6694{
6695 __be32 status = nfserr_openmode;
6696
6697 /* For lock stateid's, we test the parent open, not the lock: */
6698 if (stp->st_openstp)
6699 stp = stp->st_openstp;
6700 if ((flags & WR_STATE) && !access_permit_write(stp))
6701 goto out;
6702 if ((flags & RD_STATE) && !access_permit_read(stp))
6703 goto out;
6704 status = nfs_ok;
6705out:
6706 return status;
6707}
6708
6709static inline __be32
6710check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6711{
6712 if (ONE_STATEID(stateid) && (flags & RD_STATE))
6713 return nfs_ok;
6714 else if (opens_in_grace(net)) {
6715		/* The answer in the remaining cases depends on the existence of
6716 * conflicting state; so we must wait out the grace period. */
6717 return nfserr_grace;
6718 } else if (flags & WR_STATE)
6719 return nfs4_share_conflict(current_fh,
6720 NFS4_SHARE_DENY_WRITE);
6721 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
6722 return nfs4_share_conflict(current_fh,
6723 NFS4_SHARE_DENY_READ);
6724}
6725
6726static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6727{
6728 /*
6729 * When sessions are used the stateid generation number is ignored
6730 * when it is zero.
6731 */
6732 if (has_session && in->si_generation == 0)
6733 return nfs_ok;
6734
6735 if (in->si_generation == ref->si_generation)
6736 return nfs_ok;
6737
6738 /* If the client sends us a stateid from the future, it's buggy: */
6739 if (nfsd4_stateid_generation_after(in, ref))
6740 return nfserr_bad_stateid;
6741 /*
6742 * However, we could see a stateid from the past, even from a
6743 * non-buggy client. For example, if the client sends a lock
6744 * while some IO is outstanding, the lock may bump si_generation
6745 * while the IO is still in flight. The client could avoid that
6746 * situation by waiting for responses on all the IO requests,
6747	 * but better performance may result from retrying IO that
6748 * receives an old_stateid error if requests are rarely
6749 * reordered in flight:
6750 */
6751 return nfserr_old_stateid;
6752}
6753
6754static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6755{
6756 __be32 ret;
6757
6758 spin_lock(&s->sc_lock);
6759 ret = nfsd4_verify_open_stid(s);
6760 if (ret == nfs_ok)
6761 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6762 spin_unlock(&s->sc_lock);
6763 if (ret == nfserr_admin_revoked)
6764 nfsd40_drop_revoked_stid(s->sc_client,
6765 &s->sc_stateid);
6766 return ret;
6767}
6768
6769static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6770{
6771 if (ols->st_stateowner->so_is_open_owner &&
6772 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6773 return nfserr_bad_stateid;
6774 return nfs_ok;
6775}
6776
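/*
 * Validate a single stateid for TEST_STATEID: it must exist for this
 * client, carry a current generation number, pass open-stid
 * verification, and (for open and lock stateids) belong to a confirmed
 * openowner.
 */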
6777static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6778{
6779 struct nfs4_stid *s;
6780 __be32 status = nfserr_bad_stateid;
6781
6782 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6783 CLOSE_STATEID(stateid))
6784 return status;
6785 spin_lock(&cl->cl_lock);
6786 s = find_stateid_locked(cl, stateid);
6787 if (!s)
6788 goto out_unlock;
6789 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6790 if (status)
6791 goto out_unlock;
6792 status = nfsd4_verify_open_stid(s);
6793 if (status)
6794 goto out_unlock;
6795
6796 switch (s->sc_type) {
6797 case SC_TYPE_DELEG:
6798 status = nfs_ok;
6799 break;
6800 case SC_TYPE_OPEN:
6801 case SC_TYPE_LOCK:
6802 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6803 break;
6804 default:
6805 printk("unknown stateid type %x\n", s->sc_type);
6806 status = nfserr_bad_stateid;
6807 }
6808out_unlock:
6809 spin_unlock(&cl->cl_lock);
6810 if (status == nfserr_admin_revoked)
6811 nfsd40_drop_revoked_stid(cl, stateid);
6812 return status;
6813}
6814
6815__be32
6816nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6817 stateid_t *stateid,
6818 unsigned short typemask, unsigned short statusmask,
6819 struct nfs4_stid **s, struct nfsd_net *nn)
6820{
6821 __be32 status;
6822 struct nfs4_stid *stid;
6823 bool return_revoked = false;
6824
6825 /*
6826	 * Only return revoked delegations if explicitly asked;
6827	 * otherwise we report a revoked or bad_stateid status.
6828 */
6829 if (statusmask & SC_STATUS_REVOKED)
6830 return_revoked = true;
6831 if (typemask & SC_TYPE_DELEG)
6832 /* Always allow REVOKED for DELEG so we can
6833		 * return the appropriate error.
6834 */
6835 statusmask |= SC_STATUS_REVOKED;
6836
6837 statusmask |= SC_STATUS_ADMIN_REVOKED;
6838
6839 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6840 CLOSE_STATEID(stateid))
6841 return nfserr_bad_stateid;
6842 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6843 if (status == nfserr_stale_clientid) {
6844 if (cstate->session)
6845 return nfserr_bad_stateid;
6846 return nfserr_stale_stateid;
6847 }
6848 if (status)
6849 return status;
6850 stid = find_stateid_by_type(cstate->clp, stateid, typemask, statusmask);
6851 if (!stid)
6852 return nfserr_bad_stateid;
6853 if ((stid->sc_status & SC_STATUS_REVOKED) && !return_revoked) {
6854 nfs4_put_stid(stid);
6855 return nfserr_deleg_revoked;
6856 }
6857 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) {
6858 nfsd40_drop_revoked_stid(cstate->clp, stateid);
6859 nfs4_put_stid(stid);
6860 return nfserr_admin_revoked;
6861 }
6862 *s = stid;
6863 return nfs_ok;
6864}
6865
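/*
 * Map a stateid to an nfsd_file: delegation stateids use the file cached
 * at delegation-grant time, while open and lock stateids pick a readable
 * or writeable open file according to the direction of the I/O.
 */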
6866static struct nfsd_file *
6867nfs4_find_file(struct nfs4_stid *s, int flags)
6868{
6869 struct nfsd_file *ret = NULL;
6870
6871 if (!s || s->sc_status)
6872 return NULL;
6873
6874 switch (s->sc_type) {
6875 case SC_TYPE_DELEG:
6876 spin_lock(&s->sc_file->fi_lock);
6877 ret = nfsd_file_get(s->sc_file->fi_deleg_file);
6878 spin_unlock(&s->sc_file->fi_lock);
6879 break;
6880 case SC_TYPE_OPEN:
6881 case SC_TYPE_LOCK:
6882 if (flags & RD_STATE)
6883 ret = find_readable_file(s->sc_file);
6884 else
6885 ret = find_writeable_file(s->sc_file);
6886 }
6887
6888 return ret;
6889}
6890
6891static __be32
6892nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6893{
6894 __be32 status;
6895
6896 status = nfsd4_check_openowner_confirmed(ols);
6897 if (status)
6898 return status;
6899 return nfs4_check_openmode(ols, flags);
6900}
6901
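/*
 * Find the nfsd_file to use for this I/O: prefer one associated with the
 * stateid (checking permissions with owner-override), otherwise acquire
 * a fresh nfsd_file for the filehandle.
 */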
6902static __be32
6903nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6904 struct nfsd_file **nfp, int flags)
6905{
6906 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6907 struct nfsd_file *nf;
6908 __be32 status;
6909
6910 nf = nfs4_find_file(s, flags);
6911 if (nf) {
6912 status = nfsd_permission(&rqstp->rq_cred,
6913 fhp->fh_export, fhp->fh_dentry,
6914 acc | NFSD_MAY_OWNER_OVERRIDE);
6915 if (status) {
6916 nfsd_file_put(nf);
6917 goto out;
6918 }
6919 } else {
6920 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
6921 if (status)
6922 return status;
6923 }
6924 *nfp = nf;
6925out:
6926 return status;
6927}
6928static void
6929_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6930{
6931 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
6932 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
6933 return;
6934 list_del(&cps->cp_list);
6935 idr_remove(&nn->s2s_cp_stateids,
6936 cps->cp_stateid.cs_stid.si_opaque.so_id);
6937 kfree(cps);
6938}
6939/*
6940 * A READ from an inter-server-to-server COPY will carry a
6941 * copy-notify stateid. Look up that stateid in the idr
6942 * structure and take a reference on it.
6943 */
6944__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6945 struct nfs4_client *clp,
6946 struct nfs4_cpntf_state **cps)
6947{
6948 copy_stateid_t *cps_t;
6949 struct nfs4_cpntf_state *state = NULL;
6950
6951 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
6952 return nfserr_bad_stateid;
6953 spin_lock(&nn->s2s_cp_lock);
6954 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
6955 if (cps_t) {
6956 state = container_of(cps_t, struct nfs4_cpntf_state,
6957 cp_stateid);
6958 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
6959 state = NULL;
6960 goto unlock;
6961 }
6962 if (!clp)
6963 refcount_inc(&state->cp_stateid.cs_count);
6964 else
6965 _free_cpntf_state_locked(nn, state);
6966 }
6967unlock:
6968 spin_unlock(&nn->s2s_cp_lock);
6969 if (!state)
6970 return nfserr_bad_stateid;
6971 if (!clp)
6972 *cps = state;
6973 return 0;
6974}
6975
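/*
 * Resolve a copy-notify stateid presented on a READ: refresh its
 * timestamp, look up the parent client, and return a reference to the
 * original open, lock, or delegation stateid it was derived from.
 */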
6976static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6977 struct nfs4_stid **stid)
6978{
6979 __be32 status;
6980 struct nfs4_cpntf_state *cps = NULL;
6981 struct nfs4_client *found;
6982
6983 status = manage_cpntf_state(nn, st, NULL, &cps);
6984 if (status)
6985 return status;
6986
6987 cps->cpntf_time = ktime_get_boottime_seconds();
6988
6989 status = nfserr_expired;
6990 found = lookup_clientid(&cps->cp_p_clid, true, nn);
6991 if (!found)
6992 goto out;
6993
6994 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6995 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK,
6996 0);
6997 if (*stid)
6998 status = nfs_ok;
6999 else
7000 status = nfserr_bad_stateid;
7001
7002 put_client_renew(found);
7003out:
7004 nfs4_put_cpntf_state(nn, cps);
7005 return status;
7006}
7007
7008void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
7009{
7010 spin_lock(&nn->s2s_cp_lock);
7011 _free_cpntf_state_locked(nn, cps);
7012 spin_unlock(&nn->s2s_cp_lock);
7013}
7014
7015/**
7016 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
7017 * @rqstp: incoming request from client
7018 * @cstate: current compound state
7019 * @fhp: filehandle associated with requested stateid
7020 * @stateid: stateid (provided by client)
7021 * @flags: flags describing type of operation to be done
7022 * @nfp: optional nfsd_file return pointer (may be NULL)
7023 * @cstid: optional returned nfs4_stid pointer (may be NULL)
7024 *
7025 * Given info from the client, look up a nfs4_stid for the operation. On
7026 * success, it returns a reference to the nfs4_stid and/or the nfsd_file
7027 * associated with it.
7028 */
7029__be32
7030nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
7031 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
7032 stateid_t *stateid, int flags, struct nfsd_file **nfp,
7033 struct nfs4_stid **cstid)
7034{
7035 struct net *net = SVC_NET(rqstp);
7036 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7037 struct nfs4_stid *s = NULL;
7038 __be32 status;
7039
7040 if (nfp)
7041 *nfp = NULL;
7042
7043 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
7044 status = check_special_stateids(net, fhp, stateid, flags);
7045 goto done;
7046 }
7047
7048 status = nfsd4_lookup_stateid(cstate, stateid,
7049 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK,
7050 0, &s, nn);
7051 if (status == nfserr_bad_stateid)
7052 status = find_cpntf_state(nn, stateid, &s);
7053 if (status)
7054 return status;
7055 status = nfsd4_stid_check_stateid_generation(stateid, s,
7056 nfsd4_has_session(cstate));
7057 if (status)
7058 goto out;
7059
7060 switch (s->sc_type) {
7061 case SC_TYPE_DELEG:
7062 status = nfs4_check_delegmode(delegstateid(s), flags);
7063 break;
7064 case SC_TYPE_OPEN:
7065 case SC_TYPE_LOCK:
7066 status = nfs4_check_olstateid(openlockstateid(s), flags);
7067 break;
7068 }
7069 if (status)
7070 goto out;
7071 status = nfs4_check_fh(fhp, s);
7072
7073done:
7074 if (status == nfs_ok && nfp)
7075 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
7076out:
7077 if (s) {
7078 if (!status && cstid)
7079 *cstid = s;
7080 else
7081 nfs4_put_stid(s);
7082 }
7083 return status;
7084}
7085
7086/*
7087 * Test if the stateid is valid
7088 */
7089__be32
7090nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7091 union nfsd4_op_u *u)
7092{
7093 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
7094 struct nfsd4_test_stateid_id *stateid;
7095 struct nfs4_client *cl = cstate->clp;
7096
7097 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
7098 stateid->ts_id_status =
7099 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
7100
7101 return nfs_ok;
7102}
7103
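/*
 * FREE_STATEID handling for a lock stateid: it can only be freed if its
 * generation is current and the lockowner no longer holds any locks on
 * the file; otherwise return nfserr_locks_held.
 */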
7104static __be32
7105nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
7106{
7107 struct nfs4_ol_stateid *stp = openlockstateid(s);
7108 __be32 ret;
7109
7110 ret = nfsd4_lock_ol_stateid(stp);
7111 if (ret)
7112 goto out_put_stid;
7113
7114 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
7115 if (ret)
7116 goto out;
7117
7118 ret = nfserr_locks_held;
7119 if (check_for_locks(stp->st_stid.sc_file,
7120 lockowner(stp->st_stateowner)))
7121 goto out;
7122
7123 release_lock_stateid(stp);
7124 ret = nfs_ok;
7125
7126out:
7127 mutex_unlock(&stp->st_mutex);
7128out_put_stid:
7129 nfs4_put_stid(s);
7130 return ret;
7131}
7132
7133__be32
7134nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7135 union nfsd4_op_u *u)
7136{
7137 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
7138 stateid_t *stateid = &free_stateid->fr_stateid;
7139 struct nfs4_stid *s;
7140 struct nfs4_delegation *dp;
7141 struct nfs4_client *cl = cstate->clp;
7142 __be32 ret = nfserr_bad_stateid;
7143
7144 spin_lock(&cl->cl_lock);
7145 s = find_stateid_locked(cl, stateid);
7146 if (!s || s->sc_status & SC_STATUS_CLOSED)
7147 goto out_unlock;
7148 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) {
7149 nfsd4_drop_revoked_stid(s);
7150 ret = nfs_ok;
7151 goto out;
7152 }
7153 spin_lock(&s->sc_lock);
7154 switch (s->sc_type) {
7155 case SC_TYPE_DELEG:
7156 if (s->sc_status & SC_STATUS_REVOKED) {
7157 spin_unlock(&s->sc_lock);
7158 dp = delegstateid(s);
7159 list_del_init(&dp->dl_recall_lru);
7160 spin_unlock(&cl->cl_lock);
7161 nfs4_put_stid(s);
7162 ret = nfs_ok;
7163 goto out;
7164 }
7165 ret = nfserr_locks_held;
7166 break;
7167 case SC_TYPE_OPEN:
7168 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
7169 if (ret)
7170 break;
7171 ret = nfserr_locks_held;
7172 break;
7173 case SC_TYPE_LOCK:
7174 spin_unlock(&s->sc_lock);
7175 refcount_inc(&s->sc_count);
7176 spin_unlock(&cl->cl_lock);
7177 ret = nfsd4_free_lock_stateid(stateid, s);
7178 goto out;
7179 }
7180 spin_unlock(&s->sc_lock);
7181out_unlock:
7182 spin_unlock(&cl->cl_lock);
7183out:
7184 return ret;
7185}
7186
7187static inline int
7188setlkflg(int type)
7189{
7190 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
7191 RD_STATE : WR_STATE;
7192}
7193
7194static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
7195{
7196 struct svc_fh *current_fh = &cstate->current_fh;
7197 struct nfs4_stateowner *sop = stp->st_stateowner;
7198 __be32 status;
7199
7200 status = nfsd4_check_seqid(cstate, sop, seqid);
7201 if (status)
7202 return status;
7203 status = nfsd4_lock_ol_stateid(stp);
7204 if (status != nfs_ok)
7205 return status;
7206 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
7207 if (status == nfs_ok)
7208 status = nfs4_check_fh(current_fh, &stp->st_stid);
7209 if (status != nfs_ok)
7210 mutex_unlock(&stp->st_mutex);
7211 return status;
7212}
7213
7214/**
7215 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
7216 * @cstate: compound state
7217 * @seqid: seqid (provided by client)
7218 * @stateid: stateid (provided by client)
7219 * @typemask: mask of allowable types for this operation
7220 * @statusmask: mask of allowed states: 0 or SC_STATUS_CLOSED
7221 * @stpp: return pointer for the stateid found
7222 * @nn: net namespace for request
7223 *
7224 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
7225 * return it in @stpp. On an nfs_ok return, the returned stateid will
7226 * have its st_mutex locked.
7227 */
7228static __be32
7229nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
7230 stateid_t *stateid,
7231 unsigned short typemask, unsigned short statusmask,
7232 struct nfs4_ol_stateid **stpp,
7233 struct nfsd_net *nn)
7234{
7235 __be32 status;
7236 struct nfs4_stid *s;
7237 struct nfs4_ol_stateid *stp = NULL;
7238
7239 trace_nfsd_preprocess(seqid, stateid);
7240
7241 *stpp = NULL;
7242retry:
7243 status = nfsd4_lookup_stateid(cstate, stateid,
7244 typemask, statusmask, &s, nn);
7245 if (status)
7246 return status;
7247 stp = openlockstateid(s);
7248 if (nfsd4_cstate_assign_replay(cstate, stp->st_stateowner) == -EAGAIN) {
7249 nfs4_put_stateowner(stp->st_stateowner);
7250 goto retry;
7251 }
7252
7253 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
7254 if (!status)
7255 *stpp = stp;
7256 else
7257 nfs4_put_stid(&stp->st_stid);
7258 return status;
7259}
7260
7261static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
7262 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
7263{
7264 __be32 status;
7265 struct nfs4_openowner *oo;
7266 struct nfs4_ol_stateid *stp;
7267
7268 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
7269 SC_TYPE_OPEN, 0, &stp, nn);
7270 if (status)
7271 return status;
7272 oo = openowner(stp->st_stateowner);
7273 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
7274 mutex_unlock(&stp->st_mutex);
7275 nfs4_put_stid(&stp->st_stid);
7276 return nfserr_bad_stateid;
7277 }
7278 *stpp = stp;
7279 return nfs_ok;
7280}
7281
7282__be32
7283nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7284 union nfsd4_op_u *u)
7285{
7286 struct nfsd4_open_confirm *oc = &u->open_confirm;
7287 __be32 status;
7288 struct nfs4_openowner *oo;
7289 struct nfs4_ol_stateid *stp;
7290 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7291
7292 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
7293 cstate->current_fh.fh_dentry);
7294
7295 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
7296 if (status)
7297 return status;
7298
7299 status = nfs4_preprocess_seqid_op(cstate,
7300 oc->oc_seqid, &oc->oc_req_stateid,
7301 SC_TYPE_OPEN, 0, &stp, nn);
7302 if (status)
7303 goto out;
7304 oo = openowner(stp->st_stateowner);
7305 status = nfserr_bad_stateid;
7306 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
7307 mutex_unlock(&stp->st_mutex);
7308 goto put_stateid;
7309 }
7310 oo->oo_flags |= NFS4_OO_CONFIRMED;
7311 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
7312 mutex_unlock(&stp->st_mutex);
7313 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
7314 nfsd4_client_record_create(oo->oo_owner.so_client);
7315 status = nfs_ok;
7316put_stateid:
7317 nfs4_put_stid(&stp->st_stid);
7318out:
7319 nfsd4_bump_seqid(cstate, status);
7320 return status;
7321}
7322
7323static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
7324{
7325 if (!test_access(access, stp))
7326 return;
7327 nfs4_file_put_access(stp->st_stid.sc_file, access);
7328 clear_access(access, stp);
7329}
7330
7331static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
7332{
7333 switch (to_access) {
7334 case NFS4_SHARE_ACCESS_READ:
7335 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
7336 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
7337 break;
7338 case NFS4_SHARE_ACCESS_WRITE:
7339 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
7340 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
7341 break;
7342 case NFS4_SHARE_ACCESS_BOTH:
7343 break;
7344 default:
7345 WARN_ON_ONCE(1);
7346 }
7347}
7348
7349__be32
7350nfsd4_open_downgrade(struct svc_rqst *rqstp,
7351 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
7352{
7353 struct nfsd4_open_downgrade *od = &u->open_downgrade;
7354 __be32 status;
7355 struct nfs4_ol_stateid *stp;
7356 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7357
7358 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
7359 cstate->current_fh.fh_dentry);
7360
7361 /* We don't yet support WANT bits: */
7362 if (od->od_deleg_want)
7363 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
7364 od->od_deleg_want);
7365
7366 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
7367 &od->od_stateid, &stp, nn);
7368 if (status)
7369 goto out;
7370 status = nfserr_inval;
7371 if (!test_access(od->od_share_access, stp)) {
7372 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
7373 stp->st_access_bmap, od->od_share_access);
7374 goto put_stateid;
7375 }
7376 if (!test_deny(od->od_share_deny, stp)) {
7377 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
7378 stp->st_deny_bmap, od->od_share_deny);
7379 goto put_stateid;
7380 }
7381 nfs4_stateid_downgrade(stp, od->od_share_access);
7382 reset_union_bmap_deny(od->od_share_deny, stp);
7383 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
7384 status = nfs_ok;
7385put_stateid:
7386 mutex_unlock(&stp->st_mutex);
7387 nfs4_put_stid(&stp->st_stid);
7388out:
7389 nfsd4_bump_seqid(cstate, status);
7390 return status;
7391}
7392
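/*
 * Unhash an open stateid that is being CLOSEd, along with its lock
 * stateids. For NFSv4.0 the stateid must linger on the close LRU so a
 * CLOSE replay can still be handled, so return true to tell the caller
 * to move it there; with v4.1+, sessions handle replays and the stateid
 * can be freed immediately.
 */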
7393static bool nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
7394{
7395 struct nfs4_client *clp = s->st_stid.sc_client;
7396 bool unhashed;
7397 LIST_HEAD(reaplist);
7398 struct nfs4_ol_stateid *stp;
7399
7400 spin_lock(&clp->cl_lock);
7401 unhashed = unhash_open_stateid(s, &reaplist);
7402
7403 if (clp->cl_minorversion) {
7404 if (unhashed)
7405 put_ol_stateid_locked(s, &reaplist);
7406 spin_unlock(&clp->cl_lock);
7407 list_for_each_entry(stp, &reaplist, st_locks)
7408 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
7409 free_ol_stateid_reaplist(&reaplist);
7410 return false;
7411 } else {
7412 spin_unlock(&clp->cl_lock);
7413 free_ol_stateid_reaplist(&reaplist);
7414 return unhashed;
7415 }
7416}
7417
7418/*
7419 * nfs4_unlock_state() called after encode
7420 */
7421__be32
7422nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7423 union nfsd4_op_u *u)
7424{
7425 struct nfsd4_close *close = &u->close;
7426 __be32 status;
7427 struct nfs4_ol_stateid *stp;
7428 struct net *net = SVC_NET(rqstp);
7429 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7430 bool need_move_to_close_list;
7431
7432 dprintk("NFSD: nfsd4_close on file %pd\n",
7433 cstate->current_fh.fh_dentry);
7434
7435 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
7436 &close->cl_stateid,
7437 SC_TYPE_OPEN, SC_STATUS_CLOSED,
7438 &stp, nn);
7439 nfsd4_bump_seqid(cstate, status);
7440 if (status)
7441 goto out;
7442
7443 spin_lock(&stp->st_stid.sc_client->cl_lock);
7444 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
7445 spin_unlock(&stp->st_stid.sc_client->cl_lock);
7446
7447 /*
7448 * Technically we don't _really_ have to increment or copy it, since
7449 * it should just be gone after this operation and we clobber the
7450 * copied value below, but we continue to do so here just to ensure
7451 * that racing ops see that there was a state change.
7452 */
7453 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
7454
7455 need_move_to_close_list = nfsd4_close_open_stateid(stp);
7456 mutex_unlock(&stp->st_mutex);
7457 if (need_move_to_close_list)
7458 move_to_close_lru(stp, net);
7459
7460 /* v4.1+ suggests that we send a special stateid in here, since the
7461 * clients should just ignore this anyway. Since this is not useful
7462 * for v4.0 clients either, we set it to the special close_stateid
7463 * universally.
7464 *
7465 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
7466 */
7467 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
7468
7469 /* put reference from nfs4_preprocess_seqid_op */
7470 nfs4_put_stid(&stp->st_stid);
7471out:
7472 return status;
7473}
7474
7475__be32
7476nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7477 union nfsd4_op_u *u)
7478{
7479 struct nfsd4_delegreturn *dr = &u->delegreturn;
7480 struct nfs4_delegation *dp;
7481 stateid_t *stateid = &dr->dr_stateid;
7482 struct nfs4_stid *s;
7483 __be32 status;
7484 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7485
7486 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7487 return status;
7488
7489 status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, 0, &s, nn);
7490 if (status)
7491 goto out;
7492 dp = delegstateid(s);
7493 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
7494 if (status)
7495 goto put_stateid;
7496
7497 trace_nfsd_deleg_return(stateid);
7498 destroy_delegation(dp);
7499 smp_mb__after_atomic();
7500 wake_up_var(d_inode(cstate->current_fh.fh_dentry));
7501put_stateid:
7502 nfs4_put_stid(&dp->dl_stid);
7503out:
7504 return status;
7505}
7506
7507/* last octet in a range */
7508static inline u64
7509last_byte_offset(u64 start, u64 len)
7510{
7511 u64 end;
7512
7513 WARN_ON_ONCE(!len);
7514 end = start + len;
7515 return end > start ? end - 1: NFS4_MAX_UINT64;
7516}
7517
7518/*
7519 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7520 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7521 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7522 * locking, this prevents us from being completely protocol-compliant. The
7523 * real solution to this problem is to start using unsigned file offsets in
7524 * the VFS, but this is a very deep change!
7525 */
7526static inline void
7527nfs4_transform_lock_offset(struct file_lock *lock)
7528{
7529 if (lock->fl_start < 0)
7530 lock->fl_start = OFFSET_MAX;
7531 if (lock->fl_end < 0)
7532 lock->fl_end = OFFSET_MAX;
7533}
7534
7535static fl_owner_t
7536nfsd4_lm_get_owner(fl_owner_t owner)
7537{
7538 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7539
7540 nfs4_get_stateowner(&lo->lo_owner);
7541 return owner;
7542}
7543
7544static void
7545nfsd4_lm_put_owner(fl_owner_t owner)
7546{
7547 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7548
7549 if (lo)
7550 nfs4_put_stateowner(&lo->lo_owner);
7551}
7552
7553/* return true if the client owning this lock can be expired */
7554static bool
7555nfsd4_lm_lock_expirable(struct file_lock *cfl)
7556{
7557 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner;
7558 struct nfs4_client *clp = lo->lo_owner.so_client;
7559 struct nfsd_net *nn;
7560
7561 if (try_to_expire_client(clp)) {
7562 nn = net_generic(clp->net, nfsd_net_id);
7563 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7564 return true;
7565 }
7566 return false;
7567}
7568
7569/* schedule laundromat to run immediately and wait for it to complete */
7570static void
7571nfsd4_lm_expire_lock(void)
7572{
7573 flush_workqueue(laundry_wq);
7574}
7575
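/*
 * Called by the locks code when a blocked lock request can now be
 * granted: dequeue the blocked lock (unless something else has already
 * claimed it) and send a CB_NOTIFY_LOCK callback so the client retries
 * its LOCK.
 */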
7576static void
7577nfsd4_lm_notify(struct file_lock *fl)
7578{
7579 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner;
7580 struct net *net = lo->lo_owner.so_client->net;
7581 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7582 struct nfsd4_blocked_lock *nbl = container_of(fl,
7583 struct nfsd4_blocked_lock, nbl_lock);
7584 bool queue = false;
7585
7586 /* An empty list means that something else is going to be using it */
7587 spin_lock(&nn->blocked_locks_lock);
7588 if (!list_empty(&nbl->nbl_list)) {
7589 list_del_init(&nbl->nbl_list);
7590 list_del_init(&nbl->nbl_lru);
7591 queue = true;
7592 }
7593 spin_unlock(&nn->blocked_locks_lock);
7594
7595 if (queue) {
7596 trace_nfsd_cb_notify_lock(lo, nbl);
7597 nfsd4_run_cb(&nbl->nbl_cb);
7598 }
7599}
7600
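/*
 * Callbacks into nfsd from the generic locking code: lm_notify fires when
 * a blocked lock becomes grantable (queueing the CB_NOTIFY_LOCK callback),
 * lm_get_owner/lm_put_owner pin the nfs4_lockowner for as long as the lock
 * holds a reference to it, and lm_lock_expirable/lm_expire_lock let a
 * conflicting lock held by an expirable (courtesy) client be torn down by
 * the laundromat.
 */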
7601static const struct lock_manager_operations nfsd_posix_mng_ops = {
7602 .lm_mod_owner = THIS_MODULE,
7603 .lm_notify = nfsd4_lm_notify,
7604 .lm_get_owner = nfsd4_lm_get_owner,
7605 .lm_put_owner = nfsd4_lm_put_owner,
7606 .lm_lock_expirable = nfsd4_lm_lock_expirable,
7607 .lm_expire_lock = nfsd4_lm_expire_lock,
7608};
7609
7610static inline void
7611nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7612{
7613 struct nfs4_lockowner *lo;
7614
7615 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7616 lo = (struct nfs4_lockowner *) fl->c.flc_owner;
7617 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7618 GFP_KERNEL);
7619 if (!deny->ld_owner.data)
7620 /* We just don't care that much */
7621 goto nevermind;
7622 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7623 } else {
7624nevermind:
7625 deny->ld_owner.len = 0;
7626 deny->ld_owner.data = NULL;
7627 deny->ld_clientid.cl_boot = 0;
7628 deny->ld_clientid.cl_id = 0;
7629 }
7630 deny->ld_start = fl->fl_start;
7631 deny->ld_length = NFS4_MAX_UINT64;
7632 if (fl->fl_end != NFS4_MAX_UINT64)
7633 deny->ld_length = fl->fl_end - fl->fl_start + 1;
7634 deny->ld_type = NFS4_READ_LT;
7635 if (fl->c.flc_type != F_RDLCK)
7636 deny->ld_type = NFS4_WRITE_LT;
7637}
7638
7639static struct nfs4_lockowner *
7640find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7641{
7642 unsigned int strhashval = ownerstr_hashval(owner);
7643 struct nfs4_stateowner *so;
7644
7645 lockdep_assert_held(&clp->cl_lock);
7646
7647 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7648 so_strhash) {
7649 if (so->so_is_open_owner)
7650 continue;
7651 if (same_owner_str(so, owner))
7652 return lockowner(nfs4_get_stateowner(so));
7653 }
7654 return NULL;
7655}
7656
7657static struct nfs4_lockowner *
7658find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7659{
7660 struct nfs4_lockowner *lo;
7661
7662 spin_lock(&clp->cl_lock);
7663 lo = find_lockowner_str_locked(clp, owner);
7664 spin_unlock(&clp->cl_lock);
7665 return lo;
7666}
7667
7668static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
7669{
7670 unhash_lockowner_locked(lockowner(sop));
7671}
7672
7673static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
7674{
7675 struct nfs4_lockowner *lo = lockowner(sop);
7676
7677 kmem_cache_free(lockowner_slab, lo);
7678}
7679
7680static const struct nfs4_stateowner_operations lockowner_ops = {
7681 .so_unhash = nfs4_unhash_lockowner,
7682 .so_free = nfs4_free_lockowner,
7683};
7684
7685/*
7686 * Alloc a lock owner structure.
7687 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
7688 * occurred.
7689 *
7690 * strhashval = ownerstr_hashval
7691 */
7692static struct nfs4_lockowner *
7693alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7694 struct nfs4_ol_stateid *open_stp,
7695 struct nfsd4_lock *lock)
7696{
7697 struct nfs4_lockowner *lo, *ret;
7698
7699 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7700 if (!lo)
7701 return NULL;
7702 INIT_LIST_HEAD(&lo->lo_blocked);
7703 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
7704 lo->lo_owner.so_is_open_owner = 0;
7705 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
7706 lo->lo_owner.so_ops = &lockowner_ops;
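	/*
	 * Another nfsd thread may have installed a lockowner with the same
	 * owner string; if so, use the existing one and free ours.
	 */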
7707 spin_lock(&clp->cl_lock);
7708 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7709 if (ret == NULL) {
7710 list_add(&lo->lo_owner.so_strhash,
7711 &clp->cl_ownerstr_hashtbl[strhashval]);
7712 ret = lo;
7713 } else
7714 nfs4_free_stateowner(&lo->lo_owner);
7715
7716 spin_unlock(&clp->cl_lock);
7717 return ret;
7718}
7719
7720static struct nfs4_ol_stateid *
7721find_lock_stateid(const struct nfs4_lockowner *lo,
7722 const struct nfs4_ol_stateid *ost)
7723{
7724 struct nfs4_ol_stateid *lst;
7725
7726 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7727
7728 /* If ost is not hashed, ost->st_locks will not be valid */
7729 if (!nfs4_ol_stateid_unhashed(ost))
7730 list_for_each_entry(lst, &ost->st_locks, st_locks) {
7731 if (lst->st_stateowner == &lo->lo_owner) {
7732 refcount_inc(&lst->st_stid.sc_count);
7733 return lst;
7734 }
7735 }
7736 return NULL;
7737}
7738
7739static struct nfs4_ol_stateid *
7740init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7741 struct nfs4_file *fp, struct inode *inode,
7742 struct nfs4_ol_stateid *open_stp)
7743{
7744 struct nfs4_client *clp = lo->lo_owner.so_client;
7745 struct nfs4_ol_stateid *retstp;
7746
7747 mutex_init(&stp->st_mutex);
7748 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7749retry:
7750 spin_lock(&clp->cl_lock);
7751 if (nfs4_ol_stateid_unhashed(open_stp))
7752 goto out_close;
7753 retstp = find_lock_stateid(lo, open_stp);
7754 if (retstp)
7755 goto out_found;
7756 refcount_inc(&stp->st_stid.sc_count);
7757 stp->st_stid.sc_type = SC_TYPE_LOCK;
7758 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7759 get_nfs4_file(fp);
7760 stp->st_stid.sc_file = fp;
7761 stp->st_access_bmap = 0;
7762 stp->st_deny_bmap = open_stp->st_deny_bmap;
7763 stp->st_openstp = open_stp;
7764 spin_lock(&fp->fi_lock);
7765 list_add(&stp->st_locks, &open_stp->st_locks);
7766 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7767 list_add(&stp->st_perfile, &fp->fi_stateids);
7768 spin_unlock(&fp->fi_lock);
7769 spin_unlock(&clp->cl_lock);
7770 return stp;
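/*
 * Someone else already created a lock stateid for this lockowner: use that
 * one instead.  If locking it fails (it is on its way to being freed),
 * drop it and retry the lookup from scratch.
 */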
7771out_found:
7772 spin_unlock(&clp->cl_lock);
7773 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7774 nfs4_put_stid(&retstp->st_stid);
7775 goto retry;
7776 }
7777 /* To keep mutex tracking happy */
7778 mutex_unlock(&stp->st_mutex);
7779 return retstp;
7780out_close:
7781 spin_unlock(&clp->cl_lock);
7782 mutex_unlock(&stp->st_mutex);
7783 return NULL;
7784}
7785
7786static struct nfs4_ol_stateid *
7787find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7788 struct inode *inode, struct nfs4_ol_stateid *ost,
7789 bool *new)
7790{
7791 struct nfs4_stid *ns = NULL;
7792 struct nfs4_ol_stateid *lst;
7793 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7794 struct nfs4_client *clp = oo->oo_owner.so_client;
7795
7796 *new = false;
7797 spin_lock(&clp->cl_lock);
7798 lst = find_lock_stateid(lo, ost);
7799 spin_unlock(&clp->cl_lock);
7800 if (lst != NULL) {
7801 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7802 goto out;
7803 nfs4_put_stid(&lst->st_stid);
7804 }
7805 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7806 if (ns == NULL)
7807 return NULL;
7808
7809 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7810 if (lst == openlockstateid(ns))
7811 *new = true;
7812 else
7813 nfs4_put_stid(ns);
7814out:
7815 return lst;
7816}
7817
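/*
 * Nonzero means the range is invalid: a zero length, or a length (other
 * than the special NFS4_MAX_UINT64 "to EOF" value) that would run past the
 * end of the 64-bit offset space, e.g. offset 0xfffffffffffffff0 with
 * length 0x20.
 */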
7818static int
7819check_lock_length(u64 offset, u64 length)
7820{
7821 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7822 (length > ~offset)));
7823}
7824
7825static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7826{
7827 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7828
7829 lockdep_assert_held(&fp->fi_lock);
7830
7831 if (test_access(access, lock_stp))
7832 return;
7833 __nfs4_file_get_access(fp, access);
7834 set_access(access, lock_stp);
7835}
7836
7837static __be32
7838lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7839 struct nfs4_ol_stateid *ost,
7840 struct nfsd4_lock *lock,
7841 struct nfs4_ol_stateid **plst, bool *new)
7842{
7843 __be32 status;
7844 struct nfs4_file *fi = ost->st_stid.sc_file;
7845 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7846 struct nfs4_client *cl = oo->oo_owner.so_client;
7847 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7848 struct nfs4_lockowner *lo;
7849 struct nfs4_ol_stateid *lst;
7850 unsigned int strhashval;
7851
7852 lo = find_lockowner_str(cl, &lock->lk_new_owner);
7853 if (!lo) {
7854 strhashval = ownerstr_hashval(&lock->lk_new_owner);
7855 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7856 if (lo == NULL)
7857 return nfserr_jukebox;
7858 } else {
7859 /* with an existing lockowner, seqids must be the same */
7860 status = nfserr_bad_seqid;
7861 if (!cstate->minorversion &&
7862 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7863 goto out;
7864 }
7865
7866 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7867 if (lst == NULL) {
7868 status = nfserr_jukebox;
7869 goto out;
7870 }
7871
7872 status = nfs_ok;
7873 *plst = lst;
7874out:
7875 nfs4_put_stateowner(&lo->lo_owner);
7876 return status;
7877}
7878
7879/*
7880 * LOCK operation
7881 */
7882__be32
7883nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7884 union nfsd4_op_u *u)
7885{
7886 struct nfsd4_lock *lock = &u->lock;
7887 struct nfs4_openowner *open_sop = NULL;
7888 struct nfs4_lockowner *lock_sop = NULL;
7889 struct nfs4_ol_stateid *lock_stp = NULL;
7890 struct nfs4_ol_stateid *open_stp = NULL;
7891 struct nfs4_file *fp;
7892 struct nfsd_file *nf = NULL;
7893 struct nfsd4_blocked_lock *nbl = NULL;
7894 struct file_lock *file_lock = NULL;
7895 struct file_lock *conflock = NULL;
7896 struct super_block *sb;
7897 __be32 status = 0;
7898 int lkflg;
7899 int err;
7900 bool new = false;
7901 unsigned char type;
7902 unsigned int flags = FL_POSIX;
7903 struct net *net = SVC_NET(rqstp);
7904 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7905
7906	dprintk("NFSD: nfsd4_lock: start=%lld length=%lld\n",
7907 (long long) lock->lk_offset,
7908 (long long) lock->lk_length);
7909
7910 if (check_lock_length(lock->lk_offset, lock->lk_length))
7911 return nfserr_inval;
7912
7913 if ((status = fh_verify(rqstp, &cstate->current_fh,
7914 S_IFREG, NFSD_MAY_LOCK))) {
7915 dprintk("NFSD: nfsd4_lock: permission denied!\n");
7916 return status;
7917 }
7918 sb = cstate->current_fh.fh_dentry->d_sb;
7919
7920 if (lock->lk_is_new) {
7921 if (nfsd4_has_session(cstate))
7922 /* See rfc 5661 18.10.3: given clientid is ignored: */
7923 memcpy(&lock->lk_new_clientid,
7924 &cstate->clp->cl_clientid,
7925 sizeof(clientid_t));
7926
7927 /* validate and update open stateid and open seqid */
7928 status = nfs4_preprocess_confirmed_seqid_op(cstate,
7929 lock->lk_new_open_seqid,
7930 &lock->lk_new_open_stateid,
7931 &open_stp, nn);
7932 if (status)
7933 goto out;
7934 mutex_unlock(&open_stp->st_mutex);
7935 open_sop = openowner(open_stp->st_stateowner);
7936 status = nfserr_bad_stateid;
7937 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
7938 &lock->lk_new_clientid))
7939 goto out;
7940 status = lookup_or_create_lock_state(cstate, open_stp, lock,
7941 &lock_stp, &new);
7942 } else {
7943 status = nfs4_preprocess_seqid_op(cstate,
7944 lock->lk_old_lock_seqid,
7945 &lock->lk_old_lock_stateid,
7946 SC_TYPE_LOCK, 0, &lock_stp,
7947 nn);
7948 }
7949 if (status)
7950 goto out;
7951 lock_sop = lockowner(lock_stp->st_stateowner);
7952
7953 lkflg = setlkflg(lock->lk_type);
7954 status = nfs4_check_openmode(lock_stp, lkflg);
7955 if (status)
7956 goto out;
7957
7958 status = nfserr_grace;
7959 if (locks_in_grace(net) && !lock->lk_reclaim)
7960 goto out;
7961 status = nfserr_no_grace;
7962 if (!locks_in_grace(net) && lock->lk_reclaim)
7963 goto out;
7964
7965 if (lock->lk_reclaim)
7966 flags |= FL_RECLAIM;
7967
7968 fp = lock_stp->st_stid.sc_file;
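	/*
	 * READW_LT/WRITEW_LT are the blocking variants; set FL_SLEEP so the
	 * request can be queued as a blocked lock, but only when the client
	 * has a session or the filesystem's ->lock method is asynchronous.
	 */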
7969 switch (lock->lk_type) {
7970 case NFS4_READW_LT:
7971 if (nfsd4_has_session(cstate) ||
7972 exportfs_lock_op_is_async(sb->s_export_op))
7973 flags |= FL_SLEEP;
7974 fallthrough;
7975 case NFS4_READ_LT:
7976 spin_lock(&fp->fi_lock);
7977 nf = find_readable_file_locked(fp);
7978 if (nf)
7979 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7980 spin_unlock(&fp->fi_lock);
7981 type = F_RDLCK;
7982 break;
7983 case NFS4_WRITEW_LT:
7984 if (nfsd4_has_session(cstate) ||
7985 exportfs_lock_op_is_async(sb->s_export_op))
7986 flags |= FL_SLEEP;
7987 fallthrough;
7988 case NFS4_WRITE_LT:
7989 spin_lock(&fp->fi_lock);
7990 nf = find_writeable_file_locked(fp);
7991 if (nf)
7992 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7993 spin_unlock(&fp->fi_lock);
7994 type = F_WRLCK;
7995 break;
7996 default:
7997 status = nfserr_inval;
7998 goto out;
7999 }
8000
8001 if (!nf) {
8002 status = nfserr_openmode;
8003 goto out;
8004 }
8005
8006 /*
8007 * Most filesystems with their own ->lock operations will block
8008 * the nfsd thread waiting to acquire the lock. That leads to
8009 * deadlocks (we don't want every nfsd thread tied up waiting
8010 * for file locks), so don't attempt blocking lock notifications
8011 * on those filesystems:
8012 */
8013 if (!exportfs_lock_op_is_async(sb->s_export_op))
8014 flags &= ~FL_SLEEP;
8015
8016 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
8017 if (!nbl) {
8018 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
8019 status = nfserr_jukebox;
8020 goto out;
8021 }
8022
8023 file_lock = &nbl->nbl_lock;
8024 file_lock->c.flc_type = type;
8025 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
8026 file_lock->c.flc_pid = current->tgid;
8027 file_lock->c.flc_file = nf->nf_file;
8028 file_lock->c.flc_flags = flags;
8029 file_lock->fl_lmops = &nfsd_posix_mng_ops;
8030 file_lock->fl_start = lock->lk_offset;
8031 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
8032 nfs4_transform_lock_offset(file_lock);
8033
8034 conflock = locks_alloc_lock();
8035 if (!conflock) {
8036 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
8037 status = nfserr_jukebox;
8038 goto out;
8039 }
8040
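	/*
	 * A lock that may block is queued on the lockowner's and the
	 * per-net blocked-locks lists, with an extra reference held while
	 * it sits there, so that nfsd4_lm_notify() can tell the client
	 * when the lock becomes grantable.
	 */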
8041 if (flags & FL_SLEEP) {
8042 nbl->nbl_time = ktime_get_boottime_seconds();
8043 spin_lock(&nn->blocked_locks_lock);
8044 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
8045 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
8046 kref_get(&nbl->nbl_kref);
8047 spin_unlock(&nn->blocked_locks_lock);
8048 }
8049
8050 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
8051 switch (err) {
8052 case 0: /* success! */
8053 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
8054 status = 0;
8055 if (lock->lk_reclaim)
8056 nn->somebody_reclaimed = true;
8057 break;
8058 case FILE_LOCK_DEFERRED:
8059 kref_put(&nbl->nbl_kref, free_nbl);
8060 nbl = NULL;
8061 fallthrough;
8062 case -EAGAIN: /* conflock holds conflicting lock */
8063 status = nfserr_denied;
8064 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
8065 nfs4_set_lock_denied(conflock, &lock->lk_denied);
8066 break;
8067 case -EDEADLK:
8068 status = nfserr_deadlock;
8069 break;
8070 default:
8071		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
8072 status = nfserrno(err);
8073 break;
8074 }
8075out:
8076 if (nbl) {
8077 /* dequeue it if we queued it before */
8078 if (flags & FL_SLEEP) {
8079 spin_lock(&nn->blocked_locks_lock);
8080 if (!list_empty(&nbl->nbl_list) &&
8081 !list_empty(&nbl->nbl_lru)) {
8082 list_del_init(&nbl->nbl_list);
8083 list_del_init(&nbl->nbl_lru);
8084 kref_put(&nbl->nbl_kref, free_nbl);
8085 }
8086			/* otherwise the nbl may already be linked onto a reaplist via one of these list heads */
8087 spin_unlock(&nn->blocked_locks_lock);
8088 }
8089 free_blocked_lock(nbl);
8090 }
8091 if (nf)
8092 nfsd_file_put(nf);
8093 if (lock_stp) {
8094 /* Bump seqid manually if the 4.0 replay owner is openowner */
8095 if (cstate->replay_owner &&
8096 cstate->replay_owner != &lock_sop->lo_owner &&
8097 seqid_mutating_err(ntohl(status)))
8098 lock_sop->lo_owner.so_seqid++;
8099
8100 /*
8101 * If this is a new, never-before-used stateid, and we are
8102 * returning an error, then just go ahead and release it.
8103 */
8104 if (status && new)
8105 release_lock_stateid(lock_stp);
8106
8107 mutex_unlock(&lock_stp->st_mutex);
8108
8109 nfs4_put_stid(&lock_stp->st_stid);
8110 }
8111 if (open_stp)
8112 nfs4_put_stid(&open_stp->st_stid);
8113 nfsd4_bump_seqid(cstate, status);
8114 if (conflock)
8115 locks_free_lock(conflock);
8116 return status;
8117}
8118
8119void nfsd4_lock_release(union nfsd4_op_u *u)
8120{
8121 struct nfsd4_lock *lock = &u->lock;
8122 struct nfsd4_lock_denied *deny = &lock->lk_denied;
8123
8124 kfree(deny->ld_owner.data);
8125}
8126
8127/*
8128 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
8129 * so we do a temporary open here just to get an open file to pass to
8130 * vfs_test_lock.
8131 */
8132static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
8133{
8134 struct nfsd_file *nf;
8135 struct inode *inode;
8136 __be32 err;
8137
8138 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
8139 if (err)
8140 return err;
8141 inode = fhp->fh_dentry->d_inode;
8142 inode_lock(inode); /* to block new leases till after test_lock: */
8143 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
8144 if (err)
8145 goto out;
8146 lock->c.flc_file = nf->nf_file;
8147 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
8148 lock->c.flc_file = NULL;
8149out:
8150 inode_unlock(inode);
8151 nfsd_file_put(nf);
8152 return err;
8153}
8154
8155/*
8156 * LOCKT operation
8157 */
8158__be32
8159nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
8160 union nfsd4_op_u *u)
8161{
8162 struct nfsd4_lockt *lockt = &u->lockt;
8163 struct file_lock *file_lock = NULL;
8164 struct nfs4_lockowner *lo = NULL;
8165 __be32 status;
8166 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8167
8168 if (locks_in_grace(SVC_NET(rqstp)))
8169 return nfserr_grace;
8170
8171 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
8172 return nfserr_inval;
8173
8174 if (!nfsd4_has_session(cstate)) {
8175 status = set_client(&lockt->lt_clientid, cstate, nn);
8176 if (status)
8177 goto out;
8178 }
8179
8180 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
8181 goto out;
8182
8183 file_lock = locks_alloc_lock();
8184 if (!file_lock) {
8185 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
8186 status = nfserr_jukebox;
8187 goto out;
8188 }
8189
8190 switch (lockt->lt_type) {
8191 case NFS4_READ_LT:
8192 case NFS4_READW_LT:
8193 file_lock->c.flc_type = F_RDLCK;
8194 break;
8195 case NFS4_WRITE_LT:
8196 case NFS4_WRITEW_LT:
8197 file_lock->c.flc_type = F_WRLCK;
8198 break;
8199 default:
8200 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
8201 status = nfserr_inval;
8202 goto out;
8203 }
8204
8205 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
8206 if (lo)
8207 file_lock->c.flc_owner = (fl_owner_t)lo;
8208 file_lock->c.flc_pid = current->tgid;
8209 file_lock->c.flc_flags = FL_POSIX;
8210
8211 file_lock->fl_start = lockt->lt_offset;
8212 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
8213
8214 nfs4_transform_lock_offset(file_lock);
8215
8216 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
8217 if (status)
8218 goto out;
8219
8220 if (file_lock->c.flc_type != F_UNLCK) {
8221 status = nfserr_denied;
8222 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
8223 }
8224out:
8225 if (lo)
8226 nfs4_put_stateowner(&lo->lo_owner);
8227 if (file_lock)
8228 locks_free_lock(file_lock);
8229 return status;
8230}
8231
8232void nfsd4_lockt_release(union nfsd4_op_u *u)
8233{
8234 struct nfsd4_lockt *lockt = &u->lockt;
8235 struct nfsd4_lock_denied *deny = &lockt->lt_denied;
8236
8237 kfree(deny->ld_owner.data);
8238}
8239
8240__be32
8241nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
8242 union nfsd4_op_u *u)
8243{
8244 struct nfsd4_locku *locku = &u->locku;
8245 struct nfs4_ol_stateid *stp;
8246 struct nfsd_file *nf = NULL;
8247 struct file_lock *file_lock = NULL;
8248 __be32 status;
8249 int err;
8250 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8251
8252	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
8253 (long long) locku->lu_offset,
8254 (long long) locku->lu_length);
8255
8256 if (check_lock_length(locku->lu_offset, locku->lu_length))
8257 return nfserr_inval;
8258
8259 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
8260 &locku->lu_stateid, SC_TYPE_LOCK, 0,
8261 &stp, nn);
8262 if (status)
8263 goto out;
8264 nf = find_any_file(stp->st_stid.sc_file);
8265 if (!nf) {
8266 status = nfserr_lock_range;
8267 goto put_stateid;
8268 }
8269 file_lock = locks_alloc_lock();
8270 if (!file_lock) {
8271 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
8272 status = nfserr_jukebox;
8273 goto put_file;
8274 }
8275
8276 file_lock->c.flc_type = F_UNLCK;
8277 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
8278 file_lock->c.flc_pid = current->tgid;
8279 file_lock->c.flc_file = nf->nf_file;
8280 file_lock->c.flc_flags = FL_POSIX;
8281 file_lock->fl_lmops = &nfsd_posix_mng_ops;
8282 file_lock->fl_start = locku->lu_offset;
8283
8284 file_lock->fl_end = last_byte_offset(locku->lu_offset,
8285 locku->lu_length);
8286 nfs4_transform_lock_offset(file_lock);
8287
8288 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
8289 if (err) {
8290 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
8291 goto out_nfserr;
8292 }
8293 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
8294put_file:
8295 nfsd_file_put(nf);
8296put_stateid:
8297 mutex_unlock(&stp->st_mutex);
8298 nfs4_put_stid(&stp->st_stid);
8299out:
8300 nfsd4_bump_seqid(cstate, status);
8301 if (file_lock)
8302 locks_free_lock(file_lock);
8303 return status;
8304
8305out_nfserr:
8306 status = nfserrno(err);
8307 goto put_file;
8308}
8309
8310/*
8311 * returns
8312 * true: locks held by lockowner
8313 * false: no locks held by lockowner
8314 */
8315static bool
8316check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
8317{
8318 struct file_lock *fl;
8319 int status = false;
8320 struct nfsd_file *nf;
8321 struct inode *inode;
8322 struct file_lock_context *flctx;
8323
8324 spin_lock(&fp->fi_lock);
8325 nf = find_any_file_locked(fp);
8326 if (!nf) {
8327 /* Any valid lock stateid should have some sort of access */
8328 WARN_ON_ONCE(1);
8329 goto out;
8330 }
8331
8332 inode = file_inode(nf->nf_file);
8333 flctx = locks_inode_context(inode);
8334
8335 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
8336 spin_lock(&flctx->flc_lock);
8337 for_each_file_lock(fl, &flctx->flc_posix) {
8338 if (fl->c.flc_owner == (fl_owner_t)lowner) {
8339 status = true;
8340 break;
8341 }
8342 }
8343 spin_unlock(&flctx->flc_lock);
8344 }
8345out:
8346 spin_unlock(&fp->fi_lock);
8347 return status;
8348}
8349
8350/**
8351 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
8352 * @rqstp: RPC transaction
8353 * @cstate: NFSv4 COMPOUND state
8354 * @u: RELEASE_LOCKOWNER arguments
8355 *
8356 * Check if there are any locks still held and if not, free the lockowner
8357 * and any lock state that is owned.
8358 *
8359 * Return values:
8360 * %nfs_ok: lockowner released or not found
8361 * %nfserr_locks_held: lockowner still in use
8362 * %nfserr_stale_clientid: clientid no longer active
8363 * %nfserr_expired: clientid not recognized
8364 */
8365__be32
8366nfsd4_release_lockowner(struct svc_rqst *rqstp,
8367 struct nfsd4_compound_state *cstate,
8368 union nfsd4_op_u *u)
8369{
8370 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
8371 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8372 clientid_t *clid = &rlockowner->rl_clientid;
8373 struct nfs4_ol_stateid *stp;
8374 struct nfs4_lockowner *lo;
8375 struct nfs4_client *clp;
8376 LIST_HEAD(reaplist);
8377 __be32 status;
8378
8379 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
8380 clid->cl_boot, clid->cl_id);
8381
8382 status = set_client(clid, cstate, nn);
8383 if (status)
8384 return status;
8385 clp = cstate->clp;
8386
8387 spin_lock(&clp->cl_lock);
8388 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
8389 if (!lo) {
8390 spin_unlock(&clp->cl_lock);
8391 return nfs_ok;
8392 }
8393
8394 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
8395 if (check_for_locks(stp->st_stid.sc_file, lo)) {
8396 spin_unlock(&clp->cl_lock);
8397 nfs4_put_stateowner(&lo->lo_owner);
8398 return nfserr_locks_held;
8399 }
8400 }
8401 unhash_lockowner_locked(lo);
8402 while (!list_empty(&lo->lo_owner.so_stateids)) {
8403 stp = list_first_entry(&lo->lo_owner.so_stateids,
8404 struct nfs4_ol_stateid,
8405 st_perstateowner);
8406 unhash_lock_stateid(stp);
8407 put_ol_stateid_locked(stp, &reaplist);
8408 }
8409 spin_unlock(&clp->cl_lock);
8410
8411 free_ol_stateid_reaplist(&reaplist);
8412 remove_blocked_locks(lo);
8413 nfs4_put_stateowner(&lo->lo_owner);
8414 return nfs_ok;
8415}
8416
8417static inline struct nfs4_client_reclaim *
8418alloc_reclaim(void)
8419{
8420 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
8421}
8422
8423bool
8424nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
8425{
8426 struct nfs4_client_reclaim *crp;
8427
8428 crp = nfsd4_find_reclaim_client(name, nn);
8429 return (crp && crp->cr_clp);
8430}
8431
8432/*
8433 * failure => all reset bets are off, nfserr_no_grace...
8434 *
8435 * The caller is responsible for freeing name.data if NULL is returned (it
8436 * will be freed in nfs4_remove_reclaim_record in the normal case).
8437 */
8438struct nfs4_client_reclaim *
8439nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
8440 struct nfsd_net *nn)
8441{
8442 unsigned int strhashval;
8443 struct nfs4_client_reclaim *crp;
8444
8445 crp = alloc_reclaim();
8446 if (crp) {
8447 strhashval = clientstr_hashval(name);
8448 INIT_LIST_HEAD(&crp->cr_strhash);
8449 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
8450 crp->cr_name.data = name.data;
8451 crp->cr_name.len = name.len;
8452 crp->cr_princhash.data = princhash.data;
8453 crp->cr_princhash.len = princhash.len;
8454 crp->cr_clp = NULL;
8455 nn->reclaim_str_hashtbl_size++;
8456 }
8457 return crp;
8458}
8459
8460void
8461nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
8462{
8463 list_del(&crp->cr_strhash);
8464 kfree(crp->cr_name.data);
8465 kfree(crp->cr_princhash.data);
8466 kfree(crp);
8467 nn->reclaim_str_hashtbl_size--;
8468}
8469
8470void
8471nfs4_release_reclaim(struct nfsd_net *nn)
8472{
8473 struct nfs4_client_reclaim *crp = NULL;
8474 int i;
8475
8476 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8477 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
8478 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
8479 struct nfs4_client_reclaim, cr_strhash);
8480 nfs4_remove_reclaim_record(crp, nn);
8481 }
8482 }
8483 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
8484}
8485
8486/*
8487 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
8488struct nfs4_client_reclaim *
8489nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
8490{
8491 unsigned int strhashval;
8492 struct nfs4_client_reclaim *crp = NULL;
8493
8494 strhashval = clientstr_hashval(name);
8495 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
8496 if (compare_blob(&crp->cr_name, &name) == 0) {
8497 return crp;
8498 }
8499 }
8500 return NULL;
8501}
8502
8503__be32
8504nfs4_check_open_reclaim(struct nfs4_client *clp)
8505{
8506 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
8507 return nfserr_no_grace;
8508
8509 if (nfsd4_client_record_check(clp))
8510 return nfserr_reclaim_bad;
8511
8512 return nfs_ok;
8513}
8514
8515/*
8516 * Since the lifetime of a delegation isn't limited to that of an open, a
8517 * client may quite reasonably hang on to a delegation as long as it has
8518 * the inode cached. This becomes an obvious problem the first time a
8519 * client's inode cache approaches the size of the server's total memory.
8520 *
8521 * For now we avoid this problem by imposing a hard limit on the number
8522 * of delegations, which varies according to the server's memory size.
8523 */
8524static void
8525set_max_delegations(void)
8526{
8527 /*
8528 * Allow at most 4 delegations per megabyte of RAM. Quick
8529 * estimates suggest that in the worst case (where every delegation
8530 * is for a different inode), a delegation could take about 1.5K,
8531 * giving a worst case usage of about 6% of memory.
8532 */
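	/*
	 * For example, with 4K pages (PAGE_SHIFT == 12) the shift below is
	 * 20 - 2 - 12 = 6, i.e. one delegation per 64 free pages, which is
	 * 4 delegations per megabyte.
	 */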
8533 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
8534}
8535
8536static int nfs4_state_create_net(struct net *net)
8537{
8538 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8539 int i;
8540
8541 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8542 sizeof(struct list_head),
8543 GFP_KERNEL);
8544 if (!nn->conf_id_hashtbl)
8545 goto err;
8546 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8547 sizeof(struct list_head),
8548 GFP_KERNEL);
8549 if (!nn->unconf_id_hashtbl)
8550 goto err_unconf_id;
8551 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
8552 sizeof(struct list_head),
8553 GFP_KERNEL);
8554 if (!nn->sessionid_hashtbl)
8555 goto err_sessionid;
8556
8557 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8558 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
8559 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
8560 }
8561 for (i = 0; i < SESSION_HASH_SIZE; i++)
8562 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
8563 nn->conf_name_tree = RB_ROOT;
8564 nn->unconf_name_tree = RB_ROOT;
8565 nn->boot_time = ktime_get_real_seconds();
8566 nn->grace_ended = false;
8567 nn->nfsd4_manager.block_opens = true;
8568 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
8569 INIT_LIST_HEAD(&nn->client_lru);
8570 INIT_LIST_HEAD(&nn->close_lru);
8571 INIT_LIST_HEAD(&nn->del_recall_lru);
8572 spin_lock_init(&nn->client_lock);
8573 spin_lock_init(&nn->s2s_cp_lock);
8574 idr_init(&nn->s2s_cp_stateids);
8575 atomic_set(&nn->pending_async_copies, 0);
8576
8577 spin_lock_init(&nn->blocked_locks_lock);
8578 INIT_LIST_HEAD(&nn->blocked_locks_lru);
8579
8580 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
8581 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
8582 get_net(net);
8583
8584 nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client");
8585 if (!nn->nfsd_client_shrinker)
8586 goto err_shrinker;
8587
8588 nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan;
8589 nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count;
8590 nn->nfsd_client_shrinker->private_data = nn;
8591
8592 shrinker_register(nn->nfsd_client_shrinker);
8593
8594 return 0;
8595
8596err_shrinker:
8597 put_net(net);
8598 kfree(nn->sessionid_hashtbl);
8599err_sessionid:
8600 kfree(nn->unconf_id_hashtbl);
8601err_unconf_id:
8602 kfree(nn->conf_id_hashtbl);
8603err:
8604 return -ENOMEM;
8605}
8606
8607static void
8608nfs4_state_destroy_net(struct net *net)
8609{
8610 int i;
8611 struct nfs4_client *clp = NULL;
8612 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8613
8614 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8615 while (!list_empty(&nn->conf_id_hashtbl[i])) {
8616 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8617 destroy_client(clp);
8618 }
8619 }
8620
8621 WARN_ON(!list_empty(&nn->blocked_locks_lru));
8622
8623 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8624 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
8625 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8626 destroy_client(clp);
8627 }
8628 }
8629
8630 kfree(nn->sessionid_hashtbl);
8631 kfree(nn->unconf_id_hashtbl);
8632 kfree(nn->conf_id_hashtbl);
8633 put_net(net);
8634}
8635
8636int
8637nfs4_state_start_net(struct net *net)
8638{
8639 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8640 int ret;
8641
8642 ret = nfs4_state_create_net(net);
8643 if (ret)
8644 return ret;
8645 locks_start_grace(net, &nn->nfsd4_manager);
8646 nfsd4_client_tracking_init(net);
8647 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
8648 goto skip_grace;
8649 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
8650 nn->nfsd4_grace, net->ns.inum);
8651 trace_nfsd_grace_start(nn);
8652 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
8653 return 0;
8654
8655skip_grace:
8656 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
8657 net->ns.inum);
8658 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
8659 nfsd4_end_grace(nn);
8660 return 0;
8661}
8662
8663/* initialization to perform when the nfsd service is started: */
8664
8665int
8666nfs4_state_start(void)
8667{
8668 int ret;
8669
8670 ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
8671 if (ret)
8672 return ret;
8673
8674 set_max_delegations();
8675 return 0;
8676}
8677
8678void
8679nfs4_state_shutdown_net(struct net *net)
8680{
8681 struct nfs4_delegation *dp = NULL;
8682 struct list_head *pos, *next, reaplist;
8683 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8684
8685 shrinker_free(nn->nfsd_client_shrinker);
8686 cancel_work(&nn->nfsd_shrinker_work);
8687 cancel_delayed_work_sync(&nn->laundromat_work);
8688 locks_end_grace(&nn->nfsd4_manager);
8689
8690 INIT_LIST_HEAD(&reaplist);
8691 spin_lock(&state_lock);
8692 list_for_each_safe(pos, next, &nn->del_recall_lru) {
8693		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
8694 unhash_delegation_locked(dp, SC_STATUS_CLOSED);
8695 list_add(&dp->dl_recall_lru, &reaplist);
8696 }
8697 spin_unlock(&state_lock);
8698 list_for_each_safe(pos, next, &reaplist) {
8699		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
8700 list_del_init(&dp->dl_recall_lru);
8701 destroy_unhashed_deleg(dp);
8702 }
8703
8704 nfsd4_client_tracking_exit(net);
8705 nfs4_state_destroy_net(net);
8706#ifdef CONFIG_NFSD_V4_2_INTER_SSC
8707 nfsd4_ssc_shutdown_umount(nn);
8708#endif
8709}
8710
8711void
8712nfs4_state_shutdown(void)
8713{
8714 rhltable_destroy(&nfs4_file_rhltable);
8715}
8716
8717static void
8718get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8719{
8720 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
8721 CURRENT_STATEID(stateid))
8722 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
8723}
8724
8725static void
8726put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8727{
8728 if (cstate->minorversion) {
8729 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
8730 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8731 }
8732}
8733
8734void
8735clear_current_stateid(struct nfsd4_compound_state *cstate)
8736{
8737 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8738}
8739
8740/*
8741 * functions to set current state id
8742 */
8743void
8744nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
8745 union nfsd4_op_u *u)
8746{
8747 put_stateid(cstate, &u->open_downgrade.od_stateid);
8748}
8749
8750void
8751nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
8752 union nfsd4_op_u *u)
8753{
8754 put_stateid(cstate, &u->open.op_stateid);
8755}
8756
8757void
8758nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
8759 union nfsd4_op_u *u)
8760{
8761 put_stateid(cstate, &u->close.cl_stateid);
8762}
8763
8764void
8765nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
8766 union nfsd4_op_u *u)
8767{
8768 put_stateid(cstate, &u->lock.lk_resp_stateid);
8769}
8770
8771/*
8772 * functions to consume current state id
8773 */
8774
8775void
8776nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
8777 union nfsd4_op_u *u)
8778{
8779 get_stateid(cstate, &u->open_downgrade.od_stateid);
8780}
8781
8782void
8783nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
8784 union nfsd4_op_u *u)
8785{
8786 get_stateid(cstate, &u->delegreturn.dr_stateid);
8787}
8788
8789void
8790nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
8791 union nfsd4_op_u *u)
8792{
8793 get_stateid(cstate, &u->free_stateid.fr_stateid);
8794}
8795
8796void
8797nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
8798 union nfsd4_op_u *u)
8799{
8800 get_stateid(cstate, &u->setattr.sa_stateid);
8801}
8802
8803void
8804nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
8805 union nfsd4_op_u *u)
8806{
8807 get_stateid(cstate, &u->close.cl_stateid);
8808}
8809
8810void
8811nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
8812 union nfsd4_op_u *u)
8813{
8814 get_stateid(cstate, &u->locku.lu_stateid);
8815}
8816
8817void
8818nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
8819 union nfsd4_op_u *u)
8820{
8821 get_stateid(cstate, &u->read.rd_stateid);
8822}
8823
8824void
8825nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
8826 union nfsd4_op_u *u)
8827{
8828 get_stateid(cstate, &u->write.wr_stateid);
8829}
8830
8831/**
8832 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
8833 * @rqstp: RPC transaction context
8834 * @dentry: dentry of inode to be checked for a conflict
8835 * @modified: return true if file was modified
8836 * @size: new size of file if modified is true
8837 *
8838 * This function is called when there is a conflict between a write
8839 * delegation and a change/size GETATTR from another client. The server
8840 * must either use the CB_GETATTR to get the current values of the
8841 * attributes from the client that holds the delegation or recall the
8842 * delegation before replying to the GETATTR. See RFC 8881 section
8843 * 18.7.4.
8844 *
8845 * Returns 0 if there is no conflict; otherwise an nfs_stat
8846 * code is returned.
8847 */
8848__be32
8849nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
8850 bool *modified, u64 *size)
8851{
8852 __be32 status;
8853 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8854 struct file_lock_context *ctx;
8855 struct nfs4_delegation *dp = NULL;
8856 struct file_lease *fl;
8857 struct iattr attrs;
8858 struct nfs4_cb_fattr *ncf;
8859 struct inode *inode = d_inode(dentry);
8860
8861 *modified = false;
8862 ctx = locks_inode_context(inode);
8863 if (!ctx)
8864 return 0;
8865
8866#define NON_NFSD_LEASE ((void *)1)
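/* sentinel: a conflicting write lease exists, but it is not held by nfsd */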
8867
8868 spin_lock(&ctx->flc_lock);
8869 for_each_file_lock(fl, &ctx->flc_lease) {
8870 if (fl->c.flc_flags == FL_LAYOUT)
8871 continue;
8872 if (fl->c.flc_type == F_WRLCK) {
8873 if (fl->fl_lmops == &nfsd_lease_mng_ops)
8874 dp = fl->c.flc_owner;
8875 else
8876 dp = NON_NFSD_LEASE;
8877 }
8878 break;
8879 }
8880 if (dp == NULL || dp == NON_NFSD_LEASE ||
8881 dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
8882 spin_unlock(&ctx->flc_lock);
8883 if (dp == NON_NFSD_LEASE) {
8884 status = nfserrno(nfsd_open_break_lease(inode,
8885 NFSD_MAY_READ));
8886 if (status != nfserr_jukebox ||
8887 !nfsd_wait_for_delegreturn(rqstp, inode))
8888 return status;
8889 }
8890 return 0;
8891 }
8892
8893 nfsd_stats_wdeleg_getattr_inc(nn);
8894 refcount_inc(&dp->dl_stid.sc_count);
8895 ncf = &dp->dl_cb_fattr;
8896 nfs4_cb_getattr(&dp->dl_cb_fattr);
8897 spin_unlock(&ctx->flc_lock);
8898
8899 wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
8900 TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
8901 if (ncf->ncf_cb_status) {
8902 /* Recall delegation only if client didn't respond */
8903 status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
8904 if (status != nfserr_jukebox ||
8905 !nfsd_wait_for_delegreturn(rqstp, inode))
8906 goto out_status;
8907 }
8908 if (!ncf->ncf_file_modified &&
8909 (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
8910 ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
8911 ncf->ncf_file_modified = true;
8912 if (ncf->ncf_file_modified) {
8913 int err;
8914
8915 /*
8916 * Per section 10.4.3 of RFC 8881, the server would
8917 * not update the file's metadata with the client's
8918			 * modified size.
8919 */
8920 attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
8921 attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
8922 inode_lock(inode);
8923 err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
8924 inode_unlock(inode);
8925 if (err) {
8926 status = nfserrno(err);
8927 goto out_status;
8928 }
8929 ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
8930 *size = ncf->ncf_cur_fsize;
8931 *modified = true;
8932 }
8933 status = 0;
8934out_status:
8935 nfs4_put_stid(&dp->dl_stid);
8936 return status;
8937}