// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
        struct rb_root rb_head;
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static struct kmem_cache *drc_slab;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
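/*
 * Worked example for the 1GB row above, assuming 4KB pages (PAGE_SHIFT == 12):
 * low_pages = 262144, int_sqrt(262144) = 512, so 16 * 512 = 8192, shifted
 * left by (PAGE_SHIFT - 10) = 2 bits, giving the 32768 entries in the table.
 */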
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
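/* e.g. a 32768-entry limit yields 32768 / 64 = 512 buckets (maskbits == 9) */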
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
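
/*
 * Allocate a DRC entry and key it from the Call in @rqstp: XID, procedure,
 * source address and port, transport protocol, version, argument length,
 * and the payload checksum passed in @csum.
 */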
static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
                       struct nfsd_net *nn)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}

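/*
 * Release a cache entry: free any cached reply data, unlink the entry from
 * its bucket's rb-tree and LRU list if it was ever inserted, and return it
 * to the slab. Callers must hold the bucket's cache_lock, except for
 * entries that were never inserted (c_state == RC_UNUSED).
 */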
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                             struct nfsd_net *nn)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
                kfree(rp->c_replvec.iov_base);
        }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
        }
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                      struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(b, rp, nn);
        spin_unlock(&b->cache_lock);
}

int nfsd_drc_slab_create(void)
{
        drc_slab = kmem_cache_create("nfsd_drc",
                                     sizeof(struct svc_cacherep), 0, 0, NULL);
        return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
        kmem_cache_destroy(drc_slab);
}

static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
{
        return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}

static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
{
        nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}

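/*
 * Per-netns DRC setup: size the cache, register the shrinker, and allocate
 * the bucket array. Returns -ENOMEM on any failure, after unwinding
 * whatever had already been set up.
 */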
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        status = nfsd_reply_cache_stats_init(nn);
        if (status)
                goto out_nomem;

        nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker.seeks = 1;
        status = register_shrinker(&nn->nfsd_reply_cache_shrinker,
                                   "nfsd-reply:%s", nn->nfsd_name);
        if (status)
                goto out_stats_destroy;

        nn->drc_hashtbl = kvzalloc(array_size(hashsize,
                                   sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
        if (!nn->drc_hashtbl)
                goto out_shrinker;

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_shrinker:
        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_stats_destroy:
        nfsd_reply_cache_stats_destroy(nn);
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct svc_cacherep *rp;
        unsigned int i;

        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;

                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                                     rp, nn);
                }
        }
        nfsd_reply_cache_stats_destroy(nn);

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;
}

/*
 * Move cache entry to end of LRU list, and refresh its timestamp.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}

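/*
 * Map a Call's XID to its DRC bucket by hashing the raw (wire-order) XID
 * down to nn->maskbits bits.
 */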
static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
        unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

        return &nn->drc_hashtbl[hash];
}

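/*
 * Free expired and excess entries from bucket @b, oldest first, skipping
 * entries whose calls are still in progress. A @max of zero means no limit.
 * Caller must hold @b->cache_lock.
 */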
static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
                         unsigned int max)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(b, rp, nn);
                if (max && freed++ > max)
                        break;
        }
        return freed;
}

static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
        return prune_bucket(b, nn, 3);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
        unsigned int i;
        long freed = 0;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b, nn, 0);
                spin_unlock(&b->cache_lock);
        }
        return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

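/*
 * Compare two cache keys, memcmp()-style: the result is negative, zero, or
 * positive. As a side effect, an XID match with a checksum mismatch is
 * counted and traced as a payload miss, since it can indicate an XID that
 * was reused for a different request.
 */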
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
                   const struct svc_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum) {
                nfsd_stats_payload_misses_inc(nn);
                trace_nfsd_drc_mismatch(nn, key, rp);
        }

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the bucket's red-black tree for an entry matching @key.
 * Must be called with the bucket's cache_lock held. Returns the found
 * entry, or inserts @key and returns it if no entry matches.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
                  struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *ret = key;
        struct rb_node **p = &b->rb_head.rb_node,
                       *parent = NULL;
        unsigned int entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct svc_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a cache miss followed by an insert, a replacement
 * entry is preallocated before the bucket is searched under its lock;
 * if a matching entry is already present, the preallocated entry is
 * freed again.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct nfsd_net *nn;
        struct svc_cacherep *rp, *found;
        __wsum csum;
        struct nfsd_drc_bucket *b;
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsd_stats_rc_nocache_inc();
                goto out;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
        if (!rp)
                goto out;

        b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp)
                goto found_entry;

        nfsd_stats_rc_misses_inc();
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;

        atomic_inc(&nn->num_drc_entries);
        nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));

        nfsd_prune_bucket(b, nn);

out_unlock:
        spin_unlock(&b->cache_lock);
out:
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsd_reply_cache_free_locked(NULL, rp, nn);
        nfsd_stats_rc_hits_inc();
        rtn = RC_DROPIT;
        rp = found;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out_trace;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out_trace;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out_unlock; /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
        }

out_trace:
        trace_nfsd_drc_found(nn, rqstp, rtn);
        goto out_unlock;
}

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: pointer to Reply's NFS status code, or NULL
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

        len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nfsd_stats_drc_mem_usage_add(nn, bufsize);
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                       data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
                                          nfsd_net_id);

        seq_printf(m, "max entries:           %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries:           %u\n",
                   atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage:             %lld\n",
                   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
        seq_printf(m, "cache hits:            %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
        seq_printf(m, "cache misses:          %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
        seq_printf(m, "not cached:            %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
        seq_printf(m, "payload misses:        %lld\n",
                   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
        seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
        return 0;
}