// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"
/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64
struct nfsd_drc_bucket {
        struct rb_root rb_head;
        struct list_head lru_head;
        spinlock_t cache_lock;
};
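
/*
 * Each bucket thus pairs an rb-tree (keyed on struct nfsd_cacherep.c_key,
 * used for lookup) with an LRU list (ordered oldest-first, used for
 * expiry); cache_lock protects both structures.
 */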
static struct kmem_cache *drc_slab;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);
/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 * The limit grows with the square root of low memory, with a hard cap
 * of 256k entries. In the worst case, each entry will be ~1k, so the
 * resulting numbers give a rough max of the amount of memory used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers.  Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}
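
/*
 * Worked example (illustrative, assuming 4KiB pages, so PAGE_SHIFT-10 == 2):
 * a machine with 1GiB of low memory has 262144 low pages;
 * int_sqrt(262144) == 512, so limit = (16 * 512) << 2 = 32768 entries.
 * At 16GiB the same arithmetic gives 131072 entries, still under the
 * 256k hard cap.
 */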
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
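
/*
 * For example, at the 256k-entry hard cap: 262144 / 64 = 4096, already a
 * power of two, so the largest possible table has 4096 buckets.
 */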
static struct nfsd_cacherep *
nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
                    struct nfsd_net *nn)
{
        struct nfsd_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}
static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{
        if (rp->c_type == RC_REPLBUFF)
                kfree(rp->c_replvec.iov_base);
        kmem_cache_free(drc_slab, rp);
}
static unsigned long
nfsd_cacherep_dispose(struct list_head *dispose)
{
        struct nfsd_cacherep *rp;
        unsigned long freed = 0;

        while (!list_empty(dispose)) {
                rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
                list_del(&rp->c_lru);
                nfsd_cacherep_free(rp);
                freed++;
        }
        return freed;
}
static void
nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
                            struct nfsd_cacherep *rp)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
                nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
        }
}
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
                             struct nfsd_net *nn)
{
        nfsd_cacherep_unlink_locked(nn, b, rp);
        nfsd_cacherep_free(rp);
}
static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
                      struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_cacherep_unlink_locked(nn, b, rp);
        spin_unlock(&b->cache_lock);
        nfsd_cacherep_free(rp);
}
int nfsd_drc_slab_create(void)
{
        drc_slab = kmem_cache_create("nfsd_drc",
                                     sizeof(struct nfsd_cacherep), 0, 0, NULL);
        return drc_slab ? 0 : -ENOMEM;
}
void nfsd_drc_slab_free(void)
{
        kmem_cache_destroy(drc_slab);
}
/**
 * nfsd_net_reply_cache_init - per net namespace reply cache set-up
 * @nn: nfsd_net being initialized
 *
 * Returns zero on success; otherwise a negative errno is returned.
 */
int nfsd_net_reply_cache_init(struct nfsd_net *nn)
{
        return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}
/**
 * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down
 * @nn: nfsd_net being freed
 */
void nfsd_net_reply_cache_destroy(struct nfsd_net *nn)
{
        nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        nn->drc_hashtbl = kvzalloc(array_size(hashsize,
                                   sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
        if (!nn->drc_hashtbl)
                return -ENOMEM;

        nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
                                                       nn->nfsd_name);
        if (!nn->nfsd_reply_cache_shrinker)
                goto out_shrinker;

        nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker->seeks = 1;
        nn->nfsd_reply_cache_shrinker->private_data = nn;

        shrinker_register(nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_shrinker:
        kvfree(nn->drc_hashtbl);
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}
void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct nfsd_cacherep *rp;
        unsigned int i;

        shrinker_free(nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;

                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                                     rp, nn);
                }
        }

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;
}
/*
 * Move the cache entry to the end of the bucket's LRU list.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}
static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
        unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

        return &nn->drc_hashtbl[hash];
}
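
/*
 * A retransmitted Call reuses the XID of the original, so the
 * retransmission hashes to the same bucket as the entry it is probing
 * for; a lookup therefore only ever touches one bucket and its lock.
 */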
/*
 * Remove and return no more than @max expired entries in bucket @b.
 * If @max is zero, do not limit the number of removed entries.
 */
static void
nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
                         unsigned int max, struct list_head *dispose)
{
        unsigned long expiry = jiffies - RC_EXPIRE;
        struct nfsd_cacherep *rp, *tmp;
        unsigned int freed = 0;

        lockdep_assert_held(&b->cache_lock);

        /* The bucket LRU is ordered oldest-first. */
        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;

                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(expiry, rp->c_timestamp))
                        break;

                nfsd_cacherep_unlink_locked(nn, b, rp);
                list_add(&rp->c_lru, dispose);

                if (max && ++freed > max)
                        break;
        }
}
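
/*
 * Reading the loop above: an entry survives while the cache is at or
 * below max_drc_entries _and_ the entry is younger than RC_EXPIRE.
 * Once the cache exceeds its size limit, even entries that have not
 * yet expired become eligible for pruning, oldest first.
 */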
/**
 * nfsd_reply_cache_count - count_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Returns the total number of entries in the duplicate reply cache. To
 * keep things simple and quick, this is not the number of expired entries
 * in the cache (ie, the number that would be removed by a call to
 * nfsd_reply_cache_scan).
 */
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = shrink->private_data;

        return atomic_read(&nn->num_drc_entries);
}
/**
 * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
 * @shrink: our registered shrinker context
 * @sc: garbage collection parameters
 *
 * Free expired entries on each bucket's LRU list until we've released
 * nr_to_scan freed objects. Nothing will be released if the cache
 * has not exceeded its max_drc_entries limit.
 *
 * Returns the number of entries released by this call.
 */
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = shrink->private_data;
        unsigned long freed = 0;
        LIST_HEAD(dispose);
        unsigned int i;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;

                spin_lock(&b->cache_lock);
                nfsd_prune_bucket_locked(nn, b, 0, &dispose);
                spin_unlock(&b->cache_lock);

                freed += nfsd_cacherep_dispose(&dispose);
                if (freed > sc->nr_to_scan)
                        break;
        }
        return freed;
}
/**
 * nfsd_cache_csum - Checksum incoming NFS Call arguments
 * @buf: buffer containing a whole RPC Call message
 * @start: starting byte of the NFS Call header
 * @remaining: size of the NFS Call header, in bytes
 *
 * Compute a weak checksum of the leading bytes of an NFS procedure
 * call header to help verify that a retransmitted Call matches an
 * entry in the duplicate reply cache.
 *
 * To avoid assumptions about how the RPC message is laid out in
 * @buf and what else it might contain (eg, a GSS MIC suffix), the
 * caller passes us the exact location and length of the NFS Call
 * header.
 *
 * Returns a 32-bit checksum value, as defined in RFC 793.
 */
static __wsum nfsd_cache_csum(struct xdr_buf *buf, unsigned int start,
                              unsigned int remaining)
{
        unsigned int base, len;
        struct xdr_buf subbuf;
        __wsum csum = 0;
        void *p;
        int idx;

        if (remaining > RC_CSUMLEN)
                remaining = RC_CSUMLEN;
        if (xdr_buf_subsegment(buf, &subbuf, start, remaining))
                return csum;

        /* rq_arg.head first */
        if (subbuf.head[0].iov_len) {
                len = min_t(unsigned int, subbuf.head[0].iov_len, remaining);
                csum = csum_partial(subbuf.head[0].iov_base, len, csum);
                remaining -= len;
        }

        /* Continue into page array */
        idx = subbuf.page_base / PAGE_SIZE;
        base = subbuf.page_base & ~PAGE_MASK;
        while (remaining) {
                p = page_address(subbuf.pages[idx]) + base;
                len = min_t(unsigned int, PAGE_SIZE - base, remaining);
                csum = csum_partial(p, len, csum);
                remaining -= len;
                base = 0;
                ++idx;
        }
        return csum;
}
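
/*
 * Only the first RC_CSUMLEN bytes of the Call are summed, so two
 * different Calls can share a checksum; the remaining c_key fields
 * (xid, proc, address, port, protocol, version, length) compared in
 * nfsd_cache_key_cmp() disambiguate such collisions.
 */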
static int
nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
                   const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum) {
                nfsd_stats_payload_misses_inc(nn);
                trace_nfsd_drc_mismatch(nn, key, rp);
        }

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
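
/*
 * An equal XID with a differing payload checksum usually means the
 * client reused an XID for a different request; the "payload misses"
 * counter records how often the checksum alone prevented a false
 * cache hit.
 */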
/*
 * Search the bucket's rb-tree for an entry that matches @key.
 * Must be called with cache_lock held. Returns the matching entry,
 * if any; otherwise inserts @key itself and returns it.
 */
static struct nfsd_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
                  struct nfsd_net *nn)
{
        struct nfsd_cacherep *rp, *ret = key;
        struct rb_node **p = &b->rb_head.rb_node,
                       *parent = NULL;
        unsigned int entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct nfsd_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}
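
/*
 * The per-bucket rb-tree keeps lookups O(log n) even when a bucket
 * grows far beyond TARGET_BUCKET_SIZE; the longest_chain bookkeeping
 * above records the deepest search observed and the cache size at
 * which it happened.
 */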
/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 * @start: starting byte in @rqstp->rq_arg of the NFS Call header
 * @len: size of the NFS Call header, in bytes
 * @cacherep: OUT: DRC entry for this request
 *
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start,
                      unsigned int len, struct nfsd_cacherep **cacherep)
{
        struct nfsd_net *nn;
        struct nfsd_cacherep *rp, *found;
        __wsum csum;
        struct nfsd_drc_bucket *b;
        int type = rqstp->rq_cachetype;
        LIST_HEAD(dispose);
        int rtn = RC_DOIT;

        if (type == RC_NOCACHE) {
                nfsd_stats_rc_nocache_inc();
                goto out;
        }

        csum = nfsd_cache_csum(&rqstp->rq_arg, start, len);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        rp = nfsd_cacherep_alloc(rqstp, csum, nn);
        if (!rp)
                goto out;

        b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp)
                goto found_entry;
        *cacherep = rp;
        rp->c_state = RC_INPROG;
        nfsd_prune_bucket_locked(nn, b, 3, &dispose);
        spin_unlock(&b->cache_lock);

        nfsd_cacherep_dispose(&dispose);

        nfsd_stats_rc_misses_inc();
        atomic_inc(&nn->num_drc_entries);
        nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
        goto out;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsd_reply_cache_free_locked(NULL, rp, nn);
        nfsd_stats_rc_hits_inc();
        rtn = RC_DROPIT;
        rp = found;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out_trace;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out_trace;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out_unlock; /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
        }

out_trace:
        trace_nfsd_drc_found(nn, rqstp, rtn);
out_unlock:
        spin_unlock(&b->cache_lock);
out:
        return rtn;
}
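
/*
 * Sketch of the expected caller pattern, loosely based on
 * nfsd_dispatch() (illustrative, not a verbatim excerpt):
 *
 *	switch (nfsd_cache_lookup(rqstp, start, len, &rp)) {
 *	case RC_DOIT:
 *		break;		// execute the procedure, then nfsd_cache_update()
 *	case RC_REPLY:
 *		return 1;	// cached reply already encoded in rq_res
 *	case RC_DROPIT:
 *		return 0;	// duplicate of an in-progress request: drop it
 *	}
 */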
/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @rp: IN: DRC entry for this request
 * @cachetype: which cache to update
 * @statp: pointer to Reply's NFS status code, or NULL
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
                       int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

        len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nfsd_stats_drc_mem_usage_add(nn, bufsize);
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
}
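
/*
 * Note on the length check above: len is converted to 32-bit XDR words
 * (len >>= 2), so "len > (256 >> 2)" caps cacheable replies at 64 words,
 * i.e. 256 bytes.
 */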
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        __be32 *p;

        p = xdr_reserve_space(&rqstp->rq_res_stream, data->iov_len);
        if (unlikely(!p))
                return false;
        memcpy(p, data->iov_base, data->iov_len);
        xdr_commit_encode(&rqstp->rq_res_stream);
        return true;
}
/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = net_generic(file_inode(m->file)->i_sb->s_fs_info,
                                          nfsd_net_id);

        seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries: %u\n",
                   atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage: %lld\n",
                   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
        seq_printf(m, "cache hits: %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
        seq_printf(m, "cache misses: %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
        seq_printf(m, "not cached: %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
        seq_printf(m, "payload misses: %lld\n",
                   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
        seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
        return 0;
}
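
/*
 * Example output (values illustrative; read via the nfsd filesystem's
 * reply_cache_stats file):
 *
 *	max entries: 92681
 *	num entries: 14
 *	hash buckets: 2048
 *	mem usage: 14336
 *	cache hits: 0
 *	cache misses: 14
 *	not cached: 2
 *	payload misses: 0
 *	longest chain len: 1
 *	cachesize at longest: 4
 */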