// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

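/*
 * Each bucket has its own rbtree of entries (ordered by the full cache
 * key), an LRU list (ordered by c_timestamp), and a lock that protects
 * both.
 */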
struct nfsd_drc_bucket {
	struct rb_root rb_head;
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int	max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int	maskbits;
static unsigned int	drc_hashsize;

/*
 * Stats and other tracking on the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the per-bucket cache_lock.
 */

/* total number of entries */
static atomic_t num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int	payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int	drc_mem_usage;

/* longest hash chain seen */
static unsigned int	longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int	longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *	64MB:	8192
 *	128MB:	11585
 *	256MB:	16384
 *	512MB:	23170
 *	1GB:	32768
 *	2GB:	46340
 *	4GB:	65536
 *	8GB:	92681
 *	16GB:	131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used, in KB.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
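
/*
 * Worked example, assuming 4k pages (PAGE_SHIFT == 12) and no highmem:
 * a 1GB machine has low_pages = 262144, int_sqrt(262144) = 512, so
 * limit = (16 * 512) << 2 = 32768 entries, matching the table above.
 */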

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
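
/*
 * Continuing the 1GB example: a limit of 32768 entries yields
 * roundup_pow_of_two(32768 / 64) = 512 buckets.
 */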

static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}
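
/*
 * With 512 buckets, maskbits is ilog2(512) = 9, so hash_32() folds the
 * 32-bit XID down to a bucket index in [0, 511].
 */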

static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		RB_CLEAR_NODE(&rp->c_node);
		INIT_LIST_HEAD(&rp->c_lru);

		memset(&rp->c_key, 0, sizeof(rp->c_key));
		rp->c_key.k_xid = rqstp->rq_xid;
		rp->c_key.k_proc = rqstp->rq_proc;
		rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
		rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
		rp->c_key.k_prot = rqstp->rq_prot;
		rp->c_key.k_vers = rqstp->rq_vers;
		rp->c_key.k_len = rqstp->rq_arg.len;
		rp->c_key.k_csum = csum;
	}
	return rp;
}

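/*
 * The caller must hold b->cache_lock. b may be NULL only when the entry
 * was never inserted into the rbtree/LRU (c_state == RC_UNUSED), in
 * which case the erase/unlink branch below is never taken.
 */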
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (rp->c_state != RC_UNUSED) {
		rb_erase(&rp->c_node, &b->rb_head);
		list_del(&rp->c_lru);
		atomic_dec(&num_drc_entries);
		drc_mem_usage -= sizeof(*rp);
	}
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(b, rp);
	spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;
	int status = 0;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	status = register_shrinker(&nfsd_reply_cache_shrinker);
	if (status)
		return status;

	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl) {
		drc_hashtbl = vzalloc(array_size(hashsize,
						 sizeof(*drc_hashtbl)));
		if (!drc_hashtbl)
			goto out_nomem;
	}

	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(&drc_hashtbl[i], rp);
		}
	}

	kvfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	kmem_cache_destroy(drc_slab);
	drc_slab = NULL;
}

/*
 * Move a cache entry to the end of its bucket's LRU list.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
}

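/* Caller must hold b->cache_lock. */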
static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(b, rp);
		freed++;
	}
	return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		spin_unlock(&b->cache_lock);
	}
	return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

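/*
 * Compare a lookup key against an existing entry. Keys whose XIDs match
 * but whose payload checksums differ typically indicate a different
 * request that reused the XID, so count those as payload misses.
 */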
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key, const struct svc_cacherep *rp)
{
	if (key->c_key.k_xid == rp->c_key.k_xid &&
	    key->c_key.k_csum != rp->c_key.k_csum)
		++payload_misses;

	return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the matching entry if
 * one is found; otherwise inserts the given key and returns it.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key)
{
	struct svc_cacherep	*rp, *ret = key;
	struct rb_node		**p = &b->rb_head.rb_node,
				*parent = NULL;
	unsigned int		entries = 0;
	int cmp;

	while (*p != NULL) {
		++entries;
		parent = *p;
		rp = rb_entry(parent, struct svc_cacherep, c_node);

		cmp = nfsd_cache_key_cmp(key, rp);
		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			ret = rp;
			goto out;
		}
	}
	rb_link_node(&key->c_node, parent, p);
	rb_insert_color(&key->c_node, &b->rb_head);
out:
	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	lru_put_end(b, ret);
	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, preallocate an entry
 * and search the cache under the bucket lock; if a matching entry already
 * exists, free the preallocated one and use what was found.
 *
 * Returns RC_DOIT when the caller should process the request, RC_DROPIT
 * when a duplicate request is still in progress, and RC_REPLY when a
 * cached reply has been copied into rq_res.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc(rqstp, csum);
	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		return rtn;
	}

	spin_lock(&b->cache_lock);
	found = nfsd_cache_insert(b, rp);
	if (found != rp) {
		nfsd_reply_cache_free_locked(NULL, rp);
		rp = found;
		goto found_entry;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;

	atomic_inc(&num_drc_entries);
	drc_mem_usage += sizeof(*rp);

	/* go ahead and prune the cache */
	prune_bucket(b);
out:
	spin_unlock(&b->cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	nfsdstats.rchits++;
	rtn = RC_DROPIT;

	/* Request being processed */
	if (rp->c_state == RC_INPROG)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(b, rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_key.k_xid);
	b = &drc_hashtbl[hash];

	len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}