// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
        struct rb_root rb_head;
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static struct kmem_cache *drc_slab;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
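/*
 * Worked example of the formula below, assuming 4 KiB pages
 * (PAGE_SHIFT == 12): 1GB of low memory is 262144 pages,
 * int_sqrt(262144) == 512, and (16 * 512) << (12 - 10) == 32768,
 * matching the 1GB row of the table above.
 */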
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT - 10);
        return min_t(unsigned int, limit, 256 * 1024);
}

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
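/*
 * For example, a limit of 32768 entries gives 32768 / TARGET_BUCKET_SIZE
 * == 512, which is already a power of two, so 512 buckets are used.
 */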
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

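/*
 * Allocate a cache entry and fill in its lookup key (XID, procedure,
 * client address and port, transport protocol, version, argument length
 * and payload checksum) from the incoming request. Returns NULL if the
 * slab allocation fails.
 */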
static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
                       struct nfsd_net *nn)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}

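/*
 * Free a cache entry. Any cached reply buffer is released, and if the
 * entry has been linked into a bucket it is removed from both the rbtree
 * and the LRU list and the DRC counters are updated. The bucket lock must
 * be held unless the entry was never inserted (still RC_UNUSED).
 */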
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                             struct nfsd_net *nn)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
                kfree(rp->c_replvec.iov_base);
        }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
        }
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                      struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(b, rp, nn);
        spin_unlock(&b->cache_lock);
}

int nfsd_drc_slab_create(void)
{
        drc_slab = kmem_cache_create("nfsd_drc",
                                     sizeof(struct svc_cacherep), 0, 0, NULL);
        return drc_slab ? 0 : -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
        kmem_cache_destroy(drc_slab);
}

static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
{
        return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}

static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
{
        nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}

int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        status = nfsd_reply_cache_stats_init(nn);
        if (status)
                goto out_nomem;

        nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker.seeks = 1;
        status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
        if (status)
                goto out_stats_destroy;

        nn->drc_hashtbl = kvzalloc(array_size(hashsize,
                                   sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
        if (!nn->drc_hashtbl)
                goto out_shrinker;

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_shrinker:
        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_stats_destroy:
        nfsd_reply_cache_stats_destroy(nn);
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct svc_cacherep *rp;
        unsigned int i;

        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;

                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                                     rp, nn);
                }
        }
        nfsd_reply_cache_stats_destroy(nn);

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;
}

/*
 * Move cache entry to end of LRU list and refresh its timestamp, so that
 * the pruning code treats it as recently used.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}

static noinline struct nfsd_drc_bucket *
nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
{
        unsigned int hash = hash_32((__force u32)xid, nn->maskbits);

        return &nn->drc_hashtbl[hash];
}

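/*
 * Walk a bucket's LRU list from oldest to newest, freeing entries that are
 * not in progress, and stopping once the cache is back under its entry
 * limit and the entry at hand is not yet RC_EXPIRE old. When @max is
 * non-zero, roughly that many entries at most are freed per call. Returns
 * the number of entries freed. Caller must hold the bucket's cache_lock.
 */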
static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
                         unsigned int max)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(b, rp, nn);
                if (max && freed++ > max)
                        break;
        }
        return freed;
}

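/*
 * Bounded prune used from the request path; the small cap (3) keeps the
 * amount of reclaim work done per lookup, and thus the time the bucket
 * lock is held, small.
 */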
static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
        return prune_bucket(b, nn, 3);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
        unsigned int i;
        long freed = 0;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b, nn, 0);
                spin_unlock(&b->cache_lock);
        }
        return freed;
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and compute a checksum over at most the first
 * RC_CSUMLEN bytes of the request.
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

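/*
 * Compare two cache keys. As a side effect, when the XIDs match but the
 * payload checksums differ, a payload miss is counted and traced: the
 * client apparently reused an XID for a different request.
 */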

static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
                   const struct svc_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum) {
                nfsd_stats_payload_misses_inc(nn);
                trace_nfsd_drc_mismatch(nn, key, rp);
        }

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the matching entry if one
 * is found; otherwise links @key into the tree and returns it.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
                  struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *ret = key;
        struct rb_node **p = &b->rb_head.rb_node,
                       *parent = NULL;
        unsigned int entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct svc_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a miss, a new entry is preallocated before the bucket
 * lock is taken; if a matching entry is already present (for instance
 * because another thread raced us), the preallocated entry is freed and
 * the existing one is used instead.
 *
 * Return values:
 *   %RC_DOIT: Process the request normally
 *   %RC_REPLY: Reply from cache
 *   %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct nfsd_net *nn;
        struct svc_cacherep *rp, *found;
        __wsum csum;
        struct nfsd_drc_bucket *b;
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsd_stats_rc_nocache_inc();
                goto out;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
        if (!rp)
                goto out;

        b = nfsd_cache_bucket_find(rqstp->rq_xid, nn);
        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp)
                goto found_entry;

        nfsd_stats_rc_misses_inc();
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;

        atomic_inc(&nn->num_drc_entries);
        nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));

        nfsd_prune_bucket(b, nn);

out_unlock:
        spin_unlock(&b->cache_lock);
out:
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsd_reply_cache_free_locked(NULL, rp, nn);
        nfsd_stats_rc_hits_inc();
        rtn = RC_DROPIT;
        rp = found;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out_trace;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out_trace;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out_unlock; /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
        }

out_trace:
        trace_nfsd_drc_found(nn, rqstp, rtn);
        goto out_unlock;
}

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: Reply's status code
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        b = nfsd_cache_bucket_find(rp->c_key.k_xid, nn);

        len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk(KERN_WARNING "nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nfsd_stats_drc_mem_usage_add(nn, bufsize);
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                       data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = m->private;

        seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries: %u\n",
                   atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage: %lld\n",
                   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
        seq_printf(m, "cache hits: %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
        seq_printf(m, "cache misses: %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
        seq_printf(m, "not cached: %lld\n",
                   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
        seq_printf(m, "payload misses: %lld\n",
                   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
        seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
        return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
                                          nfsd_net_id);

        return single_open(file, nfsd_reply_cache_stats_show, nn);
}