/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */


#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>

/* statistics for svc_pool structures */
struct svc_pool_stats {
	atomic_long_t	packets;
	unsigned long	sockets_queued;
	atomic_long_t	threads_woken;
	atomic_long_t	threads_timedout;
};

/*
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;		/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
#define	SP_TASK_PENDING		(0)		/* still work to do even if no
						 * xprt is queued. */
#define SP_CONGESTED		(1)
	unsigned long		sp_flags;
} ____cacheline_aligned_in_smp;

/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	struct kref		sv_refcnt;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */
	int			(*sv_threadfn)(void *data);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
						 * entries in the svc_cb_list */
	bool			sv_bc_enabled;	/* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

/**
 * svc_get() - increment reference count on a SUNRPC serv
 * @serv:  the svc_serv to have count incremented
 *
 * Returns: the svc_serv that was passed in.
 */
static inline struct svc_serv *svc_get(struct svc_serv *serv)
{
	kref_get(&serv->sv_refcnt);
	return serv;
}

void svc_destroy(struct kref *);

/**
 * svc_put - decrement reference count on a SUNRPC serv
 * @serv:  the svc_serv to have count decremented
 *
 * When the reference count reaches zero, svc_destroy()
 * is called to clean up and free the serv.
 */
static inline void svc_put(struct svc_serv *serv)
{
	kref_put(&serv->sv_refcnt, svc_destroy);
}
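
/*
 * Typical usage (an illustrative sketch only; "found_serv" stands for
 * any svc_serv pointer the caller can already safely dereference):
 *
 *	struct svc_serv *serv = svc_get(found_serv);
 *
 *	... serv cannot be freed while this reference is held ...
 *
 *	svc_put(serv);
 */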

/**
 * svc_put_not_last - decrement non-final reference count on SUNRPC serv
 * @serv:  the svc_serv to have count decremented
 *
 * Returns: %true if the refcount was decremented.
 *
 * If the refcount is 1, it is not decremented and instead failure is reported.
 */
static inline bool svc_put_not_last(struct svc_serv *serv)
{
	return refcount_dec_not_one(&serv->sv_refcnt.refcount);
}
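
/*
 * Illustrative use (a sketch): code that must not trigger svc_destroy()
 * from its own context can try a non-final put, and otherwise leave the
 * last reference for whoever owns the serv's lifetime to drop:
 *
 *	if (!svc_put_not_last(serv))
 *		... we hold the last reference; hand the final
 *		    svc_put() off to the serv's owner ...
 */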

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendpage to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)
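
/*
 * For example, with 4K pages this works out to 1MB/4K = 256 payload
 * pages, plus the 2 + 1 above: 259 entries per thread.
 */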

static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}

static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}

static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}

static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}
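
/*
 * The kvec acts as a cursor: the svc_get* helpers consume one 32-bit
 * word from the front of the argument kvec and advance it, while the
 * svc_put* helpers append one word past the end of the reply kvec.
 * An illustrative fragment (a sketch only; no bounds checking, just
 * as in the helpers themselves):
 *
 *	u32 vers = svc_getnl(&rqstp->rq_arg.head[0]);
 *	svc_putnl(&rqstp->rq_res.head[0], RPC_SUCCESS);
 */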

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_all;		/* all threads list */
	struct rcu_head		rq_rcu_head;	/* for RCU deferred kfree */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 *  - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	const struct svc_procedure *rq_procinfo;/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req*rq_deferred;	/* deferred request we are replaying */

	struct xdr_buf		rq_arg;
	struct xdr_stream	rq_arg_stream;
	struct xdr_stream	rq_res_stream;
	struct page		*rq_scratch_page;
	struct xdr_buf		rq_res;
	struct page		*rq_pages[RPCSVC_MAXPAGES + 1];
	struct page *		*rq_respages;	/* points into rq_pages */
	struct page *		*rq_next_page;	/* next reply page to use */
	struct page *		*rq_page_end;	/* one past the last page */

	struct pagevec		rq_pvec;
	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
	struct bio_vec		rq_bvec[RPCSVC_MAXPAGES];

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	int			rq_cachetype;	/* catering to nfsd */
#define	RQ_SECURE	(0)			/* secure port */
#define	RQ_LOCAL	(1)			/* local request */
#define	RQ_USEDEFERRAL	(2)			/* use deferral */
#define	RQ_DROPME	(3)			/* drop current reply */
#define	RQ_SPLICE_OK	(4)			/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
#define	RQ_VICTIM	(5)			/* about to be shut down */
#define	RQ_BUSY		(6)			/* request is busy */
#define	RQ_DATA		(7)			/* request has data */
	unsigned long		rq_flags;	/* flags field */
	ktime_t			rq_qtime;	/* enqueue time */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */
	__be32			rq_auth_stat;	/* authentication status */
	int			rq_auth_slack;	/* extra space xdr code
						 * should leave in head
						 * for krb5i, krb5p.
						 */
	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */
	ktime_t			rq_stime;	/* start time */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	struct task_struct	*rq_task;	/* service thread */
	spinlock_t		rq_lock;	/* per-request lock */
	struct net		*rq_bc_net;	/* pointer to backchannel's
						 * net namespace
						 */
	void **			rq_lease_breaker; /* The v4 client breaking a lease */
};
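
/*
 * The RQ_* values above are bit numbers within rq_flags and are
 * manipulated with the usual bitops, e.g. (a sketch):
 *
 *	if (test_bit(RQ_SECURE, &rqstp->rq_flags))
 *		... the request arrived on a privileged port ...
 */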

#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}
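
/*
 * Example (a sketch): check the address family before using one of
 * the typed accessors above:
 *
 *	if (svc_addr(rqstp)->sa_family == AF_INET)
 *		port = ntohs(svc_addr_in(rqstp)->sin_port);
 */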

/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char*)vec->iov_base
		&& cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char*)p;

	vec->iov_len = cp - (char*)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}
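
/*
 * Example (a sketch): a legacy-style XDR routine advances its cursor
 * p past the last word it touched and returns the bounds check:
 *
 *	return xdr_ressize_check(rqstp, p);
 */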

static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_next_page != rqstp->rq_respages) {
		struct page **pp = --rqstp->rq_next_page;
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	void			*xprt_ctxt;
	struct cache_deferred_req handle;
	int			argslen;
	__be32			args[];
};

struct svc_process_info {
	union {
		int  (*dispatch)(struct svc_rqst *, __be32 *);
		struct {
			unsigned int lovers;
			unsigned int hivers;
		} mismatch;
	};
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	const struct svc_version **pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
	__be32			(*pg_init_request)(struct svc_rqst *,
						   const struct svc_program *,
						   struct svc_process_info *);
	int			(*pg_rpcbind_set)(struct net *net,
						  const struct svc_program *,
						  u32 version, int family,
						  unsigned short proto,
						  unsigned short port);
};
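
/*
 * An illustrative program definition (a sketch only; every name and
 * value below is hypothetical, though nfsd and lockd populate these
 * fields in the same way):
 *
 *	static const struct svc_version *my_versions[] = {
 *		[1] = &my_version1,
 *	};
 *
 *	static struct svc_program my_program = {
 *		.pg_prog	 = 400500,
 *		.pg_nvers	 = ARRAY_SIZE(my_versions),
 *		.pg_vers	 = my_versions,
 *		.pg_name	 = "mysvc",
 *		.pg_class	 = "mysvc",
 *		.pg_init_request = svc_generic_init_request,
 *		.pg_rpcbind_set	 = svc_generic_rpcbind_set,
 *	};
 */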

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	const struct svc_procedure *vs_proc;	/* per-procedure info */
	unsigned int		*vs_count;	/* call counts */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	/* Don't register with rpcbind */
	bool			vs_hidden;

	/* Don't care if the rpcbind registration fails */
	bool			vs_rpcb_optnl;

	/* Need xprt with congestion control */
	bool			vs_need_cong_ctrl;

	/* Dispatch function */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
struct svc_procedure {
	/* process the request: */
	__be32			(*pc_func)(struct svc_rqst *);
	/* XDR decode args: */
	bool			(*pc_decode)(struct svc_rqst *rqstp,
					     struct xdr_stream *xdr);
	/* XDR encode result: */
	bool			(*pc_encode)(struct svc_rqst *rqstp,
					     struct xdr_stream *xdr);
	/* XDR free result: */
	void			(*pc_release)(struct svc_rqst *);
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
	const char *		pc_name;	/* for display */
};
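
/*
 * An illustrative vs_proc[] entry (a sketch; all names below are
 * hypothetical):
 *
 *	[MYPROC_NULL] = {
 *		.pc_func       = myproc_null,
 *		.pc_decode     = mysvc_decode_void,
 *		.pc_encode     = mysvc_encode_void,
 *		.pc_argsize    = sizeof(struct mysvc_void),
 *		.pc_ressize    = sizeof(struct mysvc_void),
 *		.pc_xdrressize = 1,
 *		.pc_name       = "NULL",
 *	},
 */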

/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    int (*threadfn)(void *data));
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
					struct svc_pool *pool, int node);
void		   svc_rqst_replace_page(struct svc_rqst *rqstp,
					 struct page *page);
void		   svc_rqst_free(struct svc_rqst *);
void		   svc_exit_thread(struct svc_rqst *);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
				     int (*threadfn)(void *data));
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
int		   svc_process(struct svc_rqst *);
int		   bc_svc_process(struct svc_serv *, struct rpc_rqst *,
			struct svc_rqst *);
int		   svc_register(const struct svc_serv *, struct net *, const int,
			const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);
const char *	   svc_proc_name(const struct svc_rqst *rqstp);
int		   svc_encode_result_payload(struct svc_rqst *rqstp,
					     unsigned int offset,
					     unsigned int length);
unsigned int	   svc_fill_write_vector(struct svc_rqst *rqstp,
					 struct xdr_buf *payload);
char		  *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
					     struct kvec *first, void *p,
					     size_t total);
__be32		   svc_generic_init_request(struct svc_rqst *rqstp,
					    const struct svc_program *progp,
					    struct svc_process_info *procinfo);
int		   svc_generic_rpcbind_set(struct net *net,
					   const struct svc_program *progp,
					   u32 version, int family,
					   unsigned short proto,
					   unsigned short port);
int		   svc_rpcbind_set_version(struct net *net,
					   const struct svc_program *progp,
					   u32 version, int family,
					   unsigned short proto,
					   unsigned short port);
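
/*
 * Putting the pieces together (an illustrative sketch; error handling
 * is elided and "my_program"/"my_thread_fn" are hypothetical):
 *
 *	serv = svc_create_pooled(&my_program, 64 * 1024, my_thread_fn);
 *	err = svc_bind(serv, net);
 *	err = svc_set_num_threads(serv, NULL, 8);
 *	...
 *	svc_put(serv);
 */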

#define	RPC_MAX_ADDRBUFLEN	(63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet.  This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}

/**
 * svcxdr_init_decode - Prepare an xdr_stream for svc Call decoding
 * @rqstp: controlling server RPC transaction context
 *
 */
static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct kvec *argv = rqstp->rq_arg.head;

	xdr_init_decode(xdr, &rqstp->rq_arg, argv->iov_base, NULL);
	xdr_set_scratch_page(xdr, rqstp->rq_scratch_page);
}
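
/*
 * A pc_decode callback can then pull the Call arguments from
 * rq_arg_stream with the generic xdr_stream helpers, e.g. (a sketch;
 * "args" is a hypothetical argument struct):
 *
 *	if (xdr_stream_decode_u32(xdr, &args->count) < 0)
 *		return false;
 */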

/**
 * svcxdr_init_encode - Prepare an xdr_stream for svc Reply encoding
 * @rqstp: controlling server RPC transaction context
 *
 */
static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct xdr_buf *buf = &rqstp->rq_res;
	struct kvec *resv = buf->head;

	xdr_reset_scratch_buffer(xdr);

	xdr->buf = buf;
	xdr->iov = resv;
	xdr->p = resv->iov_base + resv->iov_len;
	xdr->end = resv->iov_base + PAGE_SIZE - rqstp->rq_auth_slack;
	buf->len = resv->iov_len;
	xdr->page_ptr = buf->pages - 1;
	buf->buflen = PAGE_SIZE * (1 + rqstp->rq_page_end - buf->pages);
	buf->buflen -= rqstp->rq_auth_slack;
	xdr->rqst = NULL;
}

#endif /* SUNRPC_SVC_H */