/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/include/linux/sunrpc/xprt.h
 *
 *  Declarations for the RPC transport interface.
 *
 *  Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#ifndef _LINUX_SUNRPC_XPRT_H
#define _LINUX_SUNRPC_XPRT_H

#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ktime.h>
#include <linux/kref.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

#define RPC_MIN_SLOT_TABLE	(2U)
#define RPC_DEF_SLOT_TABLE	(16U)
#define RPC_MAX_SLOT_TABLE_LIMIT	(65536U)
#define RPC_MAX_SLOT_TABLE	RPC_MAX_SLOT_TABLE_LIMIT

#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
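
/*
 * Illustrative note (editor's addition, not kernel API): congestion
 * accounting is done in fixed-point units of RPC_CWNDSCALE, so with
 * the default RPC_CWNDSHIFT of 8 each in-flight congestion-controlled
 * request is assumed to account for 256 units of xprt->cong.  For
 * example, a transport whose window has grown to
 *
 *	xprt->cwnd == 4 * RPC_CWNDSCALE
 *
 * admits four such requests before RPCXPRT_CONGESTED(xprt) becomes
 * true, and RPC_MAXCWND(xprt) caps the window at max_reqs requests.
 */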

/*
 * This describes a timeout strategy
 */
struct rpc_timeout {
	unsigned long		to_initval,		/* initial timeout */
				to_maxval,		/* max timeout */
				to_increment;		/* if !exponential */
	unsigned int		to_retries;		/* max # of retries */
	unsigned char		to_exponential;
};
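
/*
 * Illustrative example (editor's addition; the values are hypothetical):
 * a policy that starts at a 5 second timeout, backs off linearly by
 * 5 seconds per retransmission up to 30 seconds, and gives up after
 * 3 retries could be expressed as
 *
 *	static const struct rpc_timeout example_timeout = {
 *		.to_initval	= 5 * HZ,
 *		.to_maxval	= 30 * HZ,
 *		.to_increment	= 5 * HZ,
 *		.to_retries	= 3,
 *		.to_exponential	= 0,
 *	};
 *
 * With to_exponential set, to_increment is ignored and the timeout
 * doubles on each retransmission instead.
 */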

enum rpc_display_format_t {
	RPC_DISPLAY_ADDR = 0,
	RPC_DISPLAY_PORT,
	RPC_DISPLAY_PROTO,
	RPC_DISPLAY_HEX_ADDR,
	RPC_DISPLAY_HEX_PORT,
	RPC_DISPLAY_NETID,
	RPC_DISPLAY_MAX,
};

struct rpc_task;
struct rpc_xprt;
struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;

/*
 * This describes a complete RPC request
 */
struct rpc_rqst {
	/*
	 * This is the user-visible part
	 */
	struct rpc_xprt *	rq_xprt;		/* RPC client */
	struct xdr_buf		rq_snd_buf;		/* send buffer */
	struct xdr_buf		rq_rcv_buf;		/* recv buffer */

	/*
	 * This is the private part
	 */
	struct rpc_task *	rq_task;	/* RPC task data */
	struct rpc_cred *	rq_cred;	/* Bound cred */
	__be32			rq_xid;		/* request XID */
	int			rq_cong;	/* has incremented xprt->cong */
	u32			rq_seqno;	/* gss seq no. used on req. */
	int			rq_enc_pages_num;
	struct page		**rq_enc_pages;	/* scratch pages for use by
						   gss privacy code */
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */

	union {
		struct list_head	rq_list;	/* Slot allocation list */
		struct rb_node		rq_recv;	/* Receive queue */
	};

	struct list_head	rq_xmit;	/* Send queue */
	struct list_head	rq_xmit2;	/* Send queue */

	void			*rq_buffer;	/* Call XDR encode buffer */
	size_t			rq_callsize;
	void			*rq_rbuffer;	/* Reply XDR decode buffer */
	size_t			rq_rcvsize;
	size_t			rq_xmit_bytes_sent;	/* total bytes sent */
	size_t			rq_reply_bytes_recvd;	/* total reply bytes */
							/* received */

	struct xdr_buf		rq_private_buf;		/* The receive buffer
							 * used in the softirq.
							 */
	unsigned long		rq_majortimeo;	/* major timeout alarm */
	unsigned long		rq_minortimeo;	/* minor timeout alarm */
	unsigned long		rq_timeout;	/* Current timeout value */
	ktime_t			rq_rtt;		/* round-trip time */
	unsigned int		rq_retries;	/* # of retries */
	unsigned int		rq_connect_cookie;
						/* A cookie used to track the
						   state of the transport
						   connection */
	atomic_t		rq_pin;

	/*
	 * Partial send handling
	 */
	u32			rq_bytes_sent;	/* Bytes we have sent */

	ktime_t			rq_xtime;	/* transmit time stamp */
	int			rq_ntrans;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	rq_bc_list;	/* Callback service list */
	unsigned long		rq_bc_pa_state;	/* Backchannel prealloc state */
	struct list_head	rq_bc_pa_list;	/* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
#define rq_svec			rq_snd_buf.head
#define rq_slen			rq_snd_buf.len

/* RPC transport layer security policies */
enum xprtsec_policies {
	RPC_XPRTSEC_NONE = 0,
	RPC_XPRTSEC_TLS_ANON,
	RPC_XPRTSEC_TLS_X509,
};

struct xprtsec_parms {
	enum xprtsec_policies	policy;

	/* authentication material */
	key_serial_t		cert_serial;
	key_serial_t		privkey_serial;
};
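
/*
 * Illustrative example (editor's addition; the identifiers are
 * hypothetical): a consumer requesting RPC-with-TLS using an x.509
 * certificate fills in the policy and the keyring serial numbers of
 * its credentials, e.g.
 *
 *	struct xprtsec_parms parms = {
 *		.policy		= RPC_XPRTSEC_TLS_X509,
 *		.cert_serial	= cert_key_serial,
 *		.privkey_serial	= privkey_key_serial,
 *	};
 *
 * and passes the result in the xprtsec field of struct xprt_create
 * (defined below) when setting up the transport.
 */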

struct rpc_xprt_ops {
	void		(*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
	int		(*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*free_slot)(struct rpc_xprt *xprt,
				     struct rpc_rqst *req);
	void		(*rpcbind)(struct rpc_task *task);
	void		(*set_port)(struct rpc_xprt *xprt, unsigned short port);
	void		(*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
	int		(*get_srcaddr)(struct rpc_xprt *xprt, char *buf,
				       size_t buflen);
	unsigned short	(*get_srcport)(struct rpc_xprt *xprt);
	int		(*buf_alloc)(struct rpc_task *task);
	void		(*buf_free)(struct rpc_task *task);
	int		(*prepare_request)(struct rpc_rqst *req,
					   struct xdr_buf *buf);
	int		(*send_request)(struct rpc_rqst *req);
	void		(*wait_for_reply_request)(struct rpc_task *task);
	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*release_request)(struct rpc_task *task);
	void		(*close)(struct rpc_xprt *xprt);
	void		(*destroy)(struct rpc_xprt *xprt);
	void		(*set_connect_timeout)(struct rpc_xprt *xprt,
					unsigned long connect_timeout,
					unsigned long reconnect_timeout);
	void		(*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
	int		(*enable_swap)(struct rpc_xprt *xprt);
	void		(*disable_swap)(struct rpc_xprt *xprt);
	void		(*inject_disconnect)(struct rpc_xprt *xprt);
	int		(*bc_setup)(struct rpc_xprt *xprt,
				    unsigned int min_reqs);
	size_t		(*bc_maxpayload)(struct rpc_xprt *xprt);
	unsigned int	(*bc_num_slots)(struct rpc_xprt *xprt);
	void		(*bc_free_rqst)(struct rpc_rqst *rqst);
	void		(*bc_destroy)(struct rpc_xprt *xprt,
				      unsigned int max_reqs);
};
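
/*
 * Illustrative sketch (editor's addition; the "example" names are
 * hypothetical): a transport implementation supplies its methods
 * through a statically defined ops table and points every rpc_xprt
 * it creates at that table, e.g.
 *
 *	static const struct rpc_xprt_ops example_xprt_ops = {
 *		.reserve_xprt		= xprt_reserve_xprt,
 *		.release_xprt		= xprt_release_xprt,
 *		.alloc_slot		= xprt_alloc_slot,
 *		.free_slot		= xprt_free_slot,
 *		.wait_for_reply_request	= xprt_wait_for_reply_request_def,
 *		...
 *	};
 *
 * The generic helpers named above are declared later in this header
 * and may be used directly for methods that need no transport-specific
 * behaviour.
 */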

/*
 * RPC transport identifiers
 *
 * To preserve compatibility with the historical use of raw IP protocol
 * id's for transport selection, UDP and TCP identifiers are specified
 * with the previous values. No such restriction exists for new transports,
 * except that they may not collide with these values (17 and 6,
 * respectively).
 */
#define XPRT_TRANSPORT_BC       (1 << 31)
enum xprt_transports {
	XPRT_TRANSPORT_UDP	= IPPROTO_UDP,
	XPRT_TRANSPORT_TCP	= IPPROTO_TCP,
	XPRT_TRANSPORT_BC_TCP	= IPPROTO_TCP | XPRT_TRANSPORT_BC,
	XPRT_TRANSPORT_RDMA	= 256,
	XPRT_TRANSPORT_BC_RDMA	= XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
	XPRT_TRANSPORT_LOCAL	= 257,
	XPRT_TRANSPORT_TCP_TLS	= 258,
};

struct rpc_sysfs_xprt;
struct rpc_xprt {
	struct kref		kref;		/* Reference count */
	const struct rpc_xprt_ops *ops;		/* transport methods */
	unsigned int		id;		/* transport id */

	const struct rpc_timeout *timeout;	/* timeout parms */
	struct sockaddr_storage	addr;		/* server address */
	size_t			addrlen;	/* size of server address */
	int			prot;		/* IP protocol */

	unsigned long		cong;		/* current congestion */
	unsigned long		cwnd;		/* congestion window */

	size_t			max_payload;	/* largest RPC payload size,
						   in bytes */

	struct rpc_wait_queue	binding;	/* requests waiting on rpcbind */
	struct rpc_wait_queue	sending;	/* requests waiting to send */
	struct rpc_wait_queue	pending;	/* requests in flight */
	struct rpc_wait_queue	backlog;	/* waiting for slot */
	struct list_head	free;		/* free slots */
	unsigned int		max_reqs;	/* max number of slots */
	unsigned int		min_reqs;	/* min number of slots */
	unsigned int		num_reqs;	/* total slots */
	unsigned long		state;		/* transport state */
	unsigned char		resvport   : 1,	/* use a reserved port */
				reuseport  : 1; /* reuse port on reconnect */
	atomic_t		swapper;	/* we're swapping over this
						   transport */
	unsigned int		bind_index;	/* bind function index */

	/*
	 * Multipath
	 */
	struct list_head	xprt_switch;

	/*
	 * Connection of transports
	 */
	unsigned long		bind_timeout,
				reestablish_timeout;
	struct xprtsec_parms	xprtsec;
	unsigned int		connect_cookie;	/* A cookie that gets bumped
						   every time the transport
						   is reconnected */

	/*
	 * Disconnection of idle transports
	 */
	struct work_struct	task_cleanup;
	struct timer_list	timer;
	unsigned long		last_used,
				idle_timeout,
				connect_timeout,
				max_reconnect_timeout;

	/*
	 * Send stuff
	 */
	atomic_long_t		queuelen;
	spinlock_t		transport_lock;	/* lock transport info */
	spinlock_t		reserve_lock;	/* lock slot table */
	spinlock_t		queue_lock;	/* send/receive queue lock */
	u32			xid;		/* Next XID value to use */
	struct rpc_task *	snd_task;	/* Task blocked in send */

	struct list_head	xmit_queue;	/* Send queue */
	atomic_long_t		xmit_queuelen;

	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct svc_serv		*bc_serv;       /* The RPC service which will */
						/* process the callback */
	unsigned int		bc_alloc_max;
	unsigned int		bc_alloc_count;	/* Total number of preallocs */
	atomic_t		bc_slot_count;	/* Number of allocated slots */
	spinlock_t		bc_pa_lock;	/* Protects the preallocated
						 * items */
	struct list_head	bc_pa_list;	/* List of preallocated
						 * backchannel rpc_rqst's */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	struct rb_root		recv_queue;	/* Receive queue */

	struct {
		unsigned long		bind_count,	/* total number of binds */
					connect_count,	/* total number of connects */
					connect_start,	/* connect start timestamp */
					connect_time,	/* jiffies waiting for connect */
					sends,		/* how many complete requests sent */
					recvs,		/* how many complete replies received */
					bad_xids,	/* lookup_rqst didn't find XID */
					max_slots;	/* max rpc_slots used */

		unsigned long long	req_u,		/* average requests on the wire */
					bklog_u,	/* backlog queue utilization */
					sending_u,	/* send q utilization */
					pending_u;	/* pend q utilization */
	} stat;

	struct net		*xprt_net;
	netns_tracker		ns_tracker;
	const char		*servername;
	const char		*address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct dentry		*debugfs;		/* debugfs directory */
#endif
	struct rcu_head		rcu;
	const struct xprt_class	*xprt_class;
	struct rpc_sysfs_xprt	*xprt_sysfs;
	bool			main; /* mark if this is the 1st transport */
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Backchannel flags
 */
#define	RPC_BC_PA_IN_USE	0x0001		/* Preallocated backchannel */
						/* buffer in use */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static inline int bc_prealloc(struct rpc_rqst *req)
{
	return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
}
#else
static inline int bc_prealloc(struct rpc_rqst *req)
{
	return 0;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

#define XPRT_CREATE_INFINITE_SLOTS	(1U)
#define XPRT_CREATE_NO_IDLE_TIMEOUT	(1U << 1)

struct xprt_create {
	int			ident;		/* XPRT_TRANSPORT identifier */
	struct net *		net;
	struct sockaddr *	srcaddr;	/* optional local address */
	struct sockaddr *	dstaddr;	/* remote peer address */
	size_t			addrlen;
	const char		*servername;
	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
	struct rpc_xprt_switch	*bc_xps;
	unsigned int		flags;
	struct xprtsec_parms	xprtsec;
	unsigned long		connect_timeout;
	unsigned long		reconnect_timeout;
};

struct xprt_class {
	struct list_head	list;
	int			ident;		/* XPRT_TRANSPORT identifier */
	struct rpc_xprt *	(*setup)(struct xprt_create *);
	struct module		*owner;
	char			name[32];
	const char *		netid[];
};
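
/*
 * Illustrative sketch (editor's addition; the "example" identifiers
 * are hypothetical): a transport module describes itself with an
 * xprt_class and registers it at module load time, e.g.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		.setup	= example_setup,
 *		.netid	= { "example", "" },
 *	};
 *
 *	xprt_register_transport(&example_transport);
 *
 * xprt_create_transport() then matches xprt_create->ident against the
 * registered classes and invokes the matching class's ->setup() method
 * to build the rpc_xprt.
 */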

/*
 * Generic internal transport functions
 */
struct rpc_xprt		*xprt_create_transport(struct xprt_create *args);
void			xprt_connect(struct rpc_task *task);
unsigned long		xprt_reconnect_delay(const struct rpc_xprt *xprt);
void			xprt_reconnect_backoff(struct rpc_xprt *xprt,
					       unsigned long init_to);
void			xprt_reserve(struct rpc_task *task);
void			xprt_retry_reserve(struct rpc_task *task);
int			xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int			xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_free_slot(struct rpc_xprt *xprt,
				       struct rpc_rqst *req);
bool			xprt_prepare_transmit(struct rpc_task *task);
void			xprt_request_enqueue_transmit(struct rpc_task *task);
int			xprt_request_enqueue_receive(struct rpc_task *task);
void			xprt_request_wait_receive(struct rpc_task *task);
void			xprt_request_dequeue_xprt(struct rpc_task *task);
bool			xprt_request_need_retransmit(struct rpc_task *task);
void			xprt_transmit(struct rpc_task *task);
void			xprt_end_transmit(struct rpc_task *task);
int			xprt_adjust_timeout(struct rpc_rqst *req);
void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_release(struct rpc_task *task);
struct rpc_xprt *	xprt_get(struct rpc_xprt *xprt);
void			xprt_put(struct rpc_xprt *xprt);
struct rpc_xprt *	xprt_alloc(struct net *net, size_t size,
				unsigned int num_prealloc,
				unsigned int max_req);
void			xprt_free(struct rpc_xprt *);
void			xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
bool			xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
void			xprt_cleanup_ids(void);

static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
{
	return xprt->ops->enable_swap(xprt);
}

static inline void
xprt_disable_swap(struct rpc_xprt *xprt)
{
	xprt->ops->disable_swap(xprt);
}

/*
 * Transport switch helper functions
 */
int			xprt_register_transport(struct xprt_class *type);
int			xprt_unregister_transport(struct xprt_class *type);
int			xprt_find_transport_ident(const char *);
void			xprt_wait_for_reply_request_def(struct rpc_task *task);
void			xprt_wait_for_reply_request_rtt(struct rpc_task *task);
void			xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
void			xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
bool			xprt_write_space(struct rpc_xprt *xprt);
void			xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
struct rpc_rqst *	xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
void			xprt_update_rtt(struct rpc_task *task);
void			xprt_complete_rqst(struct rpc_task *task, int copied);
void			xprt_pin_rqst(struct rpc_rqst *req);
void			xprt_unpin_rqst(struct rpc_rqst *req);
void			xprt_release_rqst_cong(struct rpc_task *task);
bool			xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
void			xprt_disconnect_done(struct rpc_xprt *xprt);
void			xprt_force_disconnect(struct rpc_xprt *xprt);
void			xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);

bool			xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
void			xprt_unlock_connect(struct rpc_xprt *, void *);
void			xprt_release_write(struct rpc_xprt *, struct rpc_task *);

/*
 * Reserved bit positions in xprt->state
 */
#define XPRT_LOCKED		(0)
#define XPRT_CONNECTED		(1)
#define XPRT_CONNECTING		(2)
#define XPRT_CLOSE_WAIT		(3)
#define XPRT_BOUND		(4)
#define XPRT_BINDING		(5)
#define XPRT_CLOSING		(6)
#define XPRT_OFFLINE		(7)
#define XPRT_REMOVE		(8)
#define XPRT_CONGESTED		(9)
#define XPRT_CWND_WAIT		(10)
#define XPRT_WRITE_SPACE	(11)
#define XPRT_SND_IS_COOKIE	(12)

static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
	set_bit(XPRT_CONNECTED, &xprt->state);
}

static inline void xprt_clear_connected(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_connected(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
{
	return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
}

static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_CONNECTING, &xprt->state);
	smp_mb__after_atomic();
}

static inline int xprt_connecting(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTING, &xprt->state);
}

static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
}

static inline void xprt_set_bound(struct rpc_xprt *xprt)
{
	test_and_set_bit(XPRT_BOUND, &xprt->state);
}

static inline int xprt_bound(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_BOUND, &xprt->state);
}

static inline void xprt_clear_bound(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_BOUND, &xprt->state);
}

static inline void xprt_clear_binding(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_BINDING, &xprt->state);
	smp_mb__after_atomic();
}

static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_BINDING, &xprt->state);
}

void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
#endif /* _LINUX_SUNRPC_XPRT_H */