// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
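
/*
 * To make the flow above concrete, a single synchronous call passes
 * through roughly this sequence (a simplified sketch of the client
 * state machine in clnt.c/sched.c, not literal code; helper names
 * other than the xprt_* calls are illustrative):
 *
 *	xprt_reserve(task);		 // grab a slot or sleep on the backlog
 *	encode_rpc_message(task);	 // marshal args into rq_snd_buf
 *	xprt_transmit(task);		 // send and queue for a reply
 *	xprt_request_wait_receive(task); // sleep on xprt->pending
 *	// On data_ready, xprt_lookup_rqst() matches the reply XID and
 *	// xprt_complete_rqst() wakes the task; if no reply arrives in
 *	// time, xprt_timer() runs and may set tk_status to -ETIMEDOUT.
 */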

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "fail.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);
static void	xprt_request_init(struct rpc_task *task);
static int	xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
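
/*
 * Example: a loadable transport pairs the register/unregister calls in
 * its module init/exit hooks. A minimal sketch (the class fields shown
 * and the ident value are illustrative; see xprtsock.c for real ones):
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= 0x4242,	// must not collide with existing idents
 *		.setup	= xs_setup_example,
 *	};
 *
 *	static int __init init_example_xprt(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit exit_example_xprt(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */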

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_find_transport_ident - convert a netid into a transport identifier
 * @netid: transport to load
 *
 * Returns:
 * > 0:		transport identifier
 * -ENOENT:	transport module not available
 */
int xprt_find_transport_ident(const char *netid)
{
	const struct xprt_class *t;
	int ret;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	ret = t->ident;
	xprt_class_release(t);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
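
/*
 * Example: a mount-time caller can map a netid string from a mount
 * option to a transport identifier (a sketch; error handling trimmed):
 *
 *	int ident = xprt_find_transport_ident("tcp");
 *	if (ident < 0)
 *		return ident;	// -ENOENT: nothing provides this netid
 *
 * Note that a successful lookup may have autoloaded a transport module
 * as a side effect of the request_module("rpctcp") call above.
 */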

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state))
		clear_bit_unlock(XPRT_LOCKED, &xprt->state);
	else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
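
/*
 * Worked example of the increment above, assuming RPC_CWNDSCALE == 256:
 * with cwnd == 4 * RPC_CWNDSCALE == 1024, a successful reply adds
 * (256 * 256 + 512) / 1024 == 64, i.e. a quarter of one slot, so four
 * such replies grow the window by one full request. A timeout instead
 * halves cwnd directly (1024 -> 512), never below RPC_CWNDSCALE.
 */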

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}
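
/*
 * Worked example: with to_initval = 10 * HZ and to_retries = 3, an
 * exponential schedule yields a major timeout of 10s << 3 = 80s, while
 * a linear schedule with to_increment = 10 * HZ yields 10 + 10 * 3 =
 * 40s. Either result is clamped to to_maxval.
 */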

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	xprt->connect_cookie++;
	smp_mb__before_atomic();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
 * @xprt: transport to disconnect
 */
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
		return;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
		rpc_wake_up_queued_task_set_status(&xprt->pending,
						   xprt->snd_task, -ENOTCONN);
}

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	xprt_schedule_autoclose_locked(xprt);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	xprt_schedule_autoclose_locked(xprt);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
static void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
	if (!fail_sunrpc.ignore_client_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		xprt->ops->inject_disconnect(xprt);
}
#else
static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
}
#endif

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_lock_connect);

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
EXPORT_SYMBOL_GPL(xprt_unlock_connect);

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
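
/*
 * Example: starting from reestablish_timeout = 3 * HZ, successive calls
 * yield 6s, 12s, 24s, ... until max_reconnect_timeout caps the doubling;
 * if the doubled value ever falls below @init_to (e.g. after the field
 * was reset to zero), it is raised back up to @init_to.
 */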

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
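
/*
 * Typical receive-path usage of the lookup/pin API (a condensed sketch
 * of what socket data_ready handlers do; the reply-copying step is a
 * placeholder):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req) {
 *		spin_unlock(&xprt->queue_lock);
 *		return;
 *	}
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *
 *	copy_reply_into(req);	// req cannot go away while pinned
 *
 *	spin_lock(&xprt->queue_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 */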

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
int
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int ret;

	if (!xprt_request_need_enqueue_receive(task, req))
		return 0;

	ret = xprt_request_prepare(task->tk_rqstp, &req->rq_rcv_buf);
	if (ret)
		return ret;
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
	return 0;
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	xdr_free_bvec(&req->rq_rcv_buf);
	req->rq_private_buf.bvec = NULL;
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
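
/*
 * Example: if rpc_calc_rto() returns a 200ms RTO for this procedure's
 * timer class and rpc_ntimeo() reports one earlier timeout, a request
 * on its second transmission (rq_retries == 1) sleeps for
 * 200ms << (1 + 1) = 800ms, subject to the to_maxval clamp above.
 */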
1291 
1292 /**
1293  * xprt_request_wait_receive - wait for the reply to an RPC request
1294  * @task: RPC task about to send a request
1295  *
1296  */
xprt_request_wait_receive(struct rpc_task * task)1297 void xprt_request_wait_receive(struct rpc_task *task)
1298 {
1299 	struct rpc_rqst *req = task->tk_rqstp;
1300 	struct rpc_xprt *xprt = req->rq_xprt;
1301 
1302 	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1303 		return;
1304 	/*
1305 	 * Sleep on the pending queue if we're expecting a reply.
1306 	 * The spinlock ensures atomicity between the test of
1307 	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1308 	 */
1309 	spin_lock(&xprt->queue_lock);
1310 	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
1311 		xprt->ops->wait_for_reply_request(task);
1312 		/*
1313 		 * Send an extra queue wakeup call if the
1314 		 * connection was dropped in case the call to
1315 		 * rpc_sleep_on() raced.
1316 		 */
1317 		if (xprt_request_retransmit_after_disconnect(task))
1318 			rpc_wake_up_queued_task_set_status(&xprt->pending,
1319 					task, -ENOTCONN);
1320 	}
1321 	spin_unlock(&xprt->queue_lock);
1322 }
1323 
1324 static bool
xprt_request_need_enqueue_transmit(struct rpc_task * task,struct rpc_rqst * req)1325 xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1326 {
1327 	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1328 }
1329 
1330 /**
1331  * xprt_request_enqueue_transmit - queue a task for transmission
1332  * @task: pointer to rpc_task
1333  *
1334  * Add a task to the transmission queue.
1335  */
1336 void
xprt_request_enqueue_transmit(struct rpc_task * task)1337 xprt_request_enqueue_transmit(struct rpc_task *task)
1338 {
1339 	struct rpc_rqst *pos, *req = task->tk_rqstp;
1340 	struct rpc_xprt *xprt = req->rq_xprt;
1341 	int ret;
1342 
1343 	if (xprt_request_need_enqueue_transmit(task, req)) {
1344 		ret = xprt_request_prepare(task->tk_rqstp, &req->rq_snd_buf);
1345 		if (ret) {
1346 			task->tk_status = ret;
1347 			return;
1348 		}
1349 		req->rq_bytes_sent = 0;
1350 		spin_lock(&xprt->queue_lock);
1351 		/*
1352 		 * Requests that carry congestion control credits are added
1353 		 * to the head of the list to avoid starvation issues.
1354 		 */
1355 		if (req->rq_cong) {
1356 			xprt_clear_congestion_window_wait(xprt);
1357 			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1358 				if (pos->rq_cong)
1359 					continue;
1360 				/* Note: req is added _before_ pos */
1361 				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1362 				INIT_LIST_HEAD(&req->rq_xmit2);
1363 				goto out;
1364 			}
1365 		} else if (!req->rq_seqno) {
1366 			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1367 				if (pos->rq_task->tk_owner != task->tk_owner)
1368 					continue;
1369 				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1370 				INIT_LIST_HEAD(&req->rq_xmit);
1371 				goto out;
1372 			}
1373 		}
1374 		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1375 		INIT_LIST_HEAD(&req->rq_xmit2);
1376 out:
1377 		atomic_long_inc(&xprt->xmit_queuelen);
1378 		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1379 		spin_unlock(&xprt->queue_lock);
1380 	}
1381 }
1382 
1383 /**
1384  * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1385  * @task: pointer to rpc_task
1386  *
1387  * Remove a task from the transmission queue
1388  * Caller must hold xprt->queue_lock
1389  */
1390 static void
xprt_request_dequeue_transmit_locked(struct rpc_task * task)1391 xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1392 {
1393 	struct rpc_rqst *req = task->tk_rqstp;
1394 
1395 	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1396 		return;
1397 	if (!list_empty(&req->rq_xmit)) {
1398 		list_del(&req->rq_xmit);
1399 		if (!list_empty(&req->rq_xmit2)) {
1400 			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1401 					struct rpc_rqst, rq_xmit2);
1402 			list_del(&req->rq_xmit2);
1403 			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1404 		}
1405 	} else
1406 		list_del(&req->rq_xmit2);
1407 	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
1408 	xdr_free_bvec(&req->rq_snd_buf);
1409 }
1410 
1411 /**
1412  * xprt_request_dequeue_transmit - remove a task from the transmission queue
1413  * @task: pointer to rpc_task
1414  *
1415  * Remove a task from the transmission queue
1416  */
1417 static void
xprt_request_dequeue_transmit(struct rpc_task * task)1418 xprt_request_dequeue_transmit(struct rpc_task *task)
1419 {
1420 	struct rpc_rqst *req = task->tk_rqstp;
1421 	struct rpc_xprt *xprt = req->rq_xprt;
1422 
1423 	spin_lock(&xprt->queue_lock);
1424 	xprt_request_dequeue_transmit_locked(task);
1425 	spin_unlock(&xprt->queue_lock);
1426 }
1427 
1428 /**
1429  * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1430  * @task: pointer to rpc_task
1431  *
1432  * Remove a task from the transmit and receive queues, and ensure that
1433  * it is not pinned by the receive work item.
1434  */
1435 void
xprt_request_dequeue_xprt(struct rpc_task * task)1436 xprt_request_dequeue_xprt(struct rpc_task *task)
1437 {
1438 	struct rpc_rqst	*req = task->tk_rqstp;
1439 	struct rpc_xprt *xprt = req->rq_xprt;
1440 
1441 	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1442 	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1443 	    xprt_is_pinned_rqst(req)) {
1444 		spin_lock(&xprt->queue_lock);
1445 		while (xprt_is_pinned_rqst(req)) {
1446 			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1447 			spin_unlock(&xprt->queue_lock);
1448 			xprt_wait_on_pinned_rqst(req);
1449 			spin_lock(&xprt->queue_lock);
1450 			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1451 		}
1452 		xprt_request_dequeue_transmit_locked(task);
1453 		xprt_request_dequeue_receive_locked(task);
1454 		spin_unlock(&xprt->queue_lock);
1455 		xdr_free_bvec(&req->rq_rcv_buf);
1456 	}
1457 }
1458 
1459 /**
1460  * xprt_request_prepare - prepare an encoded request for transport
1461  * @req: pointer to rpc_rqst
1462  * @buf: pointer to send/rcv xdr_buf
1463  *
1464  * Calls into the transport layer to do whatever is needed to prepare
1465  * the request for transmission or receive.
1466  * Returns error, or zero.
1467  */
1468 static int
xprt_request_prepare(struct rpc_rqst * req,struct xdr_buf * buf)1469 xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf)
1470 {
1471 	struct rpc_xprt *xprt = req->rq_xprt;
1472 
1473 	if (xprt->ops->prepare_request)
1474 		return xprt->ops->prepare_request(req, buf);
1475 	return 0;
1476 }
1477 
1478 /**
1479  * xprt_request_need_retransmit - Test if a task needs retransmission
1480  * @task: pointer to rpc_task
1481  *
1482  * Test for whether a connection breakage requires the task to retransmit
1483  */
1484 bool
xprt_request_need_retransmit(struct rpc_task * task)1485 xprt_request_need_retransmit(struct rpc_task *task)
1486 {
1487 	return xprt_request_retransmit_after_disconnect(task);
1488 }
1489 
1490 /**
1491  * xprt_prepare_transmit - reserve the transport before sending a request
1492  * @task: RPC task about to send a request
1493  *
1494  */
xprt_prepare_transmit(struct rpc_task * task)1495 bool xprt_prepare_transmit(struct rpc_task *task)
1496 {
1497 	struct rpc_rqst	*req = task->tk_rqstp;
1498 	struct rpc_xprt	*xprt = req->rq_xprt;
1499 
1500 	if (!xprt_lock_write(xprt, task)) {
1501 		/* Race breaker: someone may have transmitted us */
1502 		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1503 			rpc_wake_up_queued_task_set_status(&xprt->sending,
1504 					task, 0);
1505 		return false;
1506 
1507 	}
1508 	if (atomic_read(&xprt->swapper))
1509 		/* This will be clear in __rpc_execute */
1510 		current->flags |= PF_MEMALLOC;
1511 	return true;
1512 }
1513 
xprt_end_transmit(struct rpc_task * task)1514 void xprt_end_transmit(struct rpc_task *task)
1515 {
1516 	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
1517 
1518 	xprt_inject_disconnect(xprt);
1519 	xprt_release_write(xprt, task);
1520 }
1521 
1522 /**
1523  * xprt_request_transmit - send an RPC request on a transport
1524  * @req: pointer to request to transmit
1525  * @snd_task: RPC task that owns the transport lock
1526  *
1527  * This performs the transmission of a single request.
1528  * Note that if the request is not the same as snd_task, then it
1529  * does need to be pinned.
1530  * Returns '0' on success.
1531  */
1532 static int
xprt_request_transmit(struct rpc_rqst * req,struct rpc_task * snd_task)1533 xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1534 {
1535 	struct rpc_xprt *xprt = req->rq_xprt;
1536 	struct rpc_task *task = req->rq_task;
1537 	unsigned int connect_cookie;
1538 	int is_retrans = RPC_WAS_SENT(task);
1539 	int status;
1540 
1541 	if (!req->rq_bytes_sent) {
1542 		if (xprt_request_data_received(task)) {
1543 			status = 0;
1544 			goto out_dequeue;
1545 		}
1546 		/* Verify that our message lies in the RPCSEC_GSS window */
1547 		if (rpcauth_xmit_need_reencode(task)) {
1548 			status = -EBADMSG;
1549 			goto out_dequeue;
1550 		}
1551 		if (RPC_SIGNALLED(task)) {
1552 			status = -ERESTARTSYS;
1553 			goto out_dequeue;
1554 		}
1555 	}
1556 
1557 	/*
1558 	 * Update req->rq_ntrans before transmitting to avoid races with
1559 	 * xprt_update_rtt(), which needs to know that it is recording a
1560 	 * reply to the first transmission.
1561 	 */
1562 	req->rq_ntrans++;
1563 
1564 	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
1565 	connect_cookie = xprt->connect_cookie;
1566 	status = xprt->ops->send_request(req);
1567 	if (status != 0) {
1568 		req->rq_ntrans--;
1569 		trace_xprt_transmit(req, status);
1570 		return status;
1571 	}
1572 
1573 	if (is_retrans) {
1574 		task->tk_client->cl_stats->rpcretrans++;
1575 		trace_xprt_retransmit(req);
1576 	}
1577 
1578 	xprt_inject_disconnect(xprt);
1579 
1580 	task->tk_flags |= RPC_TASK_SENT;
1581 	spin_lock(&xprt->transport_lock);
1582 
1583 	xprt->stat.sends++;
1584 	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1585 	xprt->stat.bklog_u += xprt->backlog.qlen;
1586 	xprt->stat.sending_u += xprt->sending.qlen;
1587 	xprt->stat.pending_u += xprt->pending.qlen;
1588 	spin_unlock(&xprt->transport_lock);
1589 
1590 	req->rq_connect_cookie = connect_cookie;
1591 out_dequeue:
1592 	trace_xprt_transmit(req, status);
1593 	xprt_request_dequeue_transmit(task);
1594 	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1595 	return status;
1596 }
1597 
1598 /**
1599  * xprt_transmit - send an RPC request on a transport
1600  * @task: controlling RPC task
1601  *
1602  * Attempts to drain the transmit queue. On exit, either the transport
1603  * signalled an error that needs to be handled before transmission can
1604  * resume, or @task finished transmitting, and detected that it already
1605  * received a reply.
1606  */
1607 void
xprt_transmit(struct rpc_task * task)1608 xprt_transmit(struct rpc_task *task)
1609 {
1610 	struct rpc_rqst *next, *req = task->tk_rqstp;
1611 	struct rpc_xprt	*xprt = req->rq_xprt;
1612 	int status;
1613 
1614 	spin_lock(&xprt->queue_lock);
1615 	for (;;) {
1616 		next = list_first_entry_or_null(&xprt->xmit_queue,
1617 						struct rpc_rqst, rq_xmit);
1618 		if (!next)
1619 			break;
1620 		xprt_pin_rqst(next);
1621 		spin_unlock(&xprt->queue_lock);
1622 		status = xprt_request_transmit(next, task);
1623 		if (status == -EBADMSG && next != req)
1624 			status = 0;
1625 		spin_lock(&xprt->queue_lock);
1626 		xprt_unpin_rqst(next);
1627 		if (status < 0) {
1628 			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1629 				task->tk_status = status;
1630 			break;
1631 		}
1632 		/* Was @task transmitted, and has it received a reply? */
1633 		if (xprt_request_data_received(task) &&
1634 		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1635 			break;
1636 		cond_resched_lock(&xprt->queue_lock);
1637 	}
1638 	spin_unlock(&xprt->queue_lock);
1639 }
1640 
xprt_complete_request_init(struct rpc_task * task)1641 static void xprt_complete_request_init(struct rpc_task *task)
1642 {
1643 	if (task->tk_rqstp)
1644 		xprt_request_init(task);
1645 }
1646 
xprt_add_backlog(struct rpc_xprt * xprt,struct rpc_task * task)1647 void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1648 {
1649 	set_bit(XPRT_CONGESTED, &xprt->state);
1650 	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
1651 }
1652 EXPORT_SYMBOL_GPL(xprt_add_backlog);
1653 
__xprt_set_rq(struct rpc_task * task,void * data)1654 static bool __xprt_set_rq(struct rpc_task *task, void *data)
1655 {
1656 	struct rpc_rqst *req = data;
1657 
1658 	if (task->tk_rqstp == NULL) {
1659 		memset(req, 0, sizeof(*req));	/* mark unused */
1660 		task->tk_rqstp = req;
1661 		return true;
1662 	}
1663 	return false;
1664 }
1665 
xprt_wake_up_backlog(struct rpc_xprt * xprt,struct rpc_rqst * req)1666 bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
1667 {
1668 	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
1669 		clear_bit(XPRT_CONGESTED, &xprt->state);
1670 		return false;
1671 	}
1672 	return true;
1673 }
1674 EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
1675 
xprt_throttle_congested(struct rpc_xprt * xprt,struct rpc_task * task)1676 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1677 {
1678 	bool ret = false;
1679 
1680 	if (!test_bit(XPRT_CONGESTED, &xprt->state))
1681 		goto out;
1682 	spin_lock(&xprt->reserve_lock);
1683 	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1684 		xprt_add_backlog(xprt, task);
1685 		ret = true;
1686 	}
1687 	spin_unlock(&xprt->reserve_lock);
1688 out:
1689 	return ret;
1690 }
1691 
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(*req), rpc_task_gfp_mask());
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
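
/*
 * Illustrative sketch: a transport implementation typically plugs these
 * helpers straight into its rpc_xprt_ops.  The table below is
 * hypothetical and abridged; see the ops tables in net/sunrpc/xprtsock.c,
 * where UDP uses xprt_alloc_slot directly while TCP wraps it in a
 * variant that first takes the transport write lock.
 *
 *	static const struct rpc_xprt_ops example_ops = {
 *		.alloc_slot	= xprt_alloc_slot,
 *		.free_slot	= xprt_free_slot,
 *		// ... connect, send_request, close, destroy, ...
 *	};
 */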

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

static DEFINE_IDA(rpc_xprt_ids);

void xprt_cleanup_ids(void)
{
	ida_destroy(&rpc_xprt_ids);
}

static int xprt_alloc_id(struct rpc_xprt *xprt)
{
	int id;

	id = ida_alloc(&rpc_xprt_ids, GFP_KERNEL);
	if (id < 0)
		return id;

	xprt->id = id;
	return 0;
}

static void xprt_free_id(struct rpc_xprt *xprt)
{
	ida_free(&rpc_xprt_ids, xprt->id);
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_alloc_id(xprt);
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc);
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
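
/*
 * Illustrative sketch: a transport's setup routine embeds struct rpc_xprt
 * in its own private structure and sizes the slot table via xprt_alloc().
 * "example_xprt" and example_setup() are hypothetical; the socket
 * transports size the table from their slot_table_entries module
 * parameters instead of the defaults used here.
 *
 *	struct example_xprt {
 *		struct rpc_xprt	xprt;
 *		// ... transport-private state ...
 *	};
 *
 *	static struct rpc_xprt *example_setup(struct xprt_create *args)
 *	{
 *		struct rpc_xprt *xprt;
 *
 *		xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *				  RPC_DEF_SLOT_TABLE, RPC_MAX_SLOT_TABLE);
 *		if (!xprt)
 *			return ERR_PTR(-ENOMEM);
 *		// ... fill in ops, timeouts, and addresses ...
 *		return xprt;
 *	}
 */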

void xprt_free(struct rpc_xprt *xprt)
{
	put_net_track(xprt->xprt_net, &xprt->ns_tracker);
	xprt_free_all_slots(xprt);
	xprt_free_id(xprt);
	rpc_sysfs_xprt_destroy(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = get_random_u32();
}
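
/*
 * Each transport seeds its XID stream with a random value and then
 * hands out XIDs sequentially.  Sequential XIDs keep reply matching
 * cheap, while the random starting point makes it unlikely that a
 * rebooted client reuses XIDs that a server's duplicate request cache
 * still remembers.
 */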

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference from xprt_reserve() is that this
 * function ignores the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}
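
/*
 * Illustrative sketch (hypothetical helper, abridged): the client state
 * machine pairs these entry points roughly as follows; see call_reserve()
 * and call_retry_reserve() in clnt.c for the real callers.
 *
 *	static void example_reserve_step(struct rpc_task *task)
 *	{
 *		xprt_reserve(task);
 *		if (task->tk_rqstp != NULL)
 *			return;		// got a slot; go encode the request
 *		// -EAGAIN: queued on the backlog; once a slot frees up,
 *		// the state machine retries via xprt_retry_reserve()
 *	}
 */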

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net_track(net, &xprt->ns_tracker, GFP_KERNEL);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}
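
/*
 * Illustrative sketch: callers fill in struct xprt_create and let the
 * registered transport class build the rpc_xprt.  The values below are
 * hypothetical; rpc_create() in clnt.c performs the real setup.
 *
 *	struct xprt_create xprtargs = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&addr,
 *		.addrlen	= sizeof(addr),
 *		.servername	= "example-server",
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&xprtargs);
 *
 *	if (IS_ERR(xprt))
 *		// no transport class registered for .ident, or setup failed
 */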

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/*
	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
	 * is cleared.  We use ->transport_lock to ensure the mod_timer()
	 * can only run *before* del_timer_sync(), never after.
	 */
	spin_lock(&xprt->transport_lock);
	del_timer_sync(&xprt->timer);
	spin_unlock(&xprt->transport_lock);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
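
/*
 * Illustrative sketch: xprt_get()/xprt_put() follow the usual kref
 * pattern.  A caller that stashes a transport pointer must hold its own
 * reference; the final xprt_put() triggers xprt_destroy().
 *
 *	struct rpc_xprt *xprt = xprt_get(task->tk_xprt);
 *
 *	if (xprt) {
 *		// ... safe to use xprt here ...
 *		xprt_put(xprt);
 *	}
 */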

void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) {
		spin_lock(&xps->xps_lock);
		xps->xps_nactive--;
		spin_unlock(&xps->xps_lock);
	}
}

void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) {
		spin_lock(&xps->xps_lock);
		xps->xps_nactive++;
		spin_unlock(&xps->xps_lock);
	}
}

void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (test_and_set_bit(XPRT_REMOVE, &xprt->state))
		return;

	xprt_force_disconnect(xprt);
	if (!test_bit(XPRT_CONNECTED, &xprt->state))
		return;

	if (!xprt->sending.qlen && !xprt->pending.qlen &&
	    !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen))
		rpc_xprt_switch_remove_xprt(xps, xprt, true);
}