Lines Matching refs:xprt

72 static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
73 static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
74 static void xprt_destroy(struct rpc_xprt *xprt);
247 static void xprt_clear_locked(struct rpc_xprt *xprt) in xprt_clear_locked() argument
249 xprt->snd_task = NULL; in xprt_clear_locked()
250 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) in xprt_clear_locked()
251 clear_bit_unlock(XPRT_LOCKED, &xprt->state); in xprt_clear_locked()
253 queue_work(xprtiod_workqueue, &xprt->task_cleanup); in xprt_clear_locked()
265 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_reserve_xprt() argument
269 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { in xprt_reserve_xprt()
270 if (task == xprt->snd_task) in xprt_reserve_xprt()
274 if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) in xprt_reserve_xprt()
276 xprt->snd_task = task; in xprt_reserve_xprt()
279 trace_xprt_reserve_xprt(xprt, task); in xprt_reserve_xprt()
283 xprt_clear_locked(xprt); in xprt_reserve_xprt()
287 rpc_sleep_on_timeout(&xprt->sending, task, NULL, in xprt_reserve_xprt()
290 rpc_sleep_on(&xprt->sending, task, NULL); in xprt_reserve_xprt()
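
The fragments above trace the send-lock protocol: a single XPRT_LOCKED bit serializes senders, xprt->snd_task records the owner so a second reserve by the same task succeeds, and losers sleep on the xprt->sending queue. Below is a minimal userspace model of just the ownership part, using C11 atomics; struct xprt_model, try_reserve() and release() are illustrative names, not kernel API, and the XPRT_WRITE_SPACE back-off and wait-queue handling are left out:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct xprt_model {
        atomic_flag locked;          /* stands in for the XPRT_LOCKED bit */
        _Atomic(void *) snd_task;    /* current owner, like xprt->snd_task */
    };

    /* Returns true if @task now holds the send lock (possibly re-entrantly). */
    static bool try_reserve(struct xprt_model *x, void *task)
    {
        if (atomic_flag_test_and_set_explicit(&x->locked,
                                              memory_order_acquire)) {
            /* Already locked: succeed only for the current owner. */
            return atomic_load(&x->snd_task) == task;
        }
        atomic_store(&x->snd_task, task);
        return true;
    }

    static void release(struct xprt_model *x, void *task)
    {
        if (atomic_load(&x->snd_task) != task)
            return;
        atomic_store(&x->snd_task, NULL);
        atomic_flag_clear_explicit(&x->locked, memory_order_release);
        /* The kernel then wakes the next waiter on xprt->sending. */
    }
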
296 xprt_need_congestion_window_wait(struct rpc_xprt *xprt) in xprt_need_congestion_window_wait() argument
298 return test_bit(XPRT_CWND_WAIT, &xprt->state); in xprt_need_congestion_window_wait()
302 xprt_set_congestion_window_wait(struct rpc_xprt *xprt) in xprt_set_congestion_window_wait() argument
304 if (!list_empty(&xprt->xmit_queue)) { in xprt_set_congestion_window_wait()
306 if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst, in xprt_set_congestion_window_wait()
310 set_bit(XPRT_CWND_WAIT, &xprt->state); in xprt_set_congestion_window_wait()
314 xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt) in xprt_test_and_clear_congestion_window_wait() argument
316 if (!RPCXPRT_CONGESTED(xprt)) in xprt_test_and_clear_congestion_window_wait()
317 clear_bit(XPRT_CWND_WAIT, &xprt->state); in xprt_test_and_clear_congestion_window_wait()
329 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_reserve_xprt_cong() argument
333 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { in xprt_reserve_xprt_cong()
334 if (task == xprt->snd_task) in xprt_reserve_xprt_cong()
339 xprt->snd_task = task; in xprt_reserve_xprt_cong()
342 if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) in xprt_reserve_xprt_cong()
344 if (!xprt_need_congestion_window_wait(xprt)) { in xprt_reserve_xprt_cong()
345 xprt->snd_task = task; in xprt_reserve_xprt_cong()
349 xprt_clear_locked(xprt); in xprt_reserve_xprt_cong()
353 rpc_sleep_on_timeout(&xprt->sending, task, NULL, in xprt_reserve_xprt_cong()
356 rpc_sleep_on(&xprt->sending, task, NULL); in xprt_reserve_xprt_cong()
359 trace_xprt_reserve_cong(xprt, task); in xprt_reserve_xprt_cong()
364 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_lock_write() argument
368 if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task) in xprt_lock_write()
370 spin_lock(&xprt->transport_lock); in xprt_lock_write()
371 retval = xprt->ops->reserve_xprt(xprt, task); in xprt_lock_write()
372 spin_unlock(&xprt->transport_lock); in xprt_lock_write()
378 struct rpc_xprt *xprt = data; in __xprt_lock_write_func() local
380 xprt->snd_task = task; in __xprt_lock_write_func()
384 static void __xprt_lock_write_next(struct rpc_xprt *xprt) in __xprt_lock_write_next() argument
386 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in __xprt_lock_write_next()
388 if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) in __xprt_lock_write_next()
390 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending, in __xprt_lock_write_next()
391 __xprt_lock_write_func, xprt)) in __xprt_lock_write_next()
394 xprt_clear_locked(xprt); in __xprt_lock_write_next()
397 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) in __xprt_lock_write_next_cong() argument
399 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in __xprt_lock_write_next_cong()
401 if (test_bit(XPRT_WRITE_SPACE, &xprt->state)) in __xprt_lock_write_next_cong()
403 if (xprt_need_congestion_window_wait(xprt)) in __xprt_lock_write_next_cong()
405 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending, in __xprt_lock_write_next_cong()
406 __xprt_lock_write_func, xprt)) in __xprt_lock_write_next_cong()
409 xprt_clear_locked(xprt); in __xprt_lock_write_next_cong()
419 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_xprt() argument
421 if (xprt->snd_task == task) { in xprt_release_xprt()
422 xprt_clear_locked(xprt); in xprt_release_xprt()
423 __xprt_lock_write_next(xprt); in xprt_release_xprt()
425 trace_xprt_release_xprt(xprt, task); in xprt_release_xprt()
437 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_xprt_cong() argument
439 if (xprt->snd_task == task) { in xprt_release_xprt_cong()
440 xprt_clear_locked(xprt); in xprt_release_xprt_cong()
441 __xprt_lock_write_next_cong(xprt); in xprt_release_xprt_cong()
443 trace_xprt_release_cong(xprt, task); in xprt_release_xprt_cong()
447 void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_release_write() argument
449 if (xprt->snd_task != task) in xprt_release_write()
451 spin_lock(&xprt->transport_lock); in xprt_release_write()
452 xprt->ops->release_xprt(xprt, task); in xprt_release_write()
453 spin_unlock(&xprt->transport_lock); in xprt_release_write()
461 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) in __xprt_get_cong() argument
465 trace_xprt_get_cong(xprt, req->rq_task); in __xprt_get_cong()
466 if (RPCXPRT_CONGESTED(xprt)) { in __xprt_get_cong()
467 xprt_set_congestion_window_wait(xprt); in __xprt_get_cong()
471 xprt->cong += RPC_CWNDSCALE; in __xprt_get_cong()
480 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) in __xprt_put_cong() argument
485 xprt->cong -= RPC_CWNDSCALE; in __xprt_put_cong()
486 xprt_test_and_clear_congestion_window_wait(xprt); in __xprt_put_cong()
487 trace_xprt_put_cong(xprt, req->rq_task); in __xprt_put_cong()
488 __xprt_lock_write_next_cong(xprt); in __xprt_put_cong()
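
Lines 461-488 encode the congestion-window bookkeeping: each congestion-controlled request in flight holds RPC_CWNDSCALE units of xprt->cong, and RPCXPRT_CONGESTED() (tested at line 466) is simply cong >= cwnd. A compact sketch of that arithmetic; struct cong_model and the helper names are illustrative, and the RPC_CWNDSHIFT value mirrors upstream as best I recall:

    #define RPC_CWNDSHIFT   8UL                     /* upstream value, as recalled */
    #define RPC_CWNDSCALE   (1UL << RPC_CWNDSHIFT)

    struct cong_model {
        unsigned long cong;     /* units held by in-flight requests */
        unsigned long cwnd;     /* current window, in the same units */
    };

    static int congested(const struct cong_model *c)
    {
        return c->cong >= c->cwnd;              /* ~ RPCXPRT_CONGESTED() */
    }

    static int slot_get(struct cong_model *c)   /* ~ __xprt_get_cong() */
    {
        if (congested(c))
            return 0;           /* caller sets XPRT_CWND_WAIT and waits */
        c->cong += RPC_CWNDSCALE;
        return 1;
    }

    static void slot_put(struct cong_model *c)  /* ~ __xprt_put_cong() */
    {
        c->cong -= RPC_CWNDSCALE;
        /* kernel then clears XPRT_CWND_WAIT and wakes the next sender */
    }
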
499 xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_request_get_cong() argument
505 spin_lock(&xprt->transport_lock); in xprt_request_get_cong()
506 ret = __xprt_get_cong(xprt, req) != 0; in xprt_request_get_cong()
507 spin_unlock(&xprt->transport_lock); in xprt_request_get_cong()
526 static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt) in xprt_clear_congestion_window_wait_locked() argument
528 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) in xprt_clear_congestion_window_wait_locked()
529 __xprt_lock_write_next_cong(xprt); in xprt_clear_congestion_window_wait_locked()
537 xprt_clear_congestion_window_wait(struct rpc_xprt *xprt) in xprt_clear_congestion_window_wait() argument
539 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) { in xprt_clear_congestion_window_wait()
540 spin_lock(&xprt->transport_lock); in xprt_clear_congestion_window_wait()
541 __xprt_lock_write_next_cong(xprt); in xprt_clear_congestion_window_wait()
542 spin_unlock(&xprt->transport_lock); in xprt_clear_congestion_window_wait()
562 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result) in xprt_adjust_cwnd() argument
565 unsigned long cwnd = xprt->cwnd; in xprt_adjust_cwnd()
567 if (result >= 0 && cwnd <= xprt->cong) { in xprt_adjust_cwnd()
571 if (cwnd > RPC_MAXCWND(xprt)) in xprt_adjust_cwnd()
572 cwnd = RPC_MAXCWND(xprt); in xprt_adjust_cwnd()
573 __xprt_lock_write_next_cong(xprt); in xprt_adjust_cwnd()
580 xprt->cong, xprt->cwnd, cwnd); in xprt_adjust_cwnd()
581 xprt->cwnd = cwnd; in xprt_adjust_cwnd()
582 __xprt_put_cong(xprt, req); in xprt_adjust_cwnd()
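
xprt_adjust_cwnd() applies a Van Jacobson style AIMD update. The cap at RPC_MAXCWND() is visible above (lines 571-572); the additive-increase step and the halving on -ETIMEDOUT are upstream behaviour reproduced here from memory, so treat the exact formula as a hedged reconstruction:

    #include <errno.h>

    #define RPC_CWNDSCALE   (1UL << 8)      /* illustrative scale */

    static unsigned long adjust_cwnd(unsigned long cwnd, unsigned long cong,
                                     unsigned long maxcwnd, int result)
    {
        if (result >= 0 && cwnd <= cong) {
            /* window was full and the call succeeded: grow slowly */
            cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
            if (cwnd > maxcwnd)
                cwnd = maxcwnd;
        } else if (result == -ETIMEDOUT) {
            cwnd >>= 1;                 /* multiplicative decrease */
            if (cwnd < RPC_CWNDSCALE)
                cwnd = RPC_CWNDSCALE;
        }
        return cwnd;
    }
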
592 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) in xprt_wake_pending_tasks() argument
595 rpc_wake_up_status(&xprt->pending, status); in xprt_wake_pending_tasks()
597 rpc_wake_up(&xprt->pending); in xprt_wake_pending_tasks()
609 void xprt_wait_for_buffer_space(struct rpc_xprt *xprt) in xprt_wait_for_buffer_space() argument
611 set_bit(XPRT_WRITE_SPACE, &xprt->state); in xprt_wait_for_buffer_space()
616 xprt_clear_write_space_locked(struct rpc_xprt *xprt) in xprt_clear_write_space_locked() argument
618 if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) { in xprt_clear_write_space_locked()
619 __xprt_lock_write_next(xprt); in xprt_clear_write_space_locked()
621 "xprt %p\n", xprt); in xprt_clear_write_space_locked()
633 bool xprt_write_space(struct rpc_xprt *xprt) in xprt_write_space() argument
637 if (!test_bit(XPRT_WRITE_SPACE, &xprt->state)) in xprt_write_space()
639 spin_lock(&xprt->transport_lock); in xprt_write_space()
640 ret = xprt_clear_write_space_locked(xprt); in xprt_write_space()
641 spin_unlock(&xprt->transport_lock); in xprt_write_space()
681 struct rpc_xprt *xprt = req->rq_xprt; in xprt_init_majortimeo() local
683 if (likely(xprt && xprt_connected(xprt))) in xprt_init_majortimeo()
699 struct rpc_xprt *xprt = req->rq_xprt; in xprt_adjust_timeout() local
718 spin_lock(&xprt->transport_lock); in xprt_adjust_timeout()
720 spin_unlock(&xprt->transport_lock); in xprt_adjust_timeout()
734 struct rpc_xprt *xprt = in xprt_autoclose() local
738 trace_xprt_disconnect_auto(xprt); in xprt_autoclose()
739 xprt->connect_cookie++; in xprt_autoclose()
741 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); in xprt_autoclose()
742 xprt->ops->close(xprt); in xprt_autoclose()
743 xprt_release_write(xprt, NULL); in xprt_autoclose()
744 wake_up_bit(&xprt->state, XPRT_LOCKED); in xprt_autoclose()
753 void xprt_disconnect_done(struct rpc_xprt *xprt) in xprt_disconnect_done() argument
755 trace_xprt_disconnect_done(xprt); in xprt_disconnect_done()
756 spin_lock(&xprt->transport_lock); in xprt_disconnect_done()
757 xprt_clear_connected(xprt); in xprt_disconnect_done()
758 xprt_clear_write_space_locked(xprt); in xprt_disconnect_done()
759 xprt_clear_congestion_window_wait_locked(xprt); in xprt_disconnect_done()
760 xprt_wake_pending_tasks(xprt, -ENOTCONN); in xprt_disconnect_done()
761 spin_unlock(&xprt->transport_lock); in xprt_disconnect_done()
769 static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt) in xprt_schedule_autoclose_locked() argument
771 if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state)) in xprt_schedule_autoclose_locked()
773 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) in xprt_schedule_autoclose_locked()
774 queue_work(xprtiod_workqueue, &xprt->task_cleanup); in xprt_schedule_autoclose_locked()
775 else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state)) in xprt_schedule_autoclose_locked()
776 rpc_wake_up_queued_task_set_status(&xprt->pending, in xprt_schedule_autoclose_locked()
777 xprt->snd_task, -ENOTCONN); in xprt_schedule_autoclose_locked()
785 void xprt_force_disconnect(struct rpc_xprt *xprt) in xprt_force_disconnect() argument
787 trace_xprt_disconnect_force(xprt); in xprt_force_disconnect()
790 spin_lock(&xprt->transport_lock); in xprt_force_disconnect()
791 xprt_schedule_autoclose_locked(xprt); in xprt_force_disconnect()
792 spin_unlock(&xprt->transport_lock); in xprt_force_disconnect()
797 xprt_connect_cookie(struct rpc_xprt *xprt) in xprt_connect_cookie() argument
799 return READ_ONCE(xprt->connect_cookie); in xprt_connect_cookie()
806 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_retransmit_after_disconnect() local
808 return req->rq_connect_cookie != xprt_connect_cookie(xprt) || in xprt_request_retransmit_after_disconnect()
809 !xprt_connected(xprt); in xprt_request_retransmit_after_disconnect()
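
The connect_cookie bumped at line 739 is the disconnect detector: a request snapshots the cookie when it is set up, and xprt_request_retransmit_after_disconnect() (lines 806-809) flags a retransmit when the snapshot no longer matches or the transport is down. The check reduces to the following; req_model and needs_retransmit are illustrative names:

    #include <stdbool.h>

    struct req_model {
        unsigned int rq_connect_cookie;  /* snapshot taken at setup time */
    };

    static bool needs_retransmit(const struct req_model *req,
                                 unsigned int current_cookie, bool connected)
    {
        /* Cookie moved on => the connection this was sent over is gone. */
        return req->rq_connect_cookie != current_cookie || !connected;
    }
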
823 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie) in xprt_conditional_disconnect() argument
826 spin_lock(&xprt->transport_lock); in xprt_conditional_disconnect()
827 if (cookie != xprt->connect_cookie) in xprt_conditional_disconnect()
829 if (test_bit(XPRT_CLOSING, &xprt->state)) in xprt_conditional_disconnect()
831 xprt_schedule_autoclose_locked(xprt); in xprt_conditional_disconnect()
833 spin_unlock(&xprt->transport_lock); in xprt_conditional_disconnect()
837 xprt_has_timer(const struct rpc_xprt *xprt) in xprt_has_timer() argument
839 return xprt->idle_timeout != 0; in xprt_has_timer()
843 xprt_schedule_autodisconnect(struct rpc_xprt *xprt) in xprt_schedule_autodisconnect() argument
844 __must_hold(&xprt->transport_lock) in xprt_schedule_autodisconnect()
846 xprt->last_used = jiffies; in xprt_schedule_autodisconnect()
847 if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt)) in xprt_schedule_autodisconnect()
848 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); in xprt_schedule_autodisconnect()
854 struct rpc_xprt *xprt = from_timer(xprt, t, timer); in xprt_init_autodisconnect() local
856 if (!RB_EMPTY_ROOT(&xprt->recv_queue)) in xprt_init_autodisconnect()
859 xprt->last_used = jiffies; in xprt_init_autodisconnect()
860 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) in xprt_init_autodisconnect()
862 queue_work(xprtiod_workqueue, &xprt->task_cleanup); in xprt_init_autodisconnect()
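
Lines 843-862 give both halves of idle autodisconnect: the timer is re-armed at last_used + idle_timeout only while no replies are outstanding (recv_queue empty), and the callback backs off if replies appear or if it cannot take XPRT_LOCKED itself. A small state-machine model with illustrative names:

    #include <stdbool.h>

    struct idle_model {
        unsigned long last_used;
        unsigned long idle_timeout;     /* 0 => xprt_has_timer() is false */
        unsigned long timer_expires;    /* 0 => timer not armed */
        bool recv_queue_empty;
        bool locked;                    /* models the XPRT_LOCKED bit */
    };

    static void schedule_autodisconnect(struct idle_model *x, unsigned long now)
    {
        x->last_used = now;
        if (x->recv_queue_empty && x->idle_timeout != 0)
            x->timer_expires = x->last_used + x->idle_timeout;
    }

    /* Timer callback: returns true when cleanup work should be queued. */
    static bool timer_fired(struct idle_model *x, unsigned long now)
    {
        if (!x->recv_queue_empty)
            return false;               /* replies pending: stay connected */
        x->last_used = now;
        if (x->locked)
            return false;               /* a sender holds the lock: skip */
        x->locked = true;               /* ~ test_and_set_bit(XPRT_LOCKED) */
        return true;                    /* queue xprt_autoclose() work */
    }
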
866 static void xprt_inject_disconnect(struct rpc_xprt *xprt) in xprt_inject_disconnect() argument
870 xprt->ops->inject_disconnect(xprt); in xprt_inject_disconnect()
873 static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) in xprt_inject_disconnect() argument
878 bool xprt_lock_connect(struct rpc_xprt *xprt, in xprt_lock_connect() argument
884 spin_lock(&xprt->transport_lock); in xprt_lock_connect()
885 if (!test_bit(XPRT_LOCKED, &xprt->state)) in xprt_lock_connect()
887 if (xprt->snd_task != task) in xprt_lock_connect()
889 set_bit(XPRT_SND_IS_COOKIE, &xprt->state); in xprt_lock_connect()
890 xprt->snd_task = cookie; in xprt_lock_connect()
893 spin_unlock(&xprt->transport_lock); in xprt_lock_connect()
898 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) in xprt_unlock_connect() argument
900 spin_lock(&xprt->transport_lock); in xprt_unlock_connect()
901 if (xprt->snd_task != cookie) in xprt_unlock_connect()
903 if (!test_bit(XPRT_LOCKED, &xprt->state)) in xprt_unlock_connect()
905 xprt->snd_task = NULL; in xprt_unlock_connect()
906 clear_bit(XPRT_SND_IS_COOKIE, &xprt->state); in xprt_unlock_connect()
907 xprt->ops->release_xprt(xprt, NULL); in xprt_unlock_connect()
908 xprt_schedule_autodisconnect(xprt); in xprt_unlock_connect()
910 spin_unlock(&xprt->transport_lock); in xprt_unlock_connect()
911 wake_up_bit(&xprt->state, XPRT_LOCKED); in xprt_unlock_connect()
922 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; in xprt_connect() local
924 trace_xprt_connect(xprt); in xprt_connect()
926 if (!xprt_bound(xprt)) { in xprt_connect()
930 if (!xprt_lock_write(xprt, task)) in xprt_connect()
933 if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { in xprt_connect()
934 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; in xprt_connect()
935 rpc_sleep_on_timeout(&xprt->pending, task, NULL, in xprt_connect()
938 if (test_bit(XPRT_CLOSING, &xprt->state)) in xprt_connect()
940 if (xprt_test_and_set_connecting(xprt)) in xprt_connect()
943 if (!xprt_connected(xprt)) { in xprt_connect()
944 xprt->stat.connect_start = jiffies; in xprt_connect()
945 xprt->ops->connect(xprt, task); in xprt_connect()
947 xprt_clear_connecting(xprt); in xprt_connect()
949 rpc_wake_up_queued_task(&xprt->pending, task); in xprt_connect()
952 xprt_release_write(xprt, task); in xprt_connect()
960 unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt) in xprt_reconnect_delay() argument
964 start = xprt->stat.connect_start + xprt->reestablish_timeout; in xprt_reconnect_delay()
977 void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to) in xprt_reconnect_backoff() argument
979 xprt->reestablish_timeout <<= 1; in xprt_reconnect_backoff()
980 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout) in xprt_reconnect_backoff()
981 xprt->reestablish_timeout = xprt->max_reconnect_timeout; in xprt_reconnect_backoff()
982 if (xprt->reestablish_timeout < init_to) in xprt_reconnect_backoff()
983 xprt->reestablish_timeout = init_to; in xprt_reconnect_backoff()
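
Lines 979-983 contain the complete backoff rule, so this one can be reassembled directly: double reestablish_timeout, clamp it to max_reconnect_timeout, and never let it drop below the caller's initial value. As a plain function instead of the kernel's struct-member updates:

    static unsigned long reconnect_backoff(unsigned long reestablish_to,
                                           unsigned long max_to,
                                           unsigned long init_to)
    {
        reestablish_to <<= 1;                   /* exponential backoff */
        if (reestablish_to > max_to)
            reestablish_to = max_to;            /* clamp to the ceiling */
        if (reestablish_to < init_to)
            reestablish_to = init_to;           /* never below initial */
        return reestablish_to;
    }
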
1003 xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid) in xprt_request_rb_find() argument
1005 struct rb_node *n = xprt->recv_queue.rb_node; in xprt_request_rb_find()
1025 xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new) in xprt_request_rb_insert() argument
1027 struct rb_node **p = &xprt->recv_queue.rb_node; in xprt_request_rb_insert()
1047 rb_insert_color(&new->rq_recv, &xprt->recv_queue); in xprt_request_rb_insert()
1051 xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_request_rb_remove() argument
1053 rb_erase(&req->rq_recv, &xprt->recv_queue); in xprt_request_rb_remove()
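
Lines 1003-1053 show that pending receives live in an rb-tree keyed on the request XID, so reply matching is O(log n) rather than a list walk. The search shape, modeled with a plain binary search tree since carrying the kernel's rb_node machinery here would add nothing; rqst_node and rqst_find are illustrative:

    #include <stdint.h>
    #include <stddef.h>

    struct rqst_node {
        uint32_t xid;                   /* rq_xid (big-endian upstream) */
        struct rqst_node *left, *right;
    };

    static struct rqst_node *rqst_find(struct rqst_node *n, uint32_t xid)
    {
        while (n) {
            if (xid < n->xid)
                n = n->left;
            else if (xid > n->xid)
                n = n->right;
            else
                return n;               /* matching request found */
        }
        return NULL;                    /* caller bumps stat.bad_xids */
    }
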
1063 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) in xprt_lookup_rqst() argument
1067 entry = xprt_request_rb_find(xprt, xid); in xprt_lookup_rqst()
1069 trace_xprt_lookup_rqst(xprt, xid, 0); in xprt_lookup_rqst()
1076 trace_xprt_lookup_rqst(xprt, xid, -ENOENT); in xprt_lookup_rqst()
1077 xprt->stat.bad_xids++; in xprt_lookup_rqst()
1146 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_enqueue_receive() local
1155 spin_lock(&xprt->queue_lock); in xprt_request_enqueue_receive()
1162 xprt_request_rb_insert(xprt, req); in xprt_request_enqueue_receive()
1164 spin_unlock(&xprt->queue_lock); in xprt_request_enqueue_receive()
1167 del_singleshot_timer_sync(&xprt->timer); in xprt_request_enqueue_receive()
1217 struct rpc_xprt *xprt = req->rq_xprt; in xprt_complete_rqst() local
1219 xprt->stat.recvs++; in xprt_complete_rqst()
1229 rpc_wake_up_queued_task(&xprt->pending, task); in xprt_complete_rqst()
1236 struct rpc_xprt *xprt = req->rq_xprt; in xprt_timer() local
1241 trace_xprt_timer(xprt, req->rq_xid, task->tk_status); in xprt_timer()
1243 if (xprt->ops->timer) in xprt_timer()
1244 xprt->ops->timer(xprt, task); in xprt_timer()
1300 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_wait_receive() local
1309 spin_lock(&xprt->queue_lock); in xprt_request_wait_receive()
1311 xprt->ops->wait_for_reply_request(task); in xprt_request_wait_receive()
1318 rpc_wake_up_queued_task_set_status(&xprt->pending, in xprt_request_wait_receive()
1321 spin_unlock(&xprt->queue_lock); in xprt_request_wait_receive()
1340 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_enqueue_transmit() local
1350 spin_lock(&xprt->queue_lock); in xprt_request_enqueue_transmit()
1356 xprt_clear_congestion_window_wait(xprt); in xprt_request_enqueue_transmit()
1357 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { in xprt_request_enqueue_transmit()
1366 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { in xprt_request_enqueue_transmit()
1374 list_add_tail(&req->rq_xmit, &xprt->xmit_queue); in xprt_request_enqueue_transmit()
1377 atomic_long_inc(&xprt->xmit_queuelen); in xprt_request_enqueue_transmit()
1379 spin_unlock(&xprt->queue_lock); in xprt_request_enqueue_transmit()
1421 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_dequeue_transmit() local
1423 spin_lock(&xprt->queue_lock); in xprt_request_dequeue_transmit()
1425 spin_unlock(&xprt->queue_lock); in xprt_request_dequeue_transmit()
1439 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_dequeue_xprt() local
1444 spin_lock(&xprt->queue_lock); in xprt_request_dequeue_xprt()
1447 spin_unlock(&xprt->queue_lock); in xprt_request_dequeue_xprt()
1449 spin_lock(&xprt->queue_lock); in xprt_request_dequeue_xprt()
1454 spin_unlock(&xprt->queue_lock); in xprt_request_dequeue_xprt()
1471 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_prepare() local
1473 if (xprt->ops->prepare_request) in xprt_request_prepare()
1474 return xprt->ops->prepare_request(req, buf); in xprt_request_prepare()
1498 struct rpc_xprt *xprt = req->rq_xprt; in xprt_prepare_transmit() local
1500 if (!xprt_lock_write(xprt, task)) { in xprt_prepare_transmit()
1503 rpc_wake_up_queued_task_set_status(&xprt->sending, in xprt_prepare_transmit()
1508 if (atomic_read(&xprt->swapper)) in xprt_prepare_transmit()
1516 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; in xprt_end_transmit() local
1518 xprt_inject_disconnect(xprt); in xprt_end_transmit()
1519 xprt_release_write(xprt, task); in xprt_end_transmit()
1535 struct rpc_xprt *xprt = req->rq_xprt; in xprt_request_transmit() local
1565 connect_cookie = xprt->connect_cookie; in xprt_request_transmit()
1566 status = xprt->ops->send_request(req); in xprt_request_transmit()
1578 xprt_inject_disconnect(xprt); in xprt_request_transmit()
1581 spin_lock(&xprt->transport_lock); in xprt_request_transmit()
1583 xprt->stat.sends++; in xprt_request_transmit()
1584 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs; in xprt_request_transmit()
1585 xprt->stat.bklog_u += xprt->backlog.qlen; in xprt_request_transmit()
1586 xprt->stat.sending_u += xprt->sending.qlen; in xprt_request_transmit()
1587 xprt->stat.pending_u += xprt->pending.qlen; in xprt_request_transmit()
1588 spin_unlock(&xprt->transport_lock); in xprt_request_transmit()
1594 rpc_wake_up_queued_task_set_status(&xprt->sending, task, status); in xprt_request_transmit()
1611 struct rpc_xprt *xprt = req->rq_xprt; in xprt_transmit() local
1614 spin_lock(&xprt->queue_lock); in xprt_transmit()
1616 next = list_first_entry_or_null(&xprt->xmit_queue, in xprt_transmit()
1621 spin_unlock(&xprt->queue_lock); in xprt_transmit()
1625 spin_lock(&xprt->queue_lock); in xprt_transmit()
1636 cond_resched_lock(&xprt->queue_lock); in xprt_transmit()
1638 spin_unlock(&xprt->queue_lock); in xprt_transmit()
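
The xprt_transmit() fragments (lines 1611-1638) show the classic drain pattern: pick the head of xmit_queue under queue_lock, drop the lock to do the actual send, re-take it and repeat, with cond_resched_lock() to stay preemptible. A hedged userspace rendering with a mutex and a singly linked list; note the kernel keeps entries queued while sending rather than popping first, so this is a simplification:

    #include <pthread.h>
    #include <stddef.h>

    struct xmit_req {
        struct xmit_req *next;
    };

    struct xmit_queue {
        pthread_mutex_t lock;           /* ~ xprt->queue_lock */
        struct xmit_req *head;
    };

    static void drain_xmit_queue(struct xmit_queue *q,
                                 void (*send)(struct xmit_req *))
    {
        pthread_mutex_lock(&q->lock);
        while (q->head) {
            struct xmit_req *req = q->head;
            q->head = req->next;
            pthread_mutex_unlock(&q->lock);     /* send without the lock */
            send(req);
            pthread_mutex_lock(&q->lock);       /* re-take for next pop */
        }
        pthread_mutex_unlock(&q->lock);
    }
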
1647 void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_add_backlog() argument
1649 set_bit(XPRT_CONGESTED, &xprt->state); in xprt_add_backlog()
1650 rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init); in xprt_add_backlog()
1666 bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_wake_up_backlog() argument
1668 if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) { in xprt_wake_up_backlog()
1669 clear_bit(XPRT_CONGESTED, &xprt->state); in xprt_wake_up_backlog()
1676 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_throttle_congested() argument
1680 if (!test_bit(XPRT_CONGESTED, &xprt->state)) in xprt_throttle_congested()
1682 spin_lock(&xprt->reserve_lock); in xprt_throttle_congested()
1683 if (test_bit(XPRT_CONGESTED, &xprt->state)) { in xprt_throttle_congested()
1684 xprt_add_backlog(xprt, task); in xprt_throttle_congested()
1687 spin_unlock(&xprt->reserve_lock); in xprt_throttle_congested()
1692 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) in xprt_dynamic_alloc_slot() argument
1696 if (xprt->num_reqs >= xprt->max_reqs) in xprt_dynamic_alloc_slot()
1698 ++xprt->num_reqs; in xprt_dynamic_alloc_slot()
1699 spin_unlock(&xprt->reserve_lock); in xprt_dynamic_alloc_slot()
1701 spin_lock(&xprt->reserve_lock); in xprt_dynamic_alloc_slot()
1704 --xprt->num_reqs; in xprt_dynamic_alloc_slot()
1710 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_dynamic_free_slot() argument
1712 if (xprt->num_reqs > xprt->min_reqs) { in xprt_dynamic_free_slot()
1713 --xprt->num_reqs; in xprt_dynamic_free_slot()
1720 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_alloc_slot() argument
1724 spin_lock(&xprt->reserve_lock); in xprt_alloc_slot()
1725 if (!list_empty(&xprt->free)) { in xprt_alloc_slot()
1726 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); in xprt_alloc_slot()
1730 req = xprt_dynamic_alloc_slot(xprt); in xprt_alloc_slot()
1740 xprt_add_backlog(xprt, task); in xprt_alloc_slot()
1746 spin_unlock(&xprt->reserve_lock); in xprt_alloc_slot()
1749 xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots, in xprt_alloc_slot()
1750 xprt->num_reqs); in xprt_alloc_slot()
1751 spin_unlock(&xprt->reserve_lock); in xprt_alloc_slot()
1758 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) in xprt_free_slot() argument
1760 spin_lock(&xprt->reserve_lock); in xprt_free_slot()
1761 if (!xprt_wake_up_backlog(xprt, req) && in xprt_free_slot()
1762 !xprt_dynamic_free_slot(xprt, req)) { in xprt_free_slot()
1764 list_add(&req->rq_list, &xprt->free); in xprt_free_slot()
1766 spin_unlock(&xprt->reserve_lock); in xprt_free_slot()
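
The slot allocator above (lines 1692-1766) grows on demand: xprt_dynamic_alloc_slot() reserves the count under reserve_lock, drops the lock for the allocation itself, then re-takes it and rolls the count back on failure. That lock-juggling is the interesting part; here is a sketch with a pthread mutex standing in for reserve_lock (slot_pool and dynamic_alloc_slot are illustrative names):

    #include <pthread.h>
    #include <stdlib.h>

    struct slot_pool {
        pthread_mutex_t lock;           /* ~ xprt->reserve_lock */
        unsigned int num_reqs, max_reqs;
    };

    /* Called (and returns) with p->lock held, as in the kernel. */
    static void *dynamic_alloc_slot(struct slot_pool *p, size_t slot_size)
    {
        void *req;

        if (p->num_reqs >= p->max_reqs)
            return NULL;
        ++p->num_reqs;                  /* reserve the count first */
        pthread_mutex_unlock(&p->lock); /* never allocate under the lock */
        req = calloc(1, slot_size);
        pthread_mutex_lock(&p->lock);
        if (!req)
            --p->num_reqs;              /* roll back the reservation */
        return req;
    }
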
1770 static void xprt_free_all_slots(struct rpc_xprt *xprt) in xprt_free_all_slots() argument
1773 while (!list_empty(&xprt->free)) { in xprt_free_all_slots()
1774 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); in xprt_free_all_slots()
1787 static int xprt_alloc_id(struct rpc_xprt *xprt) in xprt_alloc_id() argument
1795 xprt->id = id; in xprt_alloc_id()
1799 static void xprt_free_id(struct rpc_xprt *xprt) in xprt_free_id() argument
1801 ida_free(&rpc_xprt_ids, xprt->id); in xprt_free_id()
1808 struct rpc_xprt *xprt; in xprt_alloc() local
1812 xprt = kzalloc(size, GFP_KERNEL); in xprt_alloc()
1813 if (xprt == NULL) in xprt_alloc()
1816 xprt_alloc_id(xprt); in xprt_alloc()
1817 xprt_init(xprt, net); in xprt_alloc()
1823 list_add(&req->rq_list, &xprt->free); in xprt_alloc()
1825 xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc); in xprt_alloc()
1826 xprt->min_reqs = num_prealloc; in xprt_alloc()
1827 xprt->num_reqs = num_prealloc; in xprt_alloc()
1829 return xprt; in xprt_alloc()
1832 xprt_free(xprt); in xprt_alloc()
1838 void xprt_free(struct rpc_xprt *xprt) in xprt_free() argument
1840 put_net_track(xprt->xprt_net, &xprt->ns_tracker); in xprt_free()
1841 xprt_free_all_slots(xprt); in xprt_free()
1842 xprt_free_id(xprt); in xprt_free()
1843 rpc_sysfs_xprt_destroy(xprt); in xprt_free()
1844 kfree_rcu(xprt, rcu); in xprt_free()
1849 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt) in xprt_init_connect_cookie() argument
1851 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1; in xprt_init_connect_cookie()
1855 xprt_alloc_xid(struct rpc_xprt *xprt) in xprt_alloc_xid() argument
1859 spin_lock(&xprt->reserve_lock); in xprt_alloc_xid()
1860 xid = (__force __be32)xprt->xid++; in xprt_alloc_xid()
1861 spin_unlock(&xprt->reserve_lock); in xprt_alloc_xid()
1866 xprt_init_xid(struct rpc_xprt *xprt) in xprt_init_xid() argument
1868 xprt->xid = get_random_u32(); in xprt_init_xid()
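
XID handling (lines 1855-1868) is a random starting point followed by a per-request increment under reserve_lock, so XIDs are unique per transport and unpredictable across restarts. Reduced to its essentials; the stdlib random() here only stands in for get_random_u32():

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t xid_counter;

    static void xid_init(void)
    {
        xid_counter = (uint32_t)random();   /* kernel: get_random_u32() */
    }

    static uint32_t xid_alloc(void)
    {
        return xid_counter++;   /* kernel serializes with reserve_lock */
    }
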
1874 struct rpc_xprt *xprt = task->tk_xprt; in xprt_request_init() local
1878 req->rq_xprt = xprt; in xprt_request_init()
1880 req->rq_xid = xprt_alloc_xid(xprt); in xprt_request_init()
1881 xprt_init_connect_cookie(req, xprt); in xprt_request_init()
1895 xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task) in xprt_do_reserve() argument
1897 xprt->ops->alloc_slot(xprt, task); in xprt_do_reserve()
1912 struct rpc_xprt *xprt = task->tk_xprt; in xprt_reserve() local
1919 if (!xprt_throttle_congested(xprt, task)) in xprt_reserve()
1920 xprt_do_reserve(xprt, task); in xprt_reserve()
1934 struct rpc_xprt *xprt = task->tk_xprt; in xprt_retry_reserve() local
1941 xprt_do_reserve(xprt, task); in xprt_retry_reserve()
1951 struct rpc_xprt *xprt; in xprt_release() local
1956 xprt = task->tk_xprt; in xprt_release()
1957 xprt_release_write(xprt, task); in xprt_release()
1962 xprt = req->rq_xprt; in xprt_release()
1964 spin_lock(&xprt->transport_lock); in xprt_release()
1965 xprt->ops->release_xprt(xprt, task); in xprt_release()
1966 if (xprt->ops->release_request) in xprt_release()
1967 xprt->ops->release_request(task); in xprt_release()
1968 xprt_schedule_autodisconnect(xprt); in xprt_release()
1969 spin_unlock(&xprt->transport_lock); in xprt_release()
1971 xprt->ops->buf_free(task); in xprt_release()
1979 xprt->ops->free_slot(xprt, req); in xprt_release()
2002 static void xprt_init(struct rpc_xprt *xprt, struct net *net) in xprt_init() argument
2004 kref_init(&xprt->kref); in xprt_init()
2006 spin_lock_init(&xprt->transport_lock); in xprt_init()
2007 spin_lock_init(&xprt->reserve_lock); in xprt_init()
2008 spin_lock_init(&xprt->queue_lock); in xprt_init()
2010 INIT_LIST_HEAD(&xprt->free); in xprt_init()
2011 xprt->recv_queue = RB_ROOT; in xprt_init()
2012 INIT_LIST_HEAD(&xprt->xmit_queue); in xprt_init()
2014 spin_lock_init(&xprt->bc_pa_lock); in xprt_init()
2015 INIT_LIST_HEAD(&xprt->bc_pa_list); in xprt_init()
2017 INIT_LIST_HEAD(&xprt->xprt_switch); in xprt_init()
2019 xprt->last_used = jiffies; in xprt_init()
2020 xprt->cwnd = RPC_INITCWND; in xprt_init()
2021 xprt->bind_index = 0; in xprt_init()
2023 rpc_init_wait_queue(&xprt->binding, "xprt_binding"); in xprt_init()
2024 rpc_init_wait_queue(&xprt->pending, "xprt_pending"); in xprt_init()
2025 rpc_init_wait_queue(&xprt->sending, "xprt_sending"); in xprt_init()
2026 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); in xprt_init()
2028 xprt_init_xid(xprt); in xprt_init()
2030 xprt->xprt_net = get_net_track(net, &xprt->ns_tracker, GFP_KERNEL); in xprt_init()
2040 struct rpc_xprt *xprt; in xprt_create_transport() local
2049 xprt = t->setup(args); in xprt_create_transport()
2052 if (IS_ERR(xprt)) in xprt_create_transport()
2055 xprt->idle_timeout = 0; in xprt_create_transport()
2056 INIT_WORK(&xprt->task_cleanup, xprt_autoclose); in xprt_create_transport()
2057 if (xprt_has_timer(xprt)) in xprt_create_transport()
2058 timer_setup(&xprt->timer, xprt_init_autodisconnect, 0); in xprt_create_transport()
2060 timer_setup(&xprt->timer, NULL, 0); in xprt_create_transport()
2063 xprt_destroy(xprt); in xprt_create_transport()
2066 xprt->servername = kstrdup(args->servername, GFP_KERNEL); in xprt_create_transport()
2067 if (xprt->servername == NULL) { in xprt_create_transport()
2068 xprt_destroy(xprt); in xprt_create_transport()
2072 rpc_xprt_debugfs_register(xprt); in xprt_create_transport()
2074 trace_xprt_create(xprt); in xprt_create_transport()
2076 return xprt; in xprt_create_transport()
2081 struct rpc_xprt *xprt = in xprt_destroy_cb() local
2084 trace_xprt_destroy(xprt); in xprt_destroy_cb()
2086 rpc_xprt_debugfs_unregister(xprt); in xprt_destroy_cb()
2087 rpc_destroy_wait_queue(&xprt->binding); in xprt_destroy_cb()
2088 rpc_destroy_wait_queue(&xprt->pending); in xprt_destroy_cb()
2089 rpc_destroy_wait_queue(&xprt->sending); in xprt_destroy_cb()
2090 rpc_destroy_wait_queue(&xprt->backlog); in xprt_destroy_cb()
2091 kfree(xprt->servername); in xprt_destroy_cb()
2095 xprt_destroy_backchannel(xprt, UINT_MAX); in xprt_destroy_cb()
2100 xprt->ops->destroy(xprt); in xprt_destroy_cb()
2108 static void xprt_destroy(struct rpc_xprt *xprt) in xprt_destroy() argument
2113 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); in xprt_destroy()
2120 spin_lock(&xprt->transport_lock); in xprt_destroy()
2121 del_timer_sync(&xprt->timer); in xprt_destroy()
2122 spin_unlock(&xprt->transport_lock); in xprt_destroy()
2128 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb); in xprt_destroy()
2129 schedule_work(&xprt->task_cleanup); in xprt_destroy()
2142 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt) in xprt_get() argument
2144 if (xprt != NULL && kref_get_unless_zero(&xprt->kref)) in xprt_get()
2145 return xprt; in xprt_get()
2155 void xprt_put(struct rpc_xprt *xprt) in xprt_put() argument
2157 if (xprt != NULL) in xprt_put()
2158 kref_put(&xprt->kref, xprt_destroy_kref); in xprt_put()
2162 void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps) in xprt_set_offline_locked() argument
2164 if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) { in xprt_set_offline_locked()
2171 void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps) in xprt_set_online_locked() argument
2173 if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) { in xprt_set_online_locked()
2180 void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps) in xprt_delete_locked() argument
2182 if (test_and_set_bit(XPRT_REMOVE, &xprt->state)) in xprt_delete_locked()
2185 xprt_force_disconnect(xprt); in xprt_delete_locked()
2186 if (!test_bit(XPRT_CONNECTED, &xprt->state)) in xprt_delete_locked()
2189 if (!xprt->sending.qlen && !xprt->pending.qlen && in xprt_delete_locked()
2190 !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen)) in xprt_delete_locked()
2191 rpc_xprt_switch_remove_xprt(xps, xprt, true); in xprt_delete_locked()