
Searched refs:sk_listener (Results 1 – 13 of 13) sorted by relevance

/linux-6.1.9/include/net/
request_sock.h:87 reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, in reqsk_alloc() argument
97 if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) { in reqsk_alloc()
101 req->rsk_listener = sk_listener; in reqsk_alloc()
104 req_to_sk(req)->sk_prot = sk_listener->sk_prot; in reqsk_alloc()
mptcp.h:187 const struct sock *sk_listener,
190 struct sock *sk_listener,
272 const struct sock *sk_listener, in mptcp_subflow_init_cookie_req() argument
279 struct sock *sk_listener, in mptcp_subflow_reqsk_alloc() argument
inet_sock.h:357 struct sock *sk_listener,
tcp.h:1502 const struct sock *sk_listener,
sock.h:2862 static inline bool sk_listener(const struct sock *sk) in sk_listener() function
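
The sock.h hit at line 2862 is the predicate used by the sch_fq and SELinux call sites further down. In 6.1 it is essentially a one-line state-mask test; a minimal sketch reconstructed from memory (verify against the tree before relying on the exact form):

    static inline bool sk_listener(const struct sock *sk)
    {
            /* True for full listeners (TCP_LISTEN) and for embryonic
             * request sockets in TCP_NEW_SYN_RECV state. */
            return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
    }

The request_sock.h fragment above shows the other recurring pattern: reqsk_alloc() only attaches a listener to a new request socket if the listener's refcount can still be raised, i.e. the listener is not already being torn down:

    /* Condensed from the reqsk_alloc() lines listed above. */
    if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt)))
            return NULL;            /* listener is going away */
    req->rsk_listener = sk_listener;
    req_to_sk(req)->sk_prot = sk_listener->sk_prot;
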
/linux-6.1.9/net/mptcp/
subflow.c:106 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener) in subflow_init_req() argument
112 subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener)); in subflow_init_req()
113 subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener)); in subflow_init_req()
139 const struct sock *sk_listener, in subflow_check_req() argument
142 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); in subflow_check_req()
153 if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) in subflow_check_req()
216 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) { in subflow_check_req()
218 ntohs(inet_sk(sk_listener)->inet_sport), in subflow_check_req()
220 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { in subflow_check_req()
244 const struct sock *sk_listener, in mptcp_subflow_init_cookie_req() argument
[all …]
/linux-6.1.9/net/ipv4/
tcp_minisocks.c:375 const struct sock *sk_listener, in tcp_openreq_init_rwin() argument
379 const struct tcp_sock *tp = tcp_sk(sk_listener); in tcp_openreq_init_rwin()
380 int full_space = tcp_full_space(sk_listener); in tcp_openreq_init_rwin()
392 if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_openreq_init_rwin()
403 tcp_select_initial_window(sk_listener, full_space, in tcp_openreq_init_rwin()
inet_connection_sock.c:966 struct sock *sk_listener = req->rsk_listener; in reqsk_timer_handler() local
972 if (inet_sk_state_load(sk_listener) != TCP_LISTEN) { in reqsk_timer_handler()
975 nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL); in reqsk_timer_handler()
993 sk_listener = nsk; in reqsk_timer_handler()
996 icsk = inet_csk(sk_listener); in reqsk_timer_handler()
997 net = sock_net(sk_listener); in reqsk_timer_handler()
1019 if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) { in reqsk_timer_handler()
1034 !inet_rtx_syn_ack(sk_listener, req) || in reqsk_timer_handler()
1045 inet_csk_reqsk_queue_drop(sk_listener, nreq); in reqsk_timer_handler()
tcp_input.c:6787 struct sock *sk_listener, in inet_reqsk_alloc() argument
6790 struct request_sock *req = reqsk_alloc(ops, sk_listener, in inet_reqsk_alloc()
6802 write_pnet(&ireq->ireq_net, sock_net(sk_listener)); in inet_reqsk_alloc()
6803 ireq->ireq_family = sk_listener->sk_family; in inet_reqsk_alloc()
tcp_ipv4.c:1423 const struct sock *sk_listener, in tcp_v4_init_req() argument
1427 struct net *net = sock_net(sk_listener); in tcp_v4_init_req()
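
Two of the ipv4 hits benefit from a word of context. In tcp_openreq_init_rwin() the listener supplies the receive-buffer space (tcp_full_space()) and the SOCK_RCVBUF_LOCK user lock that bound the initial window advertised in the SYN-ACK. In reqsk_timer_handler() the listener's accept backlog throttles SYN-ACK retransmissions: the test at inet_connection_sock.c:1019 asks whether the pending-request queue already exceeds half of sk_max_ack_backlog (with a floor of 8), in which case the retry threshold is lowered. A hypothetical helper spelling out that check (the function name is mine, not the kernel's):

    /* Hypothetical helper, not in the kernel tree: true when qlen * 2
     * exceeds max(8, sk_max_ack_backlog), i.e. the listener's pending
     * request queue is more than half full. */
    static bool reqsk_queue_over_half_full(const struct sock *sk_listener, u32 qlen)
    {
            u32 backlog = max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog));

            return (qlen << 1) > backlog;
    }
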
/linux-6.1.9/net/ipv6/
tcp_ipv6.c:784 const struct sock *sk_listener, in tcp_v6_init_req() argument
789 const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener); in tcp_v6_init_req()
795 if ((!sk_listener->sk_bound_dev_if || l3_slave) && in tcp_v6_init_req()
800 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) || in tcp_v6_init_req()
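
The tcp_v6_init_req() hit shows the listener being consulted for IPv6 policy on the incoming SYN: the input interface is only recorded for link-local peers when the listener is unbound (or bound to an L3 master device), and the SYN skb is kept around for later option reflection only if the listener asked for received options. A hedged paraphrase of that second check, with field names as I recall them for 6.1 (verify against the tree):

    const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

    /* Retain the SYN skb as ireq->pktopts only when the listener opted in
     * to receiving IPv6 options / ancillary data for accepted sockets. */
    if (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
        np->rxopt.bits.rxinfo || np->rxopt.bits.rxhlim) {
            refcount_inc(&skb->users);
            inet_rsk(req)->pktopts = skb;
    }
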
/linux-6.1.9/net/sched/
sch_fq.c:281 if (!sk || sk_listener(sk)) { in fq_classify()
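
This is where the sk_listener() predicate from sock.h pays off for the packet scheduler: a listening or SYN_RECV socket is shared by many flows, so fq_classify() refuses to key per-flow state on it and falls back to a hash of the packet. Roughly, as I remember the 6.1 code (treat as a paraphrase, not a verbatim copy):

    if (!sk || sk_listener(sk)) {
            unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

            /* Fabricate an odd-valued "socket pointer" from the skb hash;
             * real socket pointers are word aligned, so this can never
             * collide with a local flow keyed on an actual socket. */
            sk = (struct sock *)((hash << 1) | 1UL);
            skb_orphan(skb);
    }
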
/linux-6.1.9/security/selinux/
hooks.c:5699 if (sk_listener(sk)) in selinux_ip_output()
5800 !(sk && sk_listener(sk))) in selinux_ip_postroute()
5818 } else if (sk_listener(sk)) { in selinux_ip_postroute()