/linux-5.19.10/net/sctp/
socket.c
    213  af = sctp_sockaddr_af(sctp_sk(sk), addr, len);  in sctp_verify_addr()
    218  if (!af->addr_valid(addr, sctp_sk(sk), NULL))  in sctp_verify_addr()
    221  if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))  in sctp_verify_addr()
    244  if (!list_empty(&sctp_sk(sk)->ep->asocs))  in sctp_id2assoc()
    245  asoc = list_entry(sctp_sk(sk)->ep->asocs.next,  in sctp_id2assoc()
    279  addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,  in sctp_addr_id2transport()
    290  sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),  in sctp_addr_id2transport()
    316  if (!sctp_sk(sk)->ep->base.bind_addr.port)  in sctp_bind()
    376  struct sctp_sock *sp = sctp_sk(sk);  in sctp_do_bind()
    570  sp = sctp_sk(sk);  in sctp_send_asconf_add_ip()
    [all …]
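Every socket.c hit above follows the same pattern: a function is handed the generic struct sock and immediately converts it with sctp_sk() before touching SCTP-private state such as ->ep or ->pf. A minimal sketch of that pattern, using only the field names visible in the hits; check_ep_bound() itself is a hypothetical helper, not a function from socket.c:

static int check_ep_bound(struct sock *sk)
{
	/* same conversion as lines 376 and 570 above */
	struct sctp_sock *sp = sctp_sk(sk);

	/* mirrors the bind_addr.port check at line 316 */
	if (!sp->ep || !sp->ep->base.bind_addr.port)
		return 0;	/* endpoint has no bound port yet */

	return 1;
}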
endpointola.c
    192  sctp_sk(sk)->ep = NULL;  in sctp_endpoint_destroy_rcu()
    225  if (sctp_sk(sk)->bind_hash)  in sctp_endpoint_destroy()
    256  sctp_sk(ep->base.sk)))  in sctp_endpoint_is_match()
    411  if (!sctp_sk(sk)->ep)  in sctp_endpoint_bh_rcv()
ulpqueue.c
    130  struct sctp_sock *sp = sctp_sk(sk);  in sctp_clear_pd()
    168  struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);  in sctp_ulpq_set_pd()
    185  struct sctp_sock *sp = sctp_sk(sk);  in sctp_ulpq_tail_event()
    482  if (!sctp_sk(asoc->base.sk)->frag_interleave &&  in sctp_ulpq_retrieve_reassembled()
    483  atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))  in sctp_ulpq_retrieve_reassembled()
    487  pd_point = sctp_sk(asoc->base.sk)->pd_point;  in sctp_ulpq_retrieve_reassembled()
    1035 sp = sctp_sk(asoc->base.sk);  in sctp_ulpq_partial_delivery()
    1122 sp = sctp_sk(sk);  in sctp_ulpq_abort_pd()
ipv6.c
    245  if (!t->encap_port || !sctp_sk(sk)->udp_port) {  in sctp_v6_xmit()
    268  label, sctp_sk(sk)->udp_port, t->encap_port, false);  in sctp_v6_xmit()
    784  sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped;  in sctp_v6_create_accept_sk()
    887  *addrlen = sctp_v6_addr_to_user(sctp_sk(asoc->base.sk), addr);  in sctp_inet6_event_msgname()
    918  *addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);  in sctp_inet6_skb_msgname()
    1061 rc = sctp_v6_addr_to_user(sctp_sk(sock->sk),  in sctp_getname()
input.c
    184  ep = sctp_sk(sk)->ep;  in sctp_rcv()
    772  err = sctp_bind_addrs_check(sctp_sk(sk2),  in __sctp_hash_endpoint()
    773  sctp_sk(sk), cnt);  in __sctp_hash_endpoint()
    870  ep = sctp_sk(net->sctp.ctl_sock)->ep;  in __sctp_rcv_lookup_endpoint()
    880  ep = sctp_sk(sk)->ep;  in __sctp_rcv_lookup_endpoint()
    1015 laddr, sctp_sk(t->asoc->base.sk)))  in sctp_addrs_lookup_transport()
proc.c
    178  sctp_sk(sk)->type, sk->sk_state, hash,  in sctp_eps_seq_show()
    265  assoc, sk, sctp_sk(sk)->type, sk->sk_state,  in sctp_assocs_seq_show()
associola.c
    62   sp = sctp_sk((struct sock *)sk);  in sctp_association_init()
    593  sp = sctp_sk(asoc->base.sk);  in sctp_assoc_add_peer()
    1075 struct sctp_sock *newsp = sctp_sk(newsk);  in sctp_assoc_migrate()
    1402 int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,  in sctp_assoc_update_frag_point()
    1618 sctp_sk(asoc->base.sk)))  in sctp_assoc_lookup_laddr()
sysctl.c
    503  sctp_sk(sk)->ep->auth_enable = new_value;  in proc_sctp_do_auth()
    544  sctp_sk(sk)->udp_port = htons(net->sctp.udp_port);  in proc_sctp_do_udp_port()
stream_interleave.c
    326  pd_point = sctp_sk(asoc->base.sk)->pd_point;  in sctp_intl_retrieve_reassembled()
    473  struct sctp_sock *sp = sctp_sk(sk);  in sctp_enqueue_event()
    715  pd_point = sctp_sk(asoc->base.sk)->pd_point;  in sctp_intl_retrieve_reassembled_uo()
    999  struct sctp_sock *sp = sctp_sk(sk);  in sctp_intl_stream_abort_pd()
output.c
    93   sp = sctp_sk(sk);  in sctp_packet_config()
    727  if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&  in sctp_packet_can_append_data()
chunk.c
    176  max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),  in sctp_datamsg_from_user()
sm_make_chunk.c
    242  sp = sctp_sk(asoc->base.sk);  in sctp_make_init()
    419  sp = sctp_sk(asoc->base.sk);  in sctp_make_init_ack()
    1297 sp = sctp_sk(asoc->base.sk);  in sctp_make_op_error_limited()
    1717 if (sctp_sk(ep->base.sk)->hmac) {  in sctp_pack_cookie()
    1718 struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac;  in sctp_pack_cookie()
    1781 if (!sctp_sk(ep->base.sk)->hmac)  in sctp_unpack_cookie()
    1786 struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac;  in sctp_unpack_cookie()
bind_addr.c
    545  bp = &sctp_sk(sk)->ep->base.bind_addr;  in sctp_is_ep_boundall()
diag.c
    127  struct sctp_endpoint *ep = sctp_sk(sk)->ep;  in inet_sctp_diag_fill()
protocol.c
    1068 if (!t->encap_port || !sctp_sk(sk)->udp_port) {  in sctp_v4_xmit()
    1085 sctp_sk(sk)->udp_port, t->encap_port, false, false);  in sctp_v4_xmit()
sm_statefuns.c
    352  if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {  in sctp_sf_do_5_1B_init()
    720  if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {  in sctp_sf_do_5_1D_ce()
    1350 ep = sctp_sk(net->sctp.ctl_sock)->ep;  in sctp_sf_send_restart_abort()
    1820 if (ep == sctp_sk(net->sctp.ctl_sock)->ep)  in sctp_sf_do_5_2_3_initack()
    6427 sctp_sk(net->sctp.ctl_sock));  in sctp_ootb_pkt_new()
ulpevent.c
    330  sctp_sk(asoc->base.sk),  in sctp_ulpevent_make_peer_addr_change()
outqueue.c
    201  sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);  in sctp_outq_init()
sm_sideeffect.c
    1277 struct sctp_sock *sp = sctp_sk(ep->base.sk);  in sctp_cmd_interpreter()
/linux-5.19.10/include/net/sctp/
sctp.h
    520  return sctp_sk(sk)->type == style;  in __sctp_style()
    628  return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) -  in sctp_transport_pl_hlen()
    675  sctp_sk(sk)->nodelay = true;  in sctp_sock_set_nodelay()
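The sctp.h hits are small inline wrappers layered on sctp_sk(). A rough reconstruction of two of them from the one-line fragments above; the bodies are a sketch, not a verbatim copy of the 5.19 header:

static inline int __sctp_style(const struct sock *sk,
			       enum sctp_socket_type style)
{
	return sctp_sk(sk)->type == style;	/* line 520 */
}

static inline void sctp_sock_set_nodelay(struct sock *sk)
{
	lock_sock(sk);
	sctp_sk(sk)->nodelay = true;		/* line 675 */
	release_sock(sk);
}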
structs.h
    245  static inline struct sctp_sock *sctp_sk(const struct sock *sk)  in sctp_sk()  (definition)
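structs.h line 245 is the definition itself: a plain cast, which only works because struct sctp_sock embeds the generic socket (via struct inet_sock) as its first member. A sketch of the definition and the layout it relies on; the struct is heavily abridged here, and the private-field list is only what the hits above imply:

struct sctp_sock {
	struct inet_sock inet;	/* must stay first: contains struct sock */
	/* ... SCTP-private state seen above: ep, pf, type, nodelay,
	 * pd_point, frag_interleave, udp_port, hmac, ...
	 */
};

static inline struct sctp_sock *sctp_sk(const struct sock *sk)
{
	/* only valid for sockets allocated by the SCTP protocol,
	 * where the object really is a struct sctp_sock
	 */
	return (struct sctp_sock *)sk;
}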