/linux-5.19.10/drivers/net/ethernet/sfc/ |
D | tx_tso.c |
    161 EFX_WARN_ON_ONCE_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + in efx_tso_check_protocol()
    162 (tcp_hdr(skb)->doff << 2u)) > in efx_tso_check_protocol()
    179 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); in tso_start()
    190 st->seqnum = ntohl(tcp_hdr(skb)->seq); in tso_start()
    192 EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->urg); in tso_start()
    193 EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->syn); in tso_start()
    194 EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->rst); in tso_start()
    307 tcp_flags = ((u8 *)tcp_hdr(skb))[TCP_FLAGS_OFFSET] & ~tcp_flags_mask; in tso_start_new_packet()
|
D | tx_common.c | 326 (tcp_hdr(skb)->doff << 2u); in efx_tx_tso_header_length()
|
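The sfc entries above show the two things a TSO path reads from the TCP header: how many header bytes sit in front of the payload (the transport offset plus doff << 2) and the starting sequence number that must advance with every emitted segment. The following is a minimal userspace sketch of that bookkeeping, assuming the UAPI <linux/tcp.h> header is available; the offsets and values are made up and this is not the sfc driver's code.

/*
 * Sketch of TSO header-length and per-segment sequence bookkeeping.
 * Illustrative only -- offsets and values are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <linux/tcp.h>          /* UAPI struct tcphdr (doff bitfield) */

int main(void)
{
        struct tcphdr th = { .doff = 8 };       /* 20-byte base + 12 bytes of options */
        unsigned int tcp_off = 14 + 20;         /* Ethernet + IPv4 headers, example */
        unsigned int header_len = tcp_off + (th.doff << 2u);

        uint32_t seq = 0x1000;                  /* in-kernel: ntohl(tcp_hdr(skb)->seq) */
        unsigned int payload = 7300, mss = 1460;

        printf("headers copied in front of every segment: %u bytes\n", header_len);
        while (payload) {
                unsigned int seg = payload < mss ? payload : mss;

                printf("segment seq=%u len=%u\n", (unsigned int)seq, seg);
                seq += seg;                     /* next segment continues the byte stream */
                payload -= seg;
        }
        return 0;
}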
/linux-5.19.10/net/ipv4/ |
D | tcp_offload.c |
    41 struct tcphdr *th = tcp_hdr(skb); in tcp4_gso_segment()
    70 th = tcp_hdr(skb); in tcp_gso_segment()
    116 th = tcp_hdr(skb); in tcp_gso_segment()
    141 th = tcp_hdr(skb); in tcp_gso_segment()
    225 th2 = tcp_hdr(p); in tcp_gro_receive()
    291 struct tcphdr *th = tcp_hdr(skb); in tcp_gro_complete()
    326 struct tcphdr *th = tcp_hdr(skb); in tcp4_gro_complete()
|
D | syncookies.c |
    179 const struct tcphdr *th = tcp_hdr(skb); in cookie_v4_init_sequence()
    333 const struct tcphdr *th = tcp_hdr(skb); in cookie_v4_check()
|
D | tcp_ipv4.c |
    100 tcp_hdr(skb)->dest, in tcp_v4_init_seq()
    101 tcp_hdr(skb)->source); in tcp_v4_init_seq()
    633 struct tcphdr *th = tcp_hdr(skb); in __tcp_v4_send_check()
    670 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_send_reset()
    851 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_send_ack()
    1368 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_md5_hash_skb()
    1605 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_cookie_check()
    1721 th = tcp_hdr(skb); in tcp_v4_early_demux()
|
D | tcp_minisocks.c |
    515 newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale; in tcp_create_openreq_child()
    569 const struct tcphdr *th = tcp_hdr(skb); in tcp_check_req()
|
D | tcp_input.c |
    260 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { in tcp_measure_rcv_mss()
    321 if (tcp_hdr(skb)->cwr) { in tcp_ecn_accept_cwr()
    3557 u32 nwin = ntohs(tcp_hdr(skb)->window); in tcp_ack_update_window()
    3559 if (likely(!tcp_hdr(skb)->syn)) in tcp_ack_update_window()
    3620 !tcp_hdr(skb)->syn) in tcp_oow_rate_limited()
    3848 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
    4038 const struct tcphdr *th = tcp_hdr(skb); in tcp_parse_options()
    4263 const struct tcphdr *th = tcp_hdr(skb); in tcp_disordered_ack()
    5027 __skb_pull(skb, tcp_hdr(skb)->doff * 4); in tcp_data_queue()
    6446 const struct tcphdr *th = tcp_hdr(skb); in tcp_rcv_state_process()
    [all …]
|
D | tcp_fastopen.c | 268 tp->snd_wnd = ntohs(tcp_hdr(skb)->window); in tcp_fastopen_create_child()
|
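The ipv4 call sites above read individual wire fields straight off the header: the advertised window (ntohs, shifted by the negotiated window scale in tcp_minisocks.c and tcp_fastopen.c), sequence numbers (ntohl), flag bits such as SYN and CWR, and the data offset used to pull the header before queueing payload. The sketch below decodes those fields by hand from a raw 20-byte header, as an illustration of what the tcp_hdr(skb)->... dereferences resolve to on the wire; it is not kernel code, and the example bytes and window-scale value are made up.

/* Decode the on-the-wire fields that the tcp_input.c / tcp_minisocks.c
 * call sites read through tcp_hdr(skb); plain userspace illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* SYN segment: sport 1234, dport 80, seq 0x01020304, window 0xfaf0 */
        const uint8_t th[20] = {
                0x04, 0xd2, 0x00, 0x50,         /* source, dest ports        */
                0x01, 0x02, 0x03, 0x04,         /* sequence number           */
                0x00, 0x00, 0x00, 0x00,         /* acknowledgment number     */
                0x50, 0x02, 0xfa, 0xf0,         /* doff=5, flags=SYN, window */
                0x00, 0x00, 0x00, 0x00          /* checksum, urgent pointer  */
        };
        uint32_t seq = (uint32_t)th[4] << 24 | th[5] << 16 | th[6] << 8 | th[7];
        uint16_t wnd = (uint16_t)(th[14] << 8 | th[15]);
        unsigned int hdr = (th[12] >> 4) * 4;   /* tcp_hdr(skb)->doff * 4 */
        int syn = th[13] & 0x02, cwr = th[13] & 0x80;
        unsigned int wscale = 7;                /* from SYN options, example value */

        printf("seq=%u hdrlen=%u syn=%d cwr=%d\n", (unsigned int)seq, hdr, !!syn, !!cwr);
        printf("snd_wnd=%u (ntohs(window) << snd_wscale)\n", (unsigned int)wnd << wscale);
        return 0;
}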
/linux-5.19.10/net/ipv6/ |
D | tcpv6_offload.c |
    33 struct tcphdr *th = tcp_hdr(skb); in tcp6_gro_complete()
    55 struct tcphdr *th = tcp_hdr(skb); in tcp6_gso_segment()
|
D | syncookies.c |
    112 const struct tcphdr *th = tcp_hdr(skb); in cookie_v6_init_sequence()
    135 const struct tcphdr *th = tcp_hdr(skb); in cookie_v6_check()
|
D | tcp_ipv6.c |
    120 tcp_hdr(skb)->dest, in tcp_v6_init_seq()
    121 tcp_hdr(skb)->source); in tcp_v6_init_seq()
    735 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_md5_hash_skb()
    846 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_send_response()
    971 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_send_reset()
    1128 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_cookie_check()
    1838 th = tcp_hdr(skb); in tcp_v6_early_demux()
|
/linux-5.19.10/include/net/ |
D | ip6_checksum.h |
    58 struct tcphdr *th = tcp_hdr(skb); in __tcp_v6_send_check()
    68 struct tcphdr *th = tcp_hdr(skb); in tcp_v6_gso_csum_prep()
|
/linux-5.19.10/include/linux/ |
D | tcp.h |
    24 static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) in tcp_hdr() function
    36 return __tcp_hdrlen(tcp_hdr(skb)); in tcp_hdrlen()
    51 return (tcp_hdr(skb)->doff - 5) * 4; in tcp_optlen()
|
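include/linux/tcp.h is where the accessor itself lives: tcp_hdr() returns the skb's transport header cast to struct tcphdr *, and tcp_hdrlen()/tcp_optlen() derive the header and options lengths from the doff field. A userspace analogue of the two length helpers, assuming the UAPI <linux/tcp.h> header and a header whose doff has already been validated (>= 5):

/* Userspace analogue of tcp_hdrlen()/tcp_optlen(): both are pure doff
 * arithmetic once you have a pointer to the start of the TCP header. */
#include <stdio.h>
#include <linux/tcp.h>          /* UAPI struct tcphdr */

static unsigned int hdrlen(const struct tcphdr *th)
{
        return th->doff * 4;            /* like __tcp_hdrlen(): 20..60 bytes */
}

static unsigned int optlen(const struct tcphdr *th)
{
        return (th->doff - 5) * 4;      /* bytes of options after the fixed header */
}

int main(void)
{
        struct tcphdr th = { .doff = 7 };       /* 20-byte base + 8 bytes of options */

        printf("tcp_hdrlen -> %u\n", hdrlen(&th));      /* 28 */
        printf("tcp_optlen -> %u\n", optlen(&th));      /* 8  */
        return 0;
}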
/linux-5.19.10/net/core/ |
D | tso.c | 79 tso->tcp_seq = (tlen != sizeof(struct udphdr)) ? ntohl(tcp_hdr(skb)->seq) : 0; in tso_start()
|
/linux-5.19.10/net/mptcp/ |
D | syncookies.c | 41 struct tcphdr *th = tcp_hdr(skb); in mptcp_join_entry_hash()
|
D | options.c |
    37 if (skb->len > tcp_hdr(skb)->doff << 2) in mptcp_parse_option()
    362 const struct tcphdr *th = tcp_hdr(skb); in mptcp_get_options()
|
/linux-5.19.10/net/tls/ |
D | tls_device_fallback.c |
    169 struct tcphdr *th = tcp_hdr(skb); in update_chksum()
    237 u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); in fill_sg_in()
|
/linux-5.19.10/drivers/net/ethernet/fungible/funeth/ |
D | funeth_tx.c |
    91 seq = ntohl(tcp_hdr(skb)->seq); in fun_tls_tx()
    197 th = tcp_hdr(skb); in write_pkt_desc()
|
/linux-5.19.10/drivers/net/ethernet/sfc/siena/ |
D | tx_common.c | 324 (tcp_hdr(skb)->doff << 2u); in efx_tx_tso_header_length()
|
/linux-5.19.10/security/ |
D | lsm_audit.c | 61 struct tcphdr *th = tcp_hdr(skb); in ipv4_skb_to_auditdata()
|
/linux-5.19.10/net/openvswitch/ |
D | flow.c |
    722 struct tcphdr *tcp = tcp_hdr(skb); in key_extract_l3l4()
    846 struct tcphdr *tcp = tcp_hdr(skb); in key_extract_l3l4()
|
D | actions.c |
    354 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, in update_ip_l4_checksum()
    387 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, in update_ipv6_checksum()
    726 th = tcp_hdr(skb); in set_tcp()
|
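When Open vSwitch rewrites an address or port it does not recompute the TCP checksum from scratch; the inet_proto_csum_replace4()/inet_proto_csum_replace16() calls above patch tcp_hdr(skb)->check incrementally. The sketch below shows the underlying arithmetic for a single 16-bit field change, following RFC 1624 (HC' = ~(~HC + ~m + m')); it illustrates the technique only and is not the kernel helper, and the example values are made up. All three values must be taken in the same byte order, e.g. exactly as they sit in the packet.

/* Incremental checksum update per RFC 1624: adjust an existing one's
 * complement checksum when one 16-bit field changes from old to new. */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

static uint16_t csum_replace2(uint16_t check, uint16_t old_val, uint16_t new_val)
{
        uint32_t sum = (uint16_t)~check;        /* ~HC */

        sum += (uint16_t)~old_val;              /* + ~m  */
        sum += new_val;                         /* + m'  */
        return (uint16_t)~fold(sum);            /* HC'   */
}

int main(void)
{
        uint16_t check = 0x1c46;                /* example existing checksum */
        uint16_t old_port = 80, new_port = 8080;

        printf("old check 0x%04x -> new check 0x%04x\n",
               check, csum_replace2(check, old_port, new_port));
        return 0;
}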
/linux-5.19.10/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
D | ktls_tx.c | 476 seq = ntohl(tcp_hdr(skb)->seq); in mlx5e_ktls_handle_tx_skb()
|
/linux-5.19.10/drivers/net/ethernet/sun/ |
D | sunvnet_common.c |
    321 struct tcphdr *ptcp = tcp_hdr(skb); in vnet_fullcsum_ipv4()
    354 struct tcphdr *ptcp = tcp_hdr(skb); in vnet_fullcsum_ipv6()
    1239 hlen += tcp_hdr(skb)->doff * 4; in vnet_handle_offloads()
|
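The sunvnet helpers recompute the whole TCP checksum in software: one's complement sum over the pseudo-header, the TCP header, and the payload. Below is a compact userspace version of that computation for IPv4, assuming the segment is contiguous in memory and its checksum field is zeroed before summing; the addresses and segment bytes are placeholders and this is only a sketch of the same arithmetic, not the driver's code.

/* Full TCP checksum over the IPv4 pseudo-header + TCP header + payload,
 * the kind of software fallback vnet_fullcsum_ipv4() performs. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];
                p += 2;
                len -= 2;
        }
        if (len)                        /* odd trailing byte padded with zero */
                sum += (uint32_t)p[0] << 8;
        return sum;
}

static uint16_t tcp_csum_ipv4(uint32_t saddr, uint32_t daddr,
                              const uint8_t *tcp, size_t tcp_len)
{
        uint32_t sum = 0;

        /* Pseudo-header: source, destination, zero, protocol (6), TCP length */
        sum += saddr >> 16; sum += saddr & 0xffff;
        sum += daddr >> 16; sum += daddr & 0xffff;
        sum += 6;
        sum += (uint32_t)tcp_len;

        sum = sum16(tcp, tcp_len, sum);
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;          /* stored in the header in network order */
}

int main(void)
{
        /* 20-byte header, checksum field (bytes 16-17) left as zero */
        uint8_t seg[20] = { 0x04, 0xd2, 0x00, 0x50, 0, 0, 0, 0, 0, 0, 0, 0,
                            0x50, 0x02, 0xfa, 0xf0, 0, 0, 0, 0 };

        printf("checksum: 0x%04x\n",
               tcp_csum_ipv4(0xc0a80001, 0xc0a80002, seg, sizeof(seg)));
        return 0;
}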
/linux-5.19.10/drivers/net/ |
D | thunderbolt.c |
    980 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); in tbnet_xmit_csum_and_map()
    990 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); in tbnet_xmit_csum_and_map()
|