Lines matching refs: skb

in skb_eth_gso_segment():
     13  struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,    (argument)
     22  segs = ptype->callbacks.gso_segment(skb, features);

in skb_mac_gso_segment():
     37  struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,    (argument)
     42  int vlan_depth = skb->mac_len;
     43  __be16 type = skb_network_protocol(skb, &vlan_depth);
     48  __skb_pull(skb, vlan_depth);
     53  segs = ptype->callbacks.gso_segment(skb, features);
     59  __skb_push(skb, skb->data - skb_mac_header(skb));
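
Both helpers resolve the packet's L3 protocol to a struct packet_offload and invoke its gso_segment callback; skb_mac_gso_segment() additionally pulls the skb to the network header first (line 48) and pushes the MAC header back afterwards (line 59) so the caller sees the header pointers unchanged. A minimal sketch of that dispatch pattern, assuming the kernel's RCU-protected list of packet_offload entries (the list head has moved between kernel versions, so offload_base below is a placeholder name; type, skb and features are the helpers' own locals and arguments):

	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			/* first matching offload handler segments the skb */
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();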

in skb_needs_check():
     66  static bool skb_needs_check(const struct sk_buff *skb, bool tx_path)    (argument)
     69  return skb->ip_summed != CHECKSUM_PARTIAL &&
     70         skb->ip_summed != CHECKSUM_UNNECESSARY;
     72  return skb->ip_summed == CHECKSUM_NONE;
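
The two return statements at lines 69-72 suggest the full body; a plausible reconstruction, assuming the line not shown above is simply an if (tx_path) branch: on the transmit path any checksum state other than CHECKSUM_PARTIAL or CHECKSUM_UNNECESSARY needs attention, while on the receive path only CHECKSUM_NONE does.

	static bool skb_needs_check(const struct sk_buff *skb, bool tx_path)
	{
		if (tx_path)	/* assumed branch; not among the matches above */
			return skb->ip_summed != CHECKSUM_PARTIAL &&
			       skb->ip_summed != CHECKSUM_UNNECESSARY;

		return skb->ip_summed == CHECKSUM_NONE;
	}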

in __skb_gso_segment():
     88  struct sk_buff *__skb_gso_segment(struct sk_buff *skb,    (argument)
     93  if (unlikely(skb_needs_check(skb, tx_path))) {
     97  err = skb_cow_head(skb, 0);
    108  struct net_device *dev = skb->dev;
    111  if (!skb_gso_ok(skb, features | partial_features))
    116  sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
    118  SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
    119  SKB_GSO_CB(skb)->encap_level = 0;
    121  skb_reset_mac_header(skb);
    122  skb_reset_mac_len(skb);
    124  segs = skb_mac_gso_segment(skb, features);
    126  if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
    127  skb_warn_bad_offload(skb);
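
A hedged sketch of how a transmit-path caller typically consumes this, via the skb_gso_segment() wrapper (which calls __skb_gso_segment() with tx_path = true); the shape is loosely modeled on the pattern in validate_xmit_skb(), simplified rather than copied:

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);		/* segmentation failed, drop the packet */
			return NULL;
		} else if (segs) {
			consume_skb(skb);	/* original skb replaced by the segment list */
			skb = segs;
		}
	}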

in skb_gso_transport_seglen():
    143  static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)    (argument)
    145  const struct skb_shared_info *shinfo = skb_shinfo(skb);
    148  if (skb->encapsulation) {
    149  thlen = skb_inner_transport_header(skb) -
    150         skb_transport_header(skb);
    153  thlen += inner_tcp_hdrlen(skb);
    155  thlen = tcp_hdrlen(skb);
    156  } else if (unlikely(skb_is_gso_sctp(skb))) {
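
From these matches, thlen accumulates the transport-layer header bytes of one segment: for encapsulated TCP the tunnel headers plus the inner TCP header (inner_tcp_hdrlen()), for plain TCP just tcp_hdrlen(), with a separate branch for GSO SCTP. Assuming the helper ends the way its name implies, the per-segment transport length is those header bytes plus the per-segment payload:

	} else if (unlikely(skb_is_gso_sctp(skb))) {
		thlen = sizeof(struct sctphdr);		/* assumed SCTP branch body */
	}

	return thlen + shinfo->gso_size;		/* assumed tail: header + payload per segment */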

in skb_gso_network_seglen():
    178  static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)    (argument)
    180  unsigned int hdr_len = skb_transport_header(skb) -
    181         skb_network_header(skb);
    183  return hdr_len + skb_gso_transport_seglen(skb);

in skb_gso_mac_seglen():
    195  static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)    (argument)
    197  unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
    199  return hdr_len + skb_gso_transport_seglen(skb);
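
These two wrappers only widen the header span added on top of the per-segment transport length: skb_gso_network_seglen() measures from the network header, skb_gso_mac_seglen() from the MAC header. A worked example with assumed sizes (14-byte Ethernet, 20-byte IPv4 and 32-byte TCP headers, gso_size = 1448):

	transport seglen = 32 + 1448 = 1480
	network  seglen  = 20 + 1480 = 1500   (exactly fills a 1500-byte MTU)
	mac      seglen  = 14 + 1500 = 1514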

in skb_gso_size_check():
    223  static inline bool skb_gso_size_check(const struct sk_buff *skb,    (argument)
    226  const struct skb_shared_info *shinfo = skb_shinfo(skb);
    235  skb_walk_frags(skb, iter) {
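
Only the signature, the shinfo lookup and a skb_walk_frags() loop match here. A hedged reconstruction of the whole check, assuming the remaining parameter names (seg_len, max_len) and the usual two cases: a fixed gso_size is compared directly against the limit, while GSO_BY_FRAGS (used by SCTP) walks the frag list and tests each fragment's linear part plus the header bytes:

	static inline bool skb_gso_size_check(const struct sk_buff *skb,
					      unsigned int seg_len,
					      unsigned int max_len)
	{
		const struct skb_shared_info *shinfo = skb_shinfo(skb);
		const struct sk_buff *iter;

		if (shinfo->gso_size != GSO_BY_FRAGS)
			return seg_len <= max_len;

		/* GSO_BY_FRAGS: seg_len still carries GSO_BY_FRAGS as its
		 * "payload" term, so strip it and re-check per fragment. */
		seg_len -= GSO_BY_FRAGS;

		skb_walk_frags(skb, iter) {
			if (seg_len + skb_headlen(iter) > max_len)
				return false;
		}

		return true;
	}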

in skb_gso_validate_network_len():
    253  bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)    (argument)
    255  return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);

in skb_gso_validate_mac_len():
    268  bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)    (argument)
    270  return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
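
A hedged usage sketch for the exported validators: a forwarding-style MTU check can accept a GSO skb whose aggregate length exceeds the MTU as long as every segment it will produce fits. The helper name below is made up for illustration; the shape is loosely based on the IPv4 forward path's check, simplified rather than copied:

	/* Hypothetical helper; only skb_is_gso() and
	 * skb_gso_validate_network_len() are real kernel APIs here. */
	static bool sketch_pkt_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
	{
		if (skb->len <= mtu)
			return false;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			return false;	/* every L3 segment will fit the MTU */

		return true;
	}

skb_gso_validate_mac_len() performs the same check at the MAC layer, e.g. against a device's maximum frame size.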