/linux-6.1.9/include/linux/

  skbuff.h
     884  void (*destructor)(struct sk_buff *skb);
    1089  static inline bool skb_pfmemalloc(const struct sk_buff *skb)
    1091  return unlikely(skb->pfmemalloc);
    1107  static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
    1112  WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
    1115  return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
    1126  static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
    1128  skb->slow_gro |= !!dst;
    1129  skb->_skb_refdst = (unsigned long)dst;
    1142  static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
          [all …]

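The skb_dst()/skb_dst_set() accessors above hide the reference-counting bits stored in skb->_skb_refdst. A minimal sketch of how egress code typically consults the cached route, using only the dst helpers; the function name is illustrative, not from the source:

    #include <linux/skbuff.h>
    #include <net/dst.h>

    /* Hedged sketch: read the route cached on an skb and ask it for the
     * path MTU.  skb_dst() may return NULL if no route is attached. */
    static unsigned int example_cached_mtu(const struct sk_buff *skb)
    {
            const struct dst_entry *dst = skb_dst(skb);

            /* dst_mtu() resolves the MTU from the route and its device. */
            return dst ? dst_mtu(dst) : 0;
    }
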
/linux-6.1.9/net/xfrm/

  xfrm_output.c
      27  static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
      28  static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
      30  static int xfrm_skb_check_space(struct sk_buff *skb)
      32  struct dst_entry *dst = skb_dst(skb);
      34  - skb_headroom(skb);
      35  int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
      44  return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
      51  static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
      53  struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));
      55  skb_dst_drop(skb);
          [all …]

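xfrm_skb_check_space() above grows the skb when the outbound device needs more headroom or tailroom than the packet currently has. A minimal sketch of that pattern, assuming only pskb_expand_head() and the headroom helper; the function name is illustrative:

    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    /* Make sure 'skb' has at least 'needed' bytes of headroom before a
     * header is pushed; reallocates the head area if necessary. */
    static int example_ensure_headroom(struct sk_buff *skb, unsigned int needed)
    {
            int nhead = needed - skb_headroom(skb);

            if (nhead <= 0)
                    return 0;               /* already enough room */

            /* GFP_ATOMIC: this typically runs in the transmit path. */
            return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
    }
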
  xfrm_input.c
      40  int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
     102  static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
     113  ret = afinfo->callback(skb, protocol, err);
     119  struct sec_path *secpath_set(struct sk_buff *skb)
     121  struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
     123  sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
     141  int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
     158  if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
     160  *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
     167  if (!pskb_may_pull(skb, hlen))
          [all …]

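xfrm_parse_spi() above only dereferences the transport header after pskb_may_pull() has guaranteed the bytes are in the linear area. A minimal sketch of that guard, assuming the transport header offset is already set on the skb; names are illustrative:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /* Read the first 32-bit word of the transport header (e.g. an SPI),
     * pulling it into the linear data area first. */
    static int example_read_first_word(struct sk_buff *skb, __be32 *val)
    {
            unsigned int need = skb_transport_offset(skb) + sizeof(__be32);

            if (!pskb_may_pull(skb, need))
                    return -EINVAL;

            *val = *(__be32 *)skb_transport_header(skb);
            return 0;
    }
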
/linux-6.1.9/include/net/

  gro.h
      85  #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
      88  static inline int gro_recursion_inc_test(struct sk_buff *skb)
      90  return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
      96  struct sk_buff *skb)                      (in call_gro_receive())
      98  if (unlikely(gro_recursion_inc_test(skb))) {
      99  NAPI_GRO_CB(skb)->flush |= 1;
     103  return cb(head, skb);
     111  struct sk_buff *skb)                      (in call_gro_receive_sk())
     113  if (unlikely(gro_recursion_inc_test(skb))) {
     114  NAPI_GRO_CB(skb)->flush |= 1;
          [all …]

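call_gro_receive() above sets NAPI_GRO_CB(skb)->flush once the recursion limit is hit, which tells the GRO engine to flush the flow instead of coalescing further. A minimal sketch of sending the same signal from inside a gro_receive handler; the function name is illustrative:

    #include <net/gro.h>

    /* Mark a packet so GRO flushes it rather than merging more
     * segments into its flow. */
    static void example_gro_stop_merging(struct sk_buff *skb)
    {
            NAPI_GRO_CB(skb)->flush |= 1;
    }
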
  llc_c_ev.h
     123  static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
     125  return (struct llc_conn_state_ev *)skb->cb;
     128  typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
     129  typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
     131  int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
     132  int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
     133  int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
     134  int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
     135  int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
     136  int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
          [all …]

  llc_c_ac.h
      97  typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
      99  int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
     100  int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
     101  int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
     102  int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
     103  int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
     104  int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
     105  int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
     107  struct sk_buff *skb);
     109  struct sk_buff *skb);
          [all …]

/linux-6.1.9/net/core/

  gro.c
     101  struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
     110  segs = ptype->callbacks.gso_segment(skb, features);
     125  struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
     130  int vlan_depth = skb->mac_len;
     131  __be16 type = skb_network_protocol(skb, &vlan_depth);
     136  __skb_pull(skb, vlan_depth);
     141  segs = ptype->callbacks.gso_segment(skb, features);
     147  __skb_push(skb, skb->data - skb_mac_header(skb));
     153  int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
     155  struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
          [all …]

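skb_mac_gso_segment() above is what the core reaches, via skb_gso_segment(), when a GSO packet must be split in software. A minimal sketch of a caller that segments a packet and counts the resulting chain, following the usual ERR_PTR/NULL conventions; the function name is illustrative:

    #include <linux/err.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int example_count_sw_segments(struct sk_buff *skb)
    {
            struct sk_buff *segs, *seg;
            int n = 0;

            /* A zero feature mask forces full software segmentation. */
            segs = skb_gso_segment(skb, 0);
            if (IS_ERR(segs))
                    return PTR_ERR(segs);
            if (!segs)
                    return 0;       /* nothing needed segmenting */

            for (seg = segs; seg; seg = seg->next)
                    n++;

            kfree_skb_list(segs);   /* the caller still owns the original skb */
            return n;
    }
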
  skbuff.c
     113  static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
     117  msg, addr, skb->len, sz, skb->head, skb->data,
     118  (unsigned long)skb->tail, (unsigned long)skb->end,
     119  skb->dev ? skb->dev->name : "<NULL>");
     123  static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
     125  skb_panic(skb, sz, addr, __func__);
     128  static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
     130  skb_panic(skb, sz, addr, __func__);
     212  struct sk_buff *skb;                      (in napi_get_frags_check())
     215  skb = napi_get_frags(napi);
          [all …]

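skb_over_panic() above fires when code writes past the end of the buffer, typically via skb_put() with insufficient tailroom. A minimal sketch of the allocation pattern that avoids it by sizing the skb up front; the function name is illustrative:

    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    static struct sk_buff *example_build_packet(const void *payload,
                                                unsigned int len)
    {
            struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

            if (!skb)
                    return NULL;

            /* skb_put_data() extends the data area by 'len' and copies the
             * payload in; overrunning the tailroom would hit skb_over_panic(). */
            skb_put_data(skb, payload, len);
            return skb;
    }
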
/linux-6.1.9/drivers/net/can/dev/

  skb.c
      47  int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
      56  (skb->protocol != htons(ETH_P_CAN) &&
      57  skb->protocol != htons(ETH_P_CANFD))) {
      58  kfree_skb(skb);
      63  skb = can_create_echo_skb(skb);
      64  if (!skb)
      68  skb->ip_summed = CHECKSUM_UNNECESSARY;
      69  skb->dev = dev;
      72  can_skb_prv(skb)->frame_len = frame_len;
      74  if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
          [all …]

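can_put_echo_skb() above parks a copy of an outgoing frame so the driver can loop it back to the stack once the controller confirms transmission. A minimal sketch of how a driver's xmit path might use it, assuming the device was registered with echo-skb slots; the slot index, the frame_len of 0, and the elided hardware access are simplifications, and the function name is illustrative:

    #include <linux/can/dev.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t example_can_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
    {
            /* Stash the frame in echo slot 0; on error the helper frees the
             * skb itself.  frame_len is 0 here only to keep the sketch small. */
            if (can_put_echo_skb(skb, dev, 0, 0))
                    return NETDEV_TX_OK;

            /* ... write the frame to the controller and start transmission;
             * the TX-done IRQ would later call can_get_echo_skb(dev, 0, NULL). */

            return NETDEV_TX_OK;
    }
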
/linux-6.1.9/net/bridge/

  br_netfilter_hooks.c
      66  #define IS_IP(skb) \
      67  (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
      69  #define IS_IPV6(skb) \
      70  (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
      72  #define IS_ARP(skb) \
      73  (!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
      75  static inline __be16 vlan_proto(const struct sk_buff *skb)
      77  if (skb_vlan_tag_present(skb))
      78  return skb->protocol;
      79  else if (skb->protocol == htons(ETH_P_8021Q))
          [all …]

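The IS_IP()/IS_IPV6()/IS_ARP() macros above deliberately exclude frames whose VLAN tag is still present in skb metadata. A minimal sketch of the same test written as a function; the name is illustrative:

    #include <linux/if_ether.h>
    #include <linux/if_vlan.h>
    #include <linux/skbuff.h>

    /* True only for untagged IPv4: a frame with an offloaded VLAN tag is
     * not treated as plain IP even though skb->protocol says ETH_P_IP. */
    static bool example_is_untagged_ipv4(const struct sk_buff *skb)
    {
            return !skb_vlan_tag_present(skb) &&
                   skb->protocol == htons(ETH_P_IP);
    }
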
  br_forward.c
      22  const struct sk_buff *skb)                (in should_deliver())
      27  return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
      28  p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
      29  nbp_switchdev_allowed_egress(p, skb) &&
      30  !br_skb_isolated(p, skb);
      33  int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
      35  skb_push(skb, ETH_HLEN);
      36  if (!is_skb_forwardable(skb->dev, skb))
      39  br_drop_fake_rtable(skb);
      41  if (skb->ip_summed == CHECKSUM_PARTIAL &&
          [all …]

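br_dev_queue_push_xmit() above restores the Ethernet header (which the receive path pulled off) and checks that the frame still fits the egress device before queuing it. A minimal sketch of that sequence using only core helpers; the function name is illustrative:

    #include <linux/errno.h>
    #include <linux/if_ether.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static int example_push_eth_and_xmit(struct sk_buff *skb)
    {
            /* Put the 14-byte Ethernet header back in front of the data. */
            skb_push(skb, ETH_HLEN);

            /* Oversized (and non-GSO) frames must not be forwarded. */
            if (!is_skb_forwardable(skb->dev, skb)) {
                    kfree_skb(skb);
                    return -EMSGSIZE;
            }

            return dev_queue_xmit(skb);
    }
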
  br_netfilter_ipv6.c
      46  static int br_nf_check_hbh_len(struct sk_buff *skb)
      48  unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
      50  const unsigned char *nh = skb_network_header(skb);
      54  if ((raw + len) - skb->data > skb_headlen(skb))
      76  ipv6_hdr(skb)->payload_len)
      78  if (pkt_len > skb->len - sizeof(struct ipv6hdr))
      80  if (pskb_trim_rcsum(skb,
      83  nh = skb_network_header(skb);
      99  int br_validate_ipv6(struct net *net, struct sk_buff *skb)
     102  struct inet6_dev *idev = __in6_dev_get(skb->dev);
          [all …]

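br_validate_ipv6() above compares the length claimed by the IPv6 header with skb->len and trims any trailing padding with pskb_trim_rcsum(). A minimal sketch of that check, assuming the network header already points at the IPv6 header and ignoring the jumbogram case; the name is illustrative:

    #include <linux/errno.h>
    #include <linux/ipv6.h>
    #include <linux/skbuff.h>

    static int example_trim_to_ipv6_length(struct sk_buff *skb)
    {
            u32 pkt_len = ntohs(ipv6_hdr(skb)->payload_len);

            if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
                    return -EINVAL;         /* truncated on the wire */

            /* Drop padding beyond the IPv6 payload while keeping the
             * checksum state consistent. */
            return pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr));
    }
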
/linux-6.1.9/net/ipv4/

  udp_offload.c
      15  static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
      17  struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
      21  int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
      24  struct udphdr *uh = udp_hdr(skb);
      25  u16 mac_offset = skb->mac_header;
      26  __be16 protocol = skb->protocol;
      27  u16 mac_len = skb->mac_len;
      32  if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
      41  if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
      44  partial = (__force __wsum)htonl(skb->len);
          [all …]

  ip_input.c
     148  bool ip_call_ra_chain(struct sk_buff *skb)
     151  u8 protocol = ip_hdr(skb)->protocol;
     153  struct net_device *dev = skb->dev;
     165  if (ip_is_fragment(ip_hdr(skb))) {
     166  if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
     170  struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
     179  raw_rcv(last, skb);
     187  void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
     193  raw = raw_local_deliver(skb, protocol);
     198  if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
          [all …]

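ip_call_ra_chain() above hands a clone of the packet to every Router Alert listener except the last, which receives the original. A minimal sketch of that clone-per-consumer pattern, with the delivery function passed in to keep the example self-contained; names are illustrative:

    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    /* Give one extra consumer its own copy; the caller keeps ownership of
     * the original skb for the final consumer. */
    static void example_deliver_clone(struct sk_buff *skb,
                                      void (*deliver)(struct sk_buff *))
    {
            struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

            if (clone)
                    deliver(clone);         /* the consumer frees the clone */
    }
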
  tcp_offload.c
      15  static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
      18  while (skb) {
      20  skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
      21  skb_shinfo(skb)->tskey = ts_seq;
      25  skb = skb->next;
      30  static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
      33  if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
      36  if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
      39  if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
      40  const struct iphdr *iph = ip_hdr(skb);
          [all …]

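tcp_gso_tstamp() above walks the post-GSO segment chain through skb->next to copy timestamp flags onto every piece. A minimal sketch of that chain walk with the per-segment work abstracted into a callback; names are illustrative:

    #include <linux/skbuff.h>

    /* Visit every segment in a post-GSO chain (linked through skb->next),
     * e.g. to copy per-packet metadata onto each piece. */
    static void example_for_each_segment(struct sk_buff *segs,
                                         void (*visit)(struct sk_buff *))
    {
            while (segs) {
                    visit(segs);
                    segs = segs->next;
            }
    }
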
  gre_offload.c
      15  static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
      18  int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
      21  u16 mac_offset = skb->mac_header;
      22  __be16 protocol = skb->protocol;
      23  u16 mac_len = skb->mac_len;
      26  if (!skb->encapsulation)
      32  if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
      36  skb->encapsulation = 0;
      37  SKB_GSO_CB(skb)->encap_level = 0;
      38  __skb_pull(skb, tnl_hlen);
          [all …]

/linux-6.1.9/net/ipv6/

  exthdrs.c
      64  static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
      78  switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
      89  if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
      93  icmpv6_param_prob_reason(skb, ICMPV6_UNK_OPTION, optoff,
      99  kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
     103  static bool ipv6_hop_ra(struct sk_buff *skb, int optoff);
     104  static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff);
     105  static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff);
     106  static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff);
     108  static bool ipv6_dest_hao(struct sk_buff *skb, int optoff);
          [all …]

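ip6_tlvopt_unknown() above dispatches on the top two bits of the unknown option type, which encode what RFC 8200 says the receiver must do with it. A minimal sketch spelling out that mapping; the name is illustrative:

    #include <linux/types.h>

    /* Action encoded in bits 7-6 of an IPv6 TLV option type. */
    static const char *example_tlv_unknown_action(u8 opt_type)
    {
            switch ((opt_type & 0xC0) >> 6) {
            case 0: return "skip the option and continue";
            case 1: return "silently discard the packet";
            case 2: return "discard and send ICMPv6 Parameter Problem";
            default: return "discard; send Parameter Problem only if unicast";
            }
    }
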
  ip6_input.c
      49  struct sk_buff *skb)                      (in ip6_rcv_finish_core())
      52  !skb_dst(skb) && !skb->sk) {
      53  switch (ipv6_hdr(skb)->nexthdr) {
      56  tcp_v6_early_demux(skb);
      60  udp_v6_early_demux(skb);
      65  if (!skb_valid_dst(skb))
      66  ip6_route_input(skb);
      69  int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
      74  skb = l3mdev_ip6_rcv(skb);
      75  if (!skb)
          [all …]

  ip6_offload.c
      32  #define indirect_call_gro_receive_l4(f2, f1, cb, head, skb) \
      34  unlikely(gro_recursion_inc_test(skb)) ? \
      35  NAPI_GRO_CB(skb)->flush |= 1, NULL : \
      36  INDIRECT_CALL_L4(cb, f2, f1, head, skb); \
      39  static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
      57  if (unlikely(!pskb_may_pull(skb, 8)))
      60  opth = (void *)skb->data;
      63  if (unlikely(!pskb_may_pull(skb, len)))
      66  opth = (void *)skb->data;
      68  __skb_pull(skb, len);
          [all …]

/linux-6.1.9/drivers/net/ethernet/qualcomm/rmnet/

  rmnet_handlers.c
      22  static void rmnet_set_skb_proto(struct sk_buff *skb)
      24  switch (skb->data[0] & 0xF0) {
      26  skb->protocol = htons(ETH_P_IP);
      29  skb->protocol = htons(ETH_P_IPV6);
      32  skb->protocol = htons(ETH_P_MAP);
      40  rmnet_deliver_skb(struct sk_buff *skb)
      42  struct rmnet_priv *priv = netdev_priv(skb->dev);
      44  skb_reset_transport_header(skb);
      45  skb_reset_network_header(skb);
      46  rmnet_vnd_rx_fixup(skb, skb->dev);
          [all …]

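rmnet_set_skb_proto() above classifies a de-aggregated MAP payload purely by the IP version nibble in its first byte. A minimal sketch of the same classification; the name is illustrative:

    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    static __be16 example_guess_l3_proto(const struct sk_buff *skb)
    {
            switch (skb->data[0] & 0xF0) {
            case 0x40:
                    return htons(ETH_P_IP);         /* IPv4, version nibble 4 */
            case 0x60:
                    return htons(ETH_P_IPV6);       /* IPv6, version nibble 6 */
            default:
                    return htons(ETH_P_MAP);        /* anything else stays MAP */
            }
    }
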
/linux-6.1.9/net/sched/

  sch_frag.c
      18  int (*xmit)(struct sk_buff *skb);
      23  static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
      27  if (skb_cow_head(skb, data->l2_len) < 0) {
      28  kfree_skb(skb);
      32  __skb_dst_copy(skb, data->dst);
      33  *qdisc_skb_cb(skb) = data->cb;
      34  skb->inner_protocol = data->inner_protocol;
      36  __vlan_hwaccel_put_tag(skb, data->vlan_proto,
      39  __vlan_hwaccel_clear_tag(skb);
      42  skb_push(skb, data->l2_len);
          [all …]

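sch_frag_xmit() above calls skb_cow_head() before pushing the saved L2 header back on, which guarantees the header area is both large enough and private to this skb. A minimal sketch of that prepare-then-push pattern; names are illustrative:

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    static int example_restore_l2_header(struct sk_buff *skb,
                                         const void *l2, unsigned int l2_len)
    {
            /* Ensure l2_len bytes of writable headroom (may reallocate). */
            if (skb_cow_head(skb, l2_len) < 0) {
                    kfree_skb(skb);
                    return -ENOMEM;
            }

            memcpy(skb_push(skb, l2_len), l2, l2_len);
            return 0;
    }
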
/linux-6.1.9/net/bridge/netfilter/

  nf_conntrack_bridge.c
      28  struct sk_buff *skb,                      (in nf_br_ip_fragment())
      34  int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
      35  bool mono_delivery_time = skb->mono_delivery_time;
      37  ktime_t tstamp = skb->tstamp;
      43  if (skb->ip_summed == CHECKSUM_PARTIAL &&
      44  (err = skb_checksum_help(skb)))
      47  iph = ip_hdr(skb);
      55  ll_rs = LL_RESERVED_SPACE(skb->dev);
      56  mtu = skb->dev->mtu;
      58  if (skb_has_frag_list(skb)) {
          [all …]

/linux-6.1.9/drivers/bluetooth/

  btbcm.c
      40  struct sk_buff *skb;                      (in btbcm_check_bdaddr())
      42  skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
      44  if (IS_ERR(skb)) {
      45  int err = PTR_ERR(skb);
      51  if (skb->len != sizeof(*bda)) {
      53  kfree_skb(skb);
      57  bda = (struct hci_rp_read_bd_addr *)skb->data;
      95  kfree_skb(skb);
     103  struct sk_buff *skb;                      (in btbcm_set_bdaddr())
     106  skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
          [all …]

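btbcm_check_bdaddr() above uses __hci_cmd_sync() to issue an HCI command and receive the command-complete event back as an skb. A minimal sketch of that request/response pattern for HCI_OP_READ_BD_ADDR; the function name is illustrative and error handling is condensed:

    #include <linux/err.h>
    #include <net/bluetooth/bluetooth.h>
    #include <net/bluetooth/hci_core.h>

    static int example_read_bdaddr(struct hci_dev *hdev, bdaddr_t *out)
    {
            struct hci_rp_read_bd_addr *rp;
            struct sk_buff *skb;

            skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
                                 HCI_INIT_TIMEOUT);
            if (IS_ERR(skb))
                    return PTR_ERR(skb);

            if (skb->len != sizeof(*rp)) {
                    kfree_skb(skb);
                    return -EIO;            /* malformed response */
            }

            rp = (struct hci_rp_read_bd_addr *)skb->data;
            bacpy(out, &rp->bdaddr);
            kfree_skb(skb);
            return 0;
    }
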
/linux-6.1.9/drivers/net/wireless/ath/ath10k/

  wmi-ops.h
      15  void (*rx)(struct ath10k *ar, struct sk_buff *skb);
      19  int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
      21  int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
      23  int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
      26  struct ath10k *ar, struct sk_buff *skb,
      28  int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
      30  int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
      32  int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
      34  int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
      36  int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
          [all …]

/linux-6.1.9/net/ieee802154/6lowpan/

  rx.c
      21  static int lowpan_give_skb_to_device(struct sk_buff *skb)
      23  skb->protocol = htons(ETH_P_IPV6);
      24  skb->dev->stats.rx_packets++;
      25  skb->dev->stats.rx_bytes += skb->len;
      27  return netif_rx(skb);
      30  static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
      40  kfree_skb(skb);
      46  return lowpan_give_skb_to_device(skb);
      64  static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
      68  if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
          [all …]