Searched refs:shinfo (Results 1 – 25 of 34) sorted by relevance

/linux-6.1.9/net/tls/
tls_strp.c
29 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_anchor_free() local
31 DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1); in tls_strp_anchor_free()
32 shinfo->frag_list = NULL; in tls_strp_anchor_free()
128 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_msg_hold() local
133 WARN_ON_ONCE(!shinfo->nr_frags); in tls_strp_msg_hold()
148 iter = shinfo->frag_list; in tls_strp_msg_hold()
175 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); in tls_strp_flush_anchor_copy() local
178 DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1); in tls_strp_flush_anchor_copy()
180 for (i = 0; i < shinfo->nr_frags; i++) in tls_strp_flush_anchor_copy()
181 __skb_frag_unref(&shinfo->frags[i], false); in tls_strp_flush_anchor_copy()
[all …]
/linux-6.1.9/tools/testing/selftests/kvm/x86_64/
xen_shinfo_test.c
455 struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR); in main() local
489 struct pvclock_wall_clock wc_copy = shinfo->wc; in main()
490 void *m = mmap(shinfo, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, zero_fd, 0); in main()
491 TEST_ASSERT(m == shinfo, "Failed to map /dev/zero over shared info"); in main()
492 shinfo->wc = wc_copy; in main()
692 shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1; in main()
701 shinfo->evtchn_pending[0] = 0; in main()
702 shinfo->evtchn_mask[0] = 0; in main()
711 shinfo->evtchn_pending[1] = 0; in main()
727 shinfo->evtchn_pending[0] = 0; in main()
[all …]
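The xen_shinfo_test hits above save the wall-clock field, map /dev/zero over the shared-info page with MAP_FIXED to wipe it in place, and then restore the saved value. Below is a minimal userspace sketch of that save/wipe/restore pattern; an anonymous page and a string field stand in for the guest shared-info page and its wc field, so all names and sizes here are illustrative, not the selftest's.

/*
 * Sketch (userspace only, illustrative names): save a field from a page,
 * mmap /dev/zero over that page with MAP_FIXED to zero it in place, then
 * restore the saved field.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Stand-in for the guest-visible shared_info page. */
	char *shinfo = mmap(NULL, page, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (shinfo == MAP_FAILED)
		return 1;

	strcpy(shinfo, "wallclock");                 /* pretend this is shinfo->wc */

	char wc_copy[16];
	memcpy(wc_copy, shinfo, sizeof(wc_copy));    /* wc_copy = shinfo->wc */

	int zero_fd = open("/dev/zero", O_RDONLY);
	if (zero_fd < 0)
		return 1;

	/* Wipe the whole page in place. */
	void *m = mmap(shinfo, page, PROT_READ | PROT_WRITE,
		       MAP_FIXED | MAP_PRIVATE, zero_fd, 0);
	if (m != shinfo) {
		perror("mmap over shared info");
		return 1;
	}

	memcpy(shinfo, wc_copy, sizeof(wc_copy));    /* shinfo->wc = wc_copy */
	printf("restored: %s\n", shinfo);

	close(zero_fd);
	return 0;
}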
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx4/
en_tx.c
602 const struct skb_shared_info *shinfo, in is_inline() argument
610 if (shinfo->nr_frags == 1) { in is_inline()
611 ptr = skb_frag_address_safe(&shinfo->frags[0]); in is_inline()
617 if (shinfo->nr_frags) in is_inline()
634 const struct skb_shared_info *shinfo, in get_real_size() argument
644 if (shinfo->gso_size) { in get_real_size()
658 real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE + in get_real_size()
674 shinfo, pfrag); in get_real_size()
680 (shinfo->nr_frags + 1) * DS_SIZE; in get_real_size()
688 const struct skb_shared_info *shinfo, in build_inline_wqe() argument
[all …]
/linux-6.1.9/include/linux/
virtio_net.h
145 struct skb_shared_info *shinfo = skb_shinfo(skb); in virtio_net_hdr_to_skb() local
153 shinfo->gso_size = gso_size; in virtio_net_hdr_to_skb()
154 shinfo->gso_type = gso_type; in virtio_net_hdr_to_skb()
157 shinfo->gso_type |= SKB_GSO_DODGY; in virtio_net_hdr_to_skb()
158 shinfo->gso_segs = 0; in virtio_net_hdr_to_skb()
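The virtio_net.h hits show GSO metadata being copied from the device-supplied header into the skb's shared info, flagged as untrusted, and gso_segs reset so the stack recomputes it. Below is a compilable sketch of that copy with simplified stand-in types; virtio_hdr, shared_info and GSO_DODGY are mock names, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define GSO_DODGY (1u << 2)                    /* stand-in for SKB_GSO_DODGY */

struct virtio_hdr  { uint16_t gso_size; uint32_t gso_type; };                    /* mock */
struct shared_info { uint16_t gso_size; uint32_t gso_type; uint16_t gso_segs; }; /* mock */

static void hdr_to_shinfo(const struct virtio_hdr *h, struct shared_info *shinfo)
{
	shinfo->gso_size = h->gso_size;
	shinfo->gso_type = h->gso_type;
	shinfo->gso_type |= GSO_DODGY;         /* metadata came from an untrusted source */
	shinfo->gso_segs = 0;                  /* force the stack to recompute this */
}

int main(void)
{
	struct virtio_hdr h = { .gso_size = 1448, .gso_type = 1 };
	struct shared_info si = { 0 };

	hdr_to_shinfo(&h, &si);
	printf("gso_size=%u type=%#x segs=%u\n",
	       (unsigned)si.gso_size, (unsigned)si.gso_type, (unsigned)si.gso_segs);
	return 0;
}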
skbuff.h
2386 static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, in __skb_fill_page_desc_noacc() argument
2390 skb_frag_t *frag = &shinfo->frags[i]; in __skb_fill_page_desc_noacc()
2472 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_fill_page_desc_noacc() local
2474 __skb_fill_page_desc_noacc(shinfo, i, page, off, size); in skb_fill_page_desc_noacc()
2475 shinfo->nr_frags = i + 1; in skb_fill_page_desc_noacc()
3397 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_frag_unref() local
3400 __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle); in skb_frag_unref()
4906 static inline void skb_increase_gso_size(struct skb_shared_info *shinfo, in skb_increase_gso_size() argument
4909 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS)) in skb_increase_gso_size()
4911 shinfo->gso_size += increment; in skb_increase_gso_size()
[all …]
/linux-6.1.9/drivers/net/xen-netback/
netback.c
387 struct skb_shared_info *shinfo = skb_shinfo(skb); in xenvif_get_requests() local
388 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
396 nr_slots = shinfo->nr_frags + 1; in xenvif_get_requests()
451 for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; in xenvif_get_requests()
452 shinfo->nr_frags++, gop++) { in xenvif_get_requests()
457 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); in xenvif_get_requests()
467 shinfo = skb_shinfo(nskb); in xenvif_get_requests()
468 frags = shinfo->frags; in xenvif_get_requests()
470 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; in xenvif_get_requests()
471 shinfo->nr_frags++, txp++, gop++) { in xenvif_get_requests()
[all …]
/linux-6.1.9/drivers/net/ethernet/fungible/funeth/
funeth_tx.c
154 const struct skb_shared_info *shinfo; in write_pkt_desc() local
164 shinfo = skb_shinfo(skb); in write_pkt_desc()
165 if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data, in write_pkt_desc()
179 if (likely(shinfo->gso_size)) { in write_pkt_desc()
186 if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL | in write_pkt_desc()
190 if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) in write_pkt_desc()
213 shinfo->gso_size, in write_pkt_desc()
219 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { in write_pkt_desc()
232 shinfo->gso_size, in write_pkt_desc()
242 if (shinfo->gso_type & SKB_GSO_TCPV6) in write_pkt_desc()
[all …]
/linux-6.1.9/drivers/net/ethernet/marvell/octeon_ep/
octep_tx.c
38 struct skb_shared_info *shinfo; in octep_iq_process_completions() local
67 shinfo = skb_shinfo(skb); in octep_iq_process_completions()
68 frags = shinfo->nr_frags; in octep_iq_process_completions()
108 struct skb_shared_info *shinfo; in octep_iq_free_pending() local
129 shinfo = skb_shinfo(skb); in octep_iq_free_pending()
130 frags = shinfo->nr_frags; in octep_iq_free_pending()
octep_rx.c
404 struct skb_shared_info *shinfo; in __octep_oq_process_rx() local
418 shinfo = skb_shinfo(skb); in __octep_oq_process_rx()
433 skb_add_rx_frag(skb, shinfo->nr_frags, in __octep_oq_process_rx()
octep_main.c
625 struct skb_shared_info *shinfo; in octep_start_xmit() local
644 shinfo = skb_shinfo(skb); in octep_start_xmit()
645 nr_frags = shinfo->nr_frags; in octep_start_xmit()
689 frag = &shinfo->frags[0]; in octep_start_xmit()
/linux-6.1.9/drivers/net/ethernet/google/gve/
gve_tx_dqo.c
454 const struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_tx_add_skb_no_copy_dqo() local
507 /*eop=*/shinfo->nr_frags == 0, is_gso); in gve_tx_add_skb_no_copy_dqo()
510 for (i = 0; i < shinfo->nr_frags; i++) { in gve_tx_add_skb_no_copy_dqo()
511 const skb_frag_t *frag = &shinfo->frags[i]; in gve_tx_add_skb_no_copy_dqo()
512 bool is_eop = i == (shinfo->nr_frags - 1); in gve_tx_add_skb_no_copy_dqo()
577 const struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_num_buffer_descs_needed() local
583 for (i = 0; i < shinfo->nr_frags; i++) { in gve_num_buffer_descs_needed()
584 unsigned int frag_size = skb_frag_size(&shinfo->frags[i]); in gve_num_buffer_descs_needed()
602 const struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_can_send_tso() local
604 const int gso_size = shinfo->gso_size; in gve_can_send_tso()
[all …]
gve_tx.c
516 const struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_tx_add_skb_no_copy() local
549 num_descriptors = 1 + shinfo->nr_frags; in gve_tx_add_skb_no_copy()
575 for (i = 0; i < shinfo->nr_frags; i++) { in gve_tx_add_skb_no_copy()
576 const skb_frag_t *frag = &shinfo->frags[i]; in gve_tx_add_skb_no_copy()
596 i += num_descriptors - shinfo->nr_frags; in gve_tx_add_skb_no_copy()
gve_rx_dqo.c
608 struct skb_shared_info *shinfo = skb_shinfo(skb); in gve_rx_complete_rsc() local
616 shinfo->gso_type = SKB_GSO_TCPV4; in gve_rx_complete_rsc()
619 shinfo->gso_type = SKB_GSO_TCPV6; in gve_rx_complete_rsc()
625 shinfo->gso_size = le16_to_cpu(desc->rsc_seg_len); in gve_rx_complete_rsc()
/linux-6.1.9/arch/x86/kvm/
xen.c
81 struct shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init() local
83 wc_sec_hi = &shinfo->wc_sec_hi; in kvm_xen_shared_info_init()
84 wc = &shinfo->wc; in kvm_xen_shared_info_init()
88 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init() local
90 wc_sec_hi = &shinfo->arch.wc_sec_hi; in kvm_xen_shared_info_init()
91 wc = &shinfo->wc; in kvm_xen_shared_info_init()
982 struct shared_info *shinfo = gpc->khva; in wait_pending_event() local
983 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in wait_pending_event()
985 struct compat_shared_info *shinfo = gpc->khva; in wait_pending_event() local
986 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in wait_pending_event()
[all …]
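In the xen.c hits, the same cached shared-info mapping is read through either the 64-bit or the compat structure layout, and the wall-clock pointers are picked accordingly. The sketch below shows only that pointer-selection pattern in userspace C; the struct layouts and field names are simplified stand-ins for struct shared_info / compat_shared_info, and the real wall-clock version protocol is omitted.

#include <stdint.h>
#include <stdio.h>

struct wall_clock { uint32_t sec; uint32_t nsec; };                            /* mock */

struct shared_info_64     { struct wall_clock wc; uint32_t wc_sec_hi; };       /* mock */
struct shared_info_compat { struct wall_clock wc; uint32_t arch_wc_sec_hi; };  /* mock */

static void write_wallclock(void *khva, int long_mode, uint32_t sec, uint32_t sec_hi)
{
	struct wall_clock *wc;
	uint32_t *wc_sec_hi;

	/* Interpret the same mapping through whichever layout the guest uses. */
	if (long_mode) {
		struct shared_info_64 *shinfo = khva;
		wc = &shinfo->wc;
		wc_sec_hi = &shinfo->wc_sec_hi;
	} else {
		struct shared_info_compat *shinfo = khva;
		wc = &shinfo->wc;
		wc_sec_hi = &shinfo->arch_wc_sec_hi;
	}

	wc->sec = sec;
	*wc_sec_hi = sec_hi;
}

int main(void)
{
	struct shared_info_64 si = { 0 };

	write_wallclock(&si, /*long_mode=*/1, 1700000000u, 0);
	printf("wc.sec=%u\n", (unsigned)si.wc.sec);
	return 0;
}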
/linux-6.1.9/drivers/net/ethernet/broadcom/bnxt/
bnxt_xdp.c
203 struct skb_shared_info *shinfo; in bnxt_xdp_buff_frags_free() local
208 shinfo = xdp_get_shared_info_from_buff(xdp); in bnxt_xdp_buff_frags_free()
209 for (i = 0; i < shinfo->nr_frags; i++) { in bnxt_xdp_buff_frags_free()
210 struct page *page = skb_frag_page(&shinfo->frags[i]); in bnxt_xdp_buff_frags_free()
214 shinfo->nr_frags = 0; in bnxt_xdp_buff_frags_free()
/linux-6.1.9/drivers/net/wwan/t7xx/
t7xx_hif_dpmaif_tx.c
247 struct skb_shared_info *shinfo; in t7xx_dpmaif_add_skb_to_ring() local
261 shinfo = skb_shinfo(skb); in t7xx_dpmaif_add_skb_to_ring()
262 if (shinfo->frag_list) in t7xx_dpmaif_add_skb_to_ring()
265 payload_cnt = shinfo->nr_frags + 1; in t7xx_dpmaif_add_skb_to_ring()
290 skb_frag_t *frag = shinfo->frags + wr_cnt - 1; in t7xx_dpmaif_add_skb_to_ring()
/linux-6.1.9/net/core/
skbuff.c
276 struct skb_shared_info *shinfo; in __build_skb_around() local
292 shinfo = skb_shinfo(skb); in __build_skb_around()
293 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); in __build_skb_around()
294 atomic_set(&shinfo->dataref, 1); in __build_skb_around()
766 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data() local
771 &shinfo->dataref)) in skb_release_data()
775 bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS; in skb_release_data()
782 for (i = 0; i < shinfo->nr_frags; i++) in skb_release_data()
783 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
786 if (shinfo->frag_list) in skb_release_data()
[all …]
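The skb_release_data() hits above only walk and unreference the frags once the skb's dataref count drops to zero, since several skbs can share one data area. Here is a small sketch of that idea using C11 atomics and mock types; the real function also splits dataref into payload and header halves and handles page-pool recycling, all of which is omitted.

#include <stdatomic.h>
#include <stdio.h>

struct shared_info { atomic_int dataref; int nr_frags; };   /* mock */

static void release_data(struct shared_info *shinfo)
{
	/* Not the last user of the data area yet: leave the frags alone. */
	if (atomic_fetch_sub(&shinfo->dataref, 1) != 1)
		return;

	for (int i = 0; i < shinfo->nr_frags; i++)
		printf("unref frag %d\n", i);      /* __skb_frag_unref() stand-in */
}

int main(void)
{
	struct shared_info si = { .nr_frags = 2 };

	atomic_init(&si.dataref, 2);               /* two skbs share the data */
	release_data(&si);                         /* first release: nothing freed */
	release_data(&si);                         /* last release: frags dropped */
	return 0;
}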
lwt_bpf.c
527 struct skb_shared_info *shinfo = skb_shinfo(skb); in handle_gso_type() local
530 shinfo->gso_type |= gso_type; in handle_gso_type()
531 skb_decrease_gso_size(shinfo, encap_len); in handle_gso_type()
532 shinfo->gso_segs = 0; in handle_gso_type()
/linux-6.1.9/net/ipv4/
tcp_output.c
1492 struct skb_shared_info *shinfo = skb_shinfo(skb); in tcp_fragment_tstamp() local
1495 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { in tcp_fragment_tstamp()
1497 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_fragment_tstamp()
1499 shinfo->tx_flags &= ~tsflags; in tcp_fragment_tstamp()
1501 swap(shinfo->tskey, shinfo2->tskey); in tcp_fragment_tstamp()
1629 struct skb_shared_info *shinfo; in __pskb_trim_head() local
1641 shinfo = skb_shinfo(skb); in __pskb_trim_head()
1642 for (i = 0; i < shinfo->nr_frags; i++) { in __pskb_trim_head()
1643 int size = skb_frag_size(&shinfo->frags[i]); in __pskb_trim_head()
1649 shinfo->frags[k] = shinfo->frags[i]; in __pskb_trim_head()
[all …]
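The __pskb_trim_head() hits show the frag array being compacted as bytes are trimmed from the front of the skb: fully consumed frags are dropped and the survivors shifted down to index k. The sketch below runs that loop over a plain array of fragment sizes with made-up numbers; the real code also unreferences the dropped pages and bumps the partially eaten frag's page offset.

#include <stdio.h>

int main(void)
{
	int frag_size[4] = { 100, 200, 300, 400 };
	int nr_frags = 4, eat = 250;               /* bytes to trim from the head */
	int i, k = 0;

	for (i = 0; i < nr_frags; i++) {
		int size = frag_size[i];

		if (size <= eat) {
			eat -= size;               /* frag fully consumed: drop it */
		} else {
			frag_size[k] = size - eat; /* partially consumed survivor */
			/* real code also bumps this frag's page offset by eat */
			eat = 0;
			k++;
		}
	}
	nr_frags = k;

	for (i = 0; i < nr_frags; i++)
		printf("frag %d: %d bytes\n", i, frag_size[i]);
	return 0;
}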
tcp_ipv4.c
1762 struct skb_shared_info *shinfo; in tcp_add_backlog() local
1820 shinfo = skb_shinfo(skb); in tcp_add_backlog()
1821 gso_size = shinfo->gso_size ?: skb->len; in tcp_add_backlog()
1822 gso_segs = shinfo->gso_segs ?: 1; in tcp_add_backlog()
1824 shinfo = skb_shinfo(tail); in tcp_add_backlog()
1825 tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen); in tcp_add_backlog()
1826 tail_gso_segs = shinfo->gso_segs ?: 1; in tcp_add_backlog()
1854 shinfo->gso_size = max(gso_size, tail_gso_size); in tcp_add_backlog()
1855 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF); in tcp_add_backlog()
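The tcp_add_backlog() hits end with the arithmetic used when an incoming segment is coalesced onto the backlog tail: the tail keeps the larger of the two gso_size values and the summed gso_segs, capped at 0xFFFF. The same arithmetic with mock numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gso_size = 1448, gso_segs = 3;           /* incoming skb (mock)  */
	uint32_t tail_gso_size = 1208, tail_gso_segs = 5; /* backlog tail (mock)  */

	uint32_t merged_size = gso_size > tail_gso_size ? gso_size : tail_gso_size;
	uint32_t merged_segs = gso_segs + tail_gso_segs;
	if (merged_segs > 0xFFFF)                         /* cap, as in min_t() above */
		merged_segs = 0xFFFF;

	printf("gso_size=%u gso_segs=%u\n", (unsigned)merged_size, (unsigned)merged_segs);
	return 0;
}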
/linux-6.1.9/drivers/net/wireless/mediatek/mt76/
dma.c
621 struct skb_shared_info *shinfo = skb_shinfo(skb); in mt76_add_fragment() local
622 int nr_frags = shinfo->nr_frags; in mt76_add_fragment()
624 if (nr_frags < ARRAY_SIZE(shinfo->frags)) { in mt76_add_fragment()
637 if (nr_frags < ARRAY_SIZE(shinfo->frags)) in mt76_add_fragment()
/linux-6.1.9/drivers/net/ethernet/freescale/enetc/
enetc.c
1286 struct skb_shared_info *shinfo; in enetc_xdp_frame_to_xdp_tx_swbd() local
1310 shinfo = xdp_get_shared_info_from_frame(xdp_frame); in enetc_xdp_frame_to_xdp_tx_swbd()
1312 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; in enetc_xdp_frame_to_xdp_tx_swbd()
1393 struct skb_shared_info *shinfo; in enetc_map_rx_buff_to_xdp() local
1401 shinfo = xdp_get_shared_info_from_buff(xdp_buff); in enetc_map_rx_buff_to_xdp()
1402 shinfo->nr_frags = 0; in enetc_map_rx_buff_to_xdp()
1408 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); in enetc_add_rx_buff_to_xdp() local
1410 skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags]; in enetc_add_rx_buff_to_xdp()
1419 shinfo->nr_frags++; in enetc_add_rx_buff_to_xdp()
/linux-6.1.9/io_uring/
net.c
1012 struct skb_shared_info *shinfo = skb_shinfo(skb); in io_sg_from_iter() local
1013 int frag = shinfo->nr_frags; in io_sg_from_iter()
1020 shinfo->flags |= SKBFL_MANAGED_FRAG_REFS; in io_sg_from_iter()
1033 __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page, in io_sg_from_iter()
1040 shinfo->nr_frags = frag; in io_sg_from_iter()
/linux-6.1.9/net/sched/
sch_cake.c
1351 const struct skb_shared_info *shinfo = skb_shinfo(skb); in cake_overhead() local
1359 if (!shinfo->gso_size) in cake_overhead()
1366 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | in cake_overhead()
1383 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) in cake_overhead()
1385 shinfo->gso_size); in cake_overhead()
1387 segs = shinfo->gso_segs; in cake_overhead()
1389 len = shinfo->gso_size + hdr_len; in cake_overhead()
1390 last_len = skb->len - shinfo->gso_size * (segs - 1); in cake_overhead()
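The cake_overhead() hits compute per-segment lengths for a GSO super-packet: when the segment count is untrusted (SKB_GSO_DODGY) it is rederived from skb->len and gso_size, and the last segment gets whatever is left over. A small worked sketch of that arithmetic with mock numbers:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 10000, hdr_len = 66, gso_size = 1448;  /* mock values */

	/* DIV_ROUND_UP(skb->len - hdr_len, gso_size), as used for DODGY packets */
	unsigned int segs = (skb_len - hdr_len + gso_size - 1) / gso_size;

	unsigned int len = gso_size + hdr_len;                    /* length of a full segment   */
	unsigned int last_len = skb_len - gso_size * (segs - 1);  /* possibly short last segment */

	printf("segs=%u len=%u last_len=%u\n", segs, len, last_len);
	return 0;
}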
/linux-6.1.9/drivers/net/ethernet/hisilicon/hns3/
hns3_enet.h
743 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
