Lines matching refs: q — cross-reference listing of the receive-queue pointer q in the funeth Rx path (funeth_rx.c); each entry gives the source line number, the matching source line, and the enclosing function, noting whether q is an argument or a local there.
50 static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf) in cache_offer() argument
52 struct funeth_rx_cache *c = &q->cache; in cache_offer()
58 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_offer()
67 static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb) in cache_get() argument
69 struct funeth_rx_cache *c = &q->cache; in cache_get()
77 dma_sync_single_for_device(q->dma_dev, buf->dma_addr, in cache_get()
88 dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, in cache_get()
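The cache_offer()/cache_get() pair above implements a small per-queue page-recycling cache: pages whose DMA mapping is still live are parked in a ring and handed back to the queue once the stack has dropped its references. A minimal sketch of the idea, with hypothetical names (rx_page_cache, rx_cache_offer, rx_cache_get) and the DMA sync/unmap details left out:

#include <linux/mm.h>

struct rx_page_cache {
        struct page     *pages[64];     /* power-of-2 ring of parked pages */
        unsigned int    prod;
        unsigned int    cons;
        unsigned int    mask;           /* ARRAY_SIZE(pages) - 1 */
};

static bool rx_cache_offer(struct rx_page_cache *c, struct page *p)
{
        if (c->prod - c->cons > c->mask)
                return false;           /* ring full: caller unmaps and frees instead */
        c->pages[c->prod++ & c->mask] = p;
        return true;
}

static struct page *rx_cache_get(struct rx_page_cache *c)
{
        struct page *p;

        if (c->prod == c->cons)
                return NULL;
        p = c->pages[c->cons & c->mask];
        if (page_ref_count(p) != 1)
                return NULL;            /* page still referenced up the stack */
        c->cons++;
        return p;
}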
98 static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb, in funeth_alloc_page() argument
103 if (cache_get(q, rb)) in funeth_alloc_page()
110 rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE, in funeth_alloc_page()
112 if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) { in funeth_alloc_page()
113 FUN_QSTAT_INC(q, rx_map_err); in funeth_alloc_page()
118 FUN_QSTAT_INC(q, rx_page_alloc); in funeth_alloc_page()
127 static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb) in funeth_free_page() argument
130 dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE, in funeth_free_page()
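funeth_alloc_page() above first tries the recycling cache and only then allocates and DMA-maps a fresh page, bumping the rx_map_err/rx_page_alloc counters as it goes. A stripped-down sketch of the allocate-map-check sequence (hypothetical helper name; cache lookup and statistics omitted):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static int rx_alloc_and_map_page(struct device *dma_dev, int node, gfp_t gfp,
                                 struct page **pp, dma_addr_t *daddr)
{
        struct page *p = alloc_pages_node(node, gfp | __GFP_NOWARN, 0);

        if (!p)
                return -ENOMEM;

        /* DMA_FROM_DEVICE suffices for plain Rx; an XDP_TX-capable queue
         * would map the page DMA_BIDIRECTIONAL instead.
         */
        *daddr = dma_map_page(dma_dev, p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dma_dev, *daddr))) {
                __free_page(p);
                return -ENOMEM;         /* the driver counts this as rx_map_err */
        }
        *pp = p;
        return 0;
}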
141 static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va, in fun_run_xdp() argument
151 &q->xdp_rxq); in fun_run_xdp()
155 xdp_prog = READ_ONCE(q->xdp_prog); in fun_run_xdp()
171 FUN_QSTAT_INC(q, xdp_tx); in fun_run_xdp()
172 q->xdp_flush |= FUN_XDP_FLUSH_TX; in fun_run_xdp()
177 if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog))) in fun_run_xdp()
179 FUN_QSTAT_INC(q, xdp_redir); in fun_run_xdp()
180 q->xdp_flush |= FUN_XDP_FLUSH_REDIR; in fun_run_xdp()
183 bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act); in fun_run_xdp()
186 trace_xdp_exception(q->netdev, xdp_prog, act); in fun_run_xdp()
188 q->cur_buf->pg_refs++; /* return frags' page reference */ in fun_run_xdp()
189 FUN_QSTAT_INC(q, xdp_err); in fun_run_xdp()
192 q->cur_buf->pg_refs++; in fun_run_xdp()
193 FUN_QSTAT_INC(q, xdp_drops); in fun_run_xdp()
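fun_run_xdp() follows the standard XDP verdict dispatch: build an xdp_buff over the received fragment, run the program read via READ_ONCE(), and act on XDP_PASS/TX/REDIRECT/ABORTED/DROP, counting each outcome. The sketch below shows that control flow using core kernel APIs; the Tx enqueue, flush flags and per-queue counters of the real driver are reduced to comments, and rx_run_xdp(), the PAGE_SIZE frame size and XDP_PACKET_HEADROOM are assumptions.

#include <linux/filter.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>

static void *rx_run_xdp(struct net_device *dev, struct xdp_rxq_info *rxq_info,
                        struct bpf_prog *prog, void *buf_va, unsigned int len)
{
        struct xdp_buff xdp;
        u32 act;

        xdp_init_buff(&xdp, PAGE_SIZE, rxq_info);
        xdp_prepare_buff(&xdp, buf_va, XDP_PACKET_HEADROOM, len, false);

        act = bpf_prog_run_xdp(prog, &xdp);
        switch (act) {
        case XDP_PASS:
                return xdp.data;        /* hand the (possibly adjusted) frame to the stack */
        case XDP_TX:
                /* a real driver enqueues the frame on its XDP Tx ring here and
                 * flushes that ring at the end of the NAPI poll
                 */
                break;
        case XDP_REDIRECT:
                if (!xdp_do_redirect(dev, &xdp, prog))
                        break;          /* consumed; xdp_do_flush() runs later */
                goto xdp_err;           /* redirect failed */
        default:
                bpf_warn_invalid_xdp_action(dev, prog, act);
                fallthrough;
        case XDP_ABORTED:
xdp_err:
                trace_xdp_exception(dev, prog, act);
                fallthrough;
        case XDP_DROP:
                break;
        }
        return NULL;                    /* frame not passed up the stack */
}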
256 get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len) in get_buf() argument
258 if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset) in get_buf()
268 buf->node == numa_mem_id()) || !q->spare_buf.page) { in get_buf()
269 dma_sync_single_for_device(q->dma_dev, buf->dma_addr, in get_buf()
273 cache_offer(q, buf); in get_buf()
274 *buf = q->spare_buf; in get_buf()
275 q->spare_buf.page = NULL; in get_buf()
276 q->rqes[q->rq_cons & q->rq_mask] = in get_buf()
279 q->buf_offset = 0; in get_buf()
280 q->rq_cons++; in get_buf()
281 return &q->bufs[q->rq_cons & q->rq_mask]; in get_buf()
296 static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len, in fun_gather_pkt() argument
299 struct funeth_rxbuf *buf = q->cur_buf; in fun_gather_pkt()
304 buf = get_buf(q, buf, tot_len); in fun_gather_pkt()
312 if (!q->spare_buf.page && in fun_gather_pkt()
313 funeth_alloc_page(q, &q->spare_buf, numa_mem_id(), in fun_gather_pkt()
318 PAGE_SIZE - q->buf_offset); in fun_gather_pkt()
319 dma_sync_single_for_cpu(q->dma_dev, in fun_gather_pkt()
320 buf->dma_addr + q->buf_offset, in fun_gather_pkt()
327 skb_frag_off_set(frags, q->buf_offset); in fun_gather_pkt()
334 q->buf_offset = PAGE_SIZE; in fun_gather_pkt()
336 q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN); in fun_gather_pkt()
337 q->cur_buf = buf; in fun_gather_pkt()
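get_buf() and fun_gather_pkt() above pack successive receive fragments into the current page at aligned offsets and only switch pages (recycling or replacing the old one) when the next fragment no longer fits. The placement rule itself is compact; a sketch with hypothetical names and an assumed 64-byte alignment standing in for FUN_EPRQ_PKT_ALIGN:

#include <linux/kernel.h>
#include <linux/mm.h>

#define RX_PKT_ALIGN    64              /* assumed stand-in for FUN_EPRQ_PKT_ALIGN */

/* true if a fragment of @len bytes can be placed at @buf_offset; an empty
 * page always "fits" so an oversized fragment still gets a whole page
 */
static bool rx_frag_fits(unsigned int buf_offset, unsigned int len)
{
        return buf_offset + len <= PAGE_SIZE || !buf_offset;
}

static unsigned int rx_frag_advance(unsigned int buf_offset, unsigned int frag_len)
{
        return ALIGN(buf_offset + frag_len, RX_PKT_ALIGN);
}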
349 static void advance_cq(struct funeth_rxq *q) in advance_cq() argument
351 if (unlikely(q->cq_head == q->cq_mask)) { in advance_cq()
352 q->cq_head = 0; in advance_cq()
353 q->phase ^= 1; in advance_cq()
354 q->next_cqe_info = cqe_to_info(q->cqes); in advance_cq()
356 q->cq_head++; in advance_cq()
357 q->next_cqe_info += FUNETH_CQE_SIZE; in advance_cq()
359 prefetch(q->next_cqe_info); in advance_cq()
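advance_cq() shows the phase-bit technique for walking a completion queue: instead of a producer index, each CQE carries a phase (generation) bit that the device flips on every pass over the ring, so the consumer recognises fresh entries by comparing that bit against its expected phase and toggles the expectation on wrap. A self-contained illustration with made-up names:

#include <linux/types.h>

struct demo_cqe {
        unsigned char   flags;          /* bit 0: phase written by the device */
};

struct demo_cq {
        struct demo_cqe *cqes;          /* ring of cq_mask + 1 entries */
        unsigned int    cq_mask;
        unsigned int    cq_head;
        unsigned int    phase;          /* expected phase, starts at 1 */
};

/* a CQE is new when its phase bit matches the phase we currently expect */
static bool demo_cqe_ready(const struct demo_cq *q)
{
        return (q->cqes[q->cq_head].flags & 1) == q->phase;
}

static void demo_advance_cq(struct demo_cq *q)
{
        if (q->cq_head == q->cq_mask) { /* wrapped: restart and flip expectation */
                q->cq_head = 0;
                q->phase ^= 1;
        } else {
                q->cq_head++;
        }
}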
366 static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q) in fun_handle_cqe_pkt() argument
368 const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info); in fun_handle_cqe_pkt()
370 struct net_device *ndev = q->netdev; in fun_handle_cqe_pkt()
380 u64_stats_update_begin(&q->syncp); in fun_handle_cqe_pkt()
381 q->stats.rx_pkts++; in fun_handle_cqe_pkt()
382 q->stats.rx_bytes += pkt_len; in fun_handle_cqe_pkt()
383 u64_stats_update_end(&q->syncp); in fun_handle_cqe_pkt()
385 advance_cq(q); in fun_handle_cqe_pkt()
393 ref_ok = fun_gather_pkt(q, tot_len, frags); in fun_handle_cqe_pkt()
396 va = fun_run_xdp(q, frags, va, ref_ok, xdp_q); in fun_handle_cqe_pkt()
416 skb = napi_get_frags(q->napi); in fun_handle_cqe_pkt()
433 skb_record_rx_queue(skb, q->qidx); in fun_handle_cqe_pkt()
435 if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash)) in fun_handle_cqe_pkt()
438 if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) { in fun_handle_cqe_pkt()
439 FUN_QSTAT_INC(q, rx_cso); in fun_handle_cqe_pkt()
443 if (unlikely(rx_hwtstamp_enabled(q->netdev))) in fun_handle_cqe_pkt()
446 trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv); in fun_handle_cqe_pkt()
448 gro_res = skb->data_len ? napi_gro_frags(q->napi) : in fun_handle_cqe_pkt()
449 napi_gro_receive(q->napi, skb); in fun_handle_cqe_pkt()
451 FUN_QSTAT_INC(q, gro_merged); in fun_handle_cqe_pkt()
453 FUN_QSTAT_INC(q, gro_pkts); in fun_handle_cqe_pkt()
457 FUN_QSTAT_INC(q, rx_mem_drops); in fun_handle_cqe_pkt()
462 q->cur_buf->pg_refs++; in fun_handle_cqe_pkt()
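Once fun_gather_pkt() has produced the fragment list and XDP has passed the frame, fun_handle_cqe_pkt() hands it to GRO through the NAPI frag interface, tagging the skb with the receive queue, RSS hash and checksum status first. A reduced sketch of that delivery step (single fragment, hypothetical helper name; statistics, VLAN and timestamp handling left out):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro.h>

static void rx_deliver_frag(struct napi_struct *napi, struct page *page,
                            unsigned int off, unsigned int len,
                            u16 qidx, u32 hash, bool csum_ok)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb))
                return;                 /* the real driver counts rx_mem_drops and
                                         * keeps the page reference on the queue */

        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        skb_record_rx_queue(skb, qidx);
        skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
        if (csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        /* frag-based GRO; a purely linear skb would go through napi_gro_receive() */
        napi_gro_frags(napi);
}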
480 static int fun_process_cqes(struct funeth_rxq *q, int budget) in fun_process_cqes() argument
482 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_process_cqes()
489 while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) { in fun_process_cqes()
493 fun_handle_cqe_pkt(q, xdp_q); in fun_process_cqes()
497 if (unlikely(q->xdp_flush)) { in fun_process_cqes()
498 if (q->xdp_flush & FUN_XDP_FLUSH_TX) in fun_process_cqes()
500 if (q->xdp_flush & FUN_XDP_FLUSH_REDIR) in fun_process_cqes()
502 q->xdp_flush = 0; in fun_process_cqes()
514 struct funeth_rxq *q = irq->rxq; in fun_rxq_napi_poll() local
515 int work_done = budget - fun_process_cqes(q, budget); in fun_rxq_napi_poll()
516 u32 cq_db_val = q->cq_head; in fun_rxq_napi_poll()
519 FUN_QSTAT_INC(q, rx_budget); in fun_rxq_napi_poll()
521 cq_db_val |= q->irq_db_val; in fun_rxq_napi_poll()
524 if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) { in fun_rxq_napi_poll()
525 u64_stats_update_begin(&q->syncp); in fun_rxq_napi_poll()
526 q->stats.rx_bufs += q->rq_cons - q->rq_cons_db; in fun_rxq_napi_poll()
527 u64_stats_update_end(&q->syncp); in fun_rxq_napi_poll()
528 q->rq_cons_db = q->rq_cons; in fun_rxq_napi_poll()
529 writel((q->rq_cons - 1) & q->rq_mask, q->rq_db); in fun_rxq_napi_poll()
532 writel(cq_db_val, q->cq_db); in fun_rxq_napi_poll()
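fun_process_cqes() and fun_rxq_napi_poll() follow the usual NAPI contract: consume up to the budget, flush any pending XDP work, and only re-arm the completion interrupt (by OR-ing an arm bit into the CQ doorbell value) when napi_complete_done() accepts the completion. A skeleton of that poll callback; the queue struct, the arm bit and the process-CQEs helper are hypothetical stand-ins:

#include <linux/netdevice.h>
#include <linux/io.h>

struct sketch_rxq {
        struct napi_struct      napi;
        u32                     cq_head;
        u32                     irq_arm_bit;    /* doorbell flag that re-enables the IRQ */
        void __iomem            *cq_db;
};

/* stand-in: the real version walks the CQ ring with the phase-bit check
 * shown earlier and returns whatever budget it did not use
 */
static int sketch_process_cqes(struct sketch_rxq *q, int budget)
{
        return budget;
}

static int sketch_napi_poll(struct napi_struct *napi, int budget)
{
        struct sketch_rxq *q = container_of(napi, struct sketch_rxq, napi);
        int work_done = budget - sketch_process_cqes(q, budget);
        u32 db_val = q->cq_head;

        /* re-arm only when the ring is drained and NAPI accepted completion;
         * otherwise just release the processed CQEs back to the device
         */
        if (work_done < budget && napi_complete_done(napi, work_done))
                db_val |= q->irq_arm_bit;

        writel(db_val, q->cq_db);
        return work_done;
}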
537 static void fun_rxq_free_bufs(struct funeth_rxq *q) in fun_rxq_free_bufs() argument
539 struct funeth_rxbuf *b = q->bufs; in fun_rxq_free_bufs()
542 for (i = 0; i <= q->rq_mask; i++, b++) in fun_rxq_free_bufs()
543 funeth_free_page(q, b); in fun_rxq_free_bufs()
545 funeth_free_page(q, &q->spare_buf); in fun_rxq_free_bufs()
546 q->cur_buf = NULL; in fun_rxq_free_bufs()
550 static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node) in fun_rxq_alloc_bufs() argument
552 struct funeth_rxbuf *b = q->bufs; in fun_rxq_alloc_bufs()
555 for (i = 0; i <= q->rq_mask; i++, b++) { in fun_rxq_alloc_bufs()
556 if (funeth_alloc_page(q, b, node, GFP_KERNEL)) { in fun_rxq_alloc_bufs()
557 fun_rxq_free_bufs(q); in fun_rxq_alloc_bufs()
560 q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr); in fun_rxq_alloc_bufs()
562 q->cur_buf = q->bufs; in fun_rxq_alloc_bufs()
576 static void fun_rxq_free_cache(struct funeth_rxq *q) in fun_rxq_free_cache() argument
578 struct funeth_rxbuf *b = q->cache.bufs; in fun_rxq_free_cache()
581 for (i = 0; i <= q->cache.mask; i++, b++) in fun_rxq_free_cache()
582 funeth_free_page(q, b); in fun_rxq_free_cache()
584 kvfree(q->cache.bufs); in fun_rxq_free_cache()
585 q->cache.bufs = NULL; in fun_rxq_free_cache()
588 int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog) in fun_rxq_set_bpf() argument
590 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_rxq_set_bpf()
596 if (headroom != q->headroom) { in fun_rxq_set_bpf()
601 0, q->hw_cqid, headroom); in fun_rxq_set_bpf()
606 q->headroom = headroom; in fun_rxq_set_bpf()
609 WRITE_ONCE(q->xdp_prog, prog); in fun_rxq_set_bpf()
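fun_rxq_set_bpf() is mostly about headroom: an attached XDP program needs extra space in front of each frame, so the queue is re-programmed with the new headroom before the program pointer is published. The pointer is published with WRITE_ONCE() and sampled per packet with READ_ONCE() in fun_run_xdp(), which keeps attach/detach safe against a running poll without extra locking. A minimal sketch under those assumptions (struct, helper name and the NET_SKB_PAD default are illustrative; the real driver issues a device command to change the headroom):

#include <linux/bpf.h>
#include <linux/skbuff.h>

struct sketch_xdp_rxq {
        struct bpf_prog         *xdp_prog;      /* read per packet with READ_ONCE() */
        unsigned int            headroom;
};

static void sketch_set_xdp_prog(struct sketch_xdp_rxq *q, struct bpf_prog *prog)
{
        /* XDP frames need XDP_PACKET_HEADROOM in front of the data; plain Rx
         * gets by with the smaller default skb headroom
         */
        q->headroom = prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
        WRITE_ONCE(q->xdp_prog, prog);
}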
621 struct funeth_rxq *q; in fun_rxq_create_sw() local
626 q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node); in fun_rxq_create_sw()
627 if (!q) in fun_rxq_create_sw()
630 q->qidx = qidx; in fun_rxq_create_sw()
631 q->netdev = dev; in fun_rxq_create_sw()
632 q->cq_mask = ncqe - 1; in fun_rxq_create_sw()
633 q->rq_mask = nrqe - 1; in fun_rxq_create_sw()
634 q->numa_node = numa_node; in fun_rxq_create_sw()
635 q->rq_db_thres = nrqe / 4; in fun_rxq_create_sw()
636 u64_stats_init(&q->syncp); in fun_rxq_create_sw()
637 q->dma_dev = &fp->pdev->dev; in fun_rxq_create_sw()
639 q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), in fun_rxq_create_sw()
640 sizeof(*q->bufs), false, numa_node, in fun_rxq_create_sw()
641 &q->rq_dma_addr, (void **)&q->bufs, NULL); in fun_rxq_create_sw()
642 if (!q->rqes) in fun_rxq_create_sw()
645 q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0, in fun_rxq_create_sw()
646 false, numa_node, &q->cq_dma_addr, NULL, in fun_rxq_create_sw()
648 if (!q->cqes) in fun_rxq_create_sw()
651 err = fun_rxq_init_cache(&q->cache, nrqe, numa_node); in fun_rxq_create_sw()
655 err = fun_rxq_alloc_bufs(q, numa_node); in fun_rxq_create_sw()
659 q->stats.rx_bufs = q->rq_mask; in fun_rxq_create_sw()
660 q->init_state = FUN_QSTATE_INIT_SW; in fun_rxq_create_sw()
661 return q; in fun_rxq_create_sw()
664 fun_rxq_free_cache(q); in fun_rxq_create_sw()
666 dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes, in fun_rxq_create_sw()
667 q->cq_dma_addr); in fun_rxq_create_sw()
669 fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes, in fun_rxq_create_sw()
670 q->rq_dma_addr, q->bufs); in fun_rxq_create_sw()
672 kfree(q); in fun_rxq_create_sw()
678 static void fun_rxq_free_sw(struct funeth_rxq *q) in fun_rxq_free_sw() argument
680 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_rxq_free_sw()
682 fun_rxq_free_cache(q); in fun_rxq_free_sw()
683 fun_rxq_free_bufs(q); in fun_rxq_free_sw()
684 fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false, in fun_rxq_free_sw()
685 q->rqes, q->rq_dma_addr, q->bufs); in fun_rxq_free_sw()
686 dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE, in fun_rxq_free_sw()
687 q->cqes, q->cq_dma_addr); in fun_rxq_free_sw()
690 fp->rx_packets += q->stats.rx_pkts; in fun_rxq_free_sw()
691 fp->rx_bytes += q->stats.rx_bytes; in fun_rxq_free_sw()
692 fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops; in fun_rxq_free_sw()
694 kfree(q); in fun_rxq_free_sw()
698 int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq) in fun_rxq_create_dev() argument
700 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_rxq_create_dev()
701 unsigned int ncqe = q->cq_mask + 1; in fun_rxq_create_dev()
702 unsigned int nrqe = q->rq_mask + 1; in fun_rxq_create_dev()
705 err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx, in fun_rxq_create_dev()
710 err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED, in fun_rxq_create_dev()
715 q->phase = 1; in fun_rxq_create_dev()
716 q->irq_cnt = 0; in fun_rxq_create_dev()
717 q->cq_head = 0; in fun_rxq_create_dev()
718 q->rq_cons = 0; in fun_rxq_create_dev()
719 q->rq_cons_db = 0; in fun_rxq_create_dev()
720 q->buf_offset = 0; in fun_rxq_create_dev()
721 q->napi = &irq->napi; in fun_rxq_create_dev()
722 q->irq_db_val = fp->cq_irq_db; in fun_rxq_create_dev()
723 q->next_cqe_info = cqe_to_info(q->cqes); in fun_rxq_create_dev()
725 q->xdp_prog = fp->xdp_prog; in fun_rxq_create_dev()
726 q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM; in fun_rxq_create_dev()
730 FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0, in fun_rxq_create_dev()
732 &q->hw_sqid, &q->rq_db); in fun_rxq_create_dev()
738 q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe, in fun_rxq_create_dev()
739 q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0, in fun_rxq_create_dev()
741 &q->hw_cqid, &q->cq_db); in fun_rxq_create_dev()
745 irq->rxq = q; in fun_rxq_create_dev()
746 writel(q->rq_mask, q->rq_db); in fun_rxq_create_dev()
747 q->init_state = FUN_QSTATE_INIT_FULL; in fun_rxq_create_dev()
749 netif_info(fp, ifup, q->netdev, in fun_rxq_create_dev()
751 q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx, in fun_rxq_create_dev()
752 q->numa_node, q->headroom); in fun_rxq_create_dev()
756 fun_destroy_sq(fp->fdev, q->hw_sqid); in fun_rxq_create_dev()
758 xdp_rxq_info_unreg(&q->xdp_rxq); in fun_rxq_create_dev()
760 netdev_err(q->netdev, in fun_rxq_create_dev()
762 q->qidx, err); in fun_rxq_create_dev()
766 static void fun_rxq_free_dev(struct funeth_rxq *q) in fun_rxq_free_dev() argument
768 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_rxq_free_dev()
771 if (q->init_state < FUN_QSTATE_INIT_FULL) in fun_rxq_free_dev()
774 irq = container_of(q->napi, struct fun_irq, napi); in fun_rxq_free_dev()
775 netif_info(fp, ifdown, q->netdev, in fun_rxq_free_dev()
777 q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx); in fun_rxq_free_dev()
780 xdp_rxq_info_unreg(&q->xdp_rxq); in fun_rxq_free_dev()
781 fun_destroy_sq(fp->fdev, q->hw_sqid); in fun_rxq_free_dev()
782 fun_destroy_cq(fp->fdev, q->hw_cqid); in fun_rxq_free_dev()
783 q->init_state = FUN_QSTATE_INIT_SW; in fun_rxq_free_dev()
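fun_rxq_create_dev() registers the queue with the XDP core before the hardware queues are created, and its error path unwinds in reverse order (unregister the rxq info, destroy the SQ). The registration itself is the standard xdp_rxq_info API; a sketch of just that part, with a hypothetical wrapper name:

#include <net/xdp.h>

static int sketch_reg_xdp_rxq(struct xdp_rxq_info *xdp_rxq,
                              struct net_device *dev, u32 qidx, u32 napi_id)
{
        int err = xdp_rxq_info_reg(xdp_rxq, dev, qidx, napi_id);

        if (err)
                return err;

        /* pages are owned and refcounted by the driver itself */
        err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
        if (err)
                xdp_rxq_info_unreg(xdp_rxq);
        return err;
}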
793 struct funeth_rxq *q = *qp; in funeth_rxq_create() local
796 if (!q) { in funeth_rxq_create()
797 q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq); in funeth_rxq_create()
798 if (IS_ERR(q)) in funeth_rxq_create()
799 return PTR_ERR(q); in funeth_rxq_create()
802 if (q->init_state >= state) in funeth_rxq_create()
805 err = fun_rxq_create_dev(q, irq); in funeth_rxq_create()
808 fun_rxq_free_sw(q); in funeth_rxq_create()
813 *qp = q; in funeth_rxq_create()
818 struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state) in funeth_rxq_free() argument
821 fun_rxq_free_dev(q); in funeth_rxq_free()
824 fun_rxq_free_sw(q); in funeth_rxq_free()
825 q = NULL; in funeth_rxq_free()
828 return q; in funeth_rxq_free()