Lines matching refs: pfvf. Every reference to the driver-private context pointer pfvf (struct otx2_nic) in the Marvell OcteonTX2 nic driver's TX/RX path (otx2_txrx.c). Each entry gives the source line number, the matching code, the enclosing function, and whether pfvf is bound there as an argument or a local.
29 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
35 static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, in otx2_nix_cq_op_status() argument
41 status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr); in otx2_nix_cq_op_status()
45 dev_err(pfvf->dev, "CQ stopped due to error"); in otx2_nix_cq_op_status()
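The status read above uses the NIX atomic-op trick: the CQ index is folded into the upper bits of the addend, and the 64-bit value returned by the atomic add carries the CQ state, including an error flag. A minimal sketch of the pattern; CQ_ERR_MASK is an assumed name, not the driver's definition:

	u64 incr = (u64)cq->cq_idx << 32;	/* select the CQ in the op word */
	u64 status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);

	if (status & CQ_ERR_MASK) {		/* assumed name for the error bits */
		dev_err(pfvf->dev, "CQ stopped due to error");
		return -EINVAL;
	}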
83 static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, in otx2_dma_map_skb_frag() argument
101 return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE); in otx2_dma_map_skb_frag()
104 static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) in otx2_dma_unmap_skb_frags() argument
109 otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], in otx2_dma_unmap_skb_frags()
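otx2_dma_map_skb_frag() is the stock pattern for mapping one skb segment for TX, and otx2_dma_unmap_skb_frags() walks the saved sg_list to undo it at completion time. A reconstruction of the map side using only generic kernel APIs; the example_ name is mine, and the real helper routes through the driver's otx2_dma_map_page() wrapper:

	/* Map segment 'seg' of an skb: segment 0 is the linear area,
	 * segments >= 1 are the paged frags.
	 */
	static dma_addr_t example_map_skb_seg(struct device *dev,
					      struct sk_buff *skb,
					      int seg, int *len)
	{
		struct page *page;
		int offset;

		if (seg == 0) {
			page = virt_to_page(skb->data);
			offset = offset_in_page(skb->data);
			*len = skb_headlen(skb);
		} else {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[seg - 1];

			page = skb_frag_page(frag);
			offset = skb_frag_off(frag);
			*len = skb_frag_size(frag);
		}
		/* the caller must check dma_mapping_error() on the result */
		return dma_map_page(dev, page, offset, *len, DMA_TO_DEVICE);
	}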
115 static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, in otx2_xdp_snd_pkt_handler() argument
126 pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); in otx2_xdp_snd_pkt_handler()
127 otx2_dma_unmap_page(pfvf, sg->dma_addr[0], in otx2_xdp_snd_pkt_handler()
133 static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, in otx2_snd_pkt_handler() argument
146 if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf)) in otx2_snd_pkt_handler()
148 pfvf->netdev->name, cq->cint_idx, in otx2_snd_pkt_handler()
159 timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp); in otx2_snd_pkt_handler()
160 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); in otx2_snd_pkt_handler()
171 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_snd_pkt_handler()
176 static void otx2_set_rxtstamp(struct otx2_nic *pfvf, in otx2_set_rxtstamp() argument
182 if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)) in otx2_set_rxtstamp()
185 timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data); in otx2_set_rxtstamp()
187 err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns); in otx2_set_rxtstamp()
194 static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, in otx2_skb_add_frag() argument
202 va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova)); in otx2_skb_add_frag()
211 otx2_set_rxtstamp(pfvf, skb, va); in otx2_skb_add_frag()
220 len - off, pfvf->rbsize); in otx2_skb_add_frag()
227 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL); in otx2_skb_add_frag()
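On receive, each hardware segment becomes a page fragment of the skb: the IOVA from the CQE is translated back through the IOMMU domain and attached with skb_add_rx_frag(), using the full receive buffer size as truesize; when the fragment cannot be attached, the buffer pointer is returned to the hardware aura pool instead (the aura_freeptr() call above). Condensed sketch of the happy path:

	void *va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
	struct page *page = virt_to_page(va);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			va - page_address(page) + off,
			len - off, pfvf->rbsize);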
232 static void otx2_set_rxhash(struct otx2_nic *pfvf, in otx2_set_rxhash() argument
239 if (!(pfvf->netdev->features & NETIF_F_RXHASH)) in otx2_set_rxhash()
242 rss = &pfvf->hw.rss_info; in otx2_set_rxhash()
254 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, in otx2_free_rcv_seg() argument
268 pfvf->hw_ops->aura_freeptr(pfvf, qidx, in otx2_free_rcv_seg()
274 static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, in otx2_check_rcv_errors() argument
277 struct otx2_drv_stats *stats = &pfvf->hw.drv_stats; in otx2_check_rcv_errors()
280 if (netif_msg_rx_err(pfvf)) in otx2_check_rcv_errors()
281 netdev_err(pfvf->netdev, in otx2_check_rcv_errors()
329 if (pfvf->netdev->features & NETIF_F_RXALL) in otx2_check_rcv_errors()
334 otx2_free_rcv_seg(pfvf, cqe, qidx); in otx2_check_rcv_errors()
338 static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, in otx2_rcv_pkt_handler() argument
352 if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) in otx2_rcv_pkt_handler()
356 if (pfvf->xdp_prog) in otx2_rcv_pkt_handler()
357 if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush)) in otx2_rcv_pkt_handler()
371 if (otx2_skb_add_frag(pfvf, skb, *seg_addr, in otx2_rcv_pkt_handler()
377 otx2_set_rxhash(pfvf, cqe, skb); in otx2_rcv_pkt_handler()
380 if (pfvf->netdev->features & NETIF_F_RXCSUM) in otx2_rcv_pkt_handler()
388 static int otx2_rx_napi_handler(struct otx2_nic *pfvf, in otx2_rx_napi_handler() argument
399 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_rx_napi_handler()
414 otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush); in otx2_rx_napi_handler()
425 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_rx_napi_handler()
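Each poll burst ends by acknowledging the consumed CQEs with a single doorbell write, CQ index in the upper 32 bits and CQE count in the lower; a reconstructed form of the otx2_write64() call above:

	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);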
433 struct otx2_nic *pfvf = dev; in otx2_refill_pool_ptrs() local
438 if (otx2_alloc_buffer(pfvf, cq, &bufptr)) in otx2_refill_pool_ptrs()
440 otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); in otx2_refill_pool_ptrs()
447 static int otx2_tx_napi_handler(struct otx2_nic *pfvf, in otx2_tx_napi_handler() argument
458 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_tx_napi_handler()
462 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
463 sq = &pfvf->qset.sq[qidx]; in otx2_tx_napi_handler()
473 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
476 otx2_xdp_snd_pkt_handler(pfvf, sq, cqe); in otx2_tx_napi_handler()
478 otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx], in otx2_tx_napi_handler()
490 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_tx_napi_handler()
496 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_tx_napi_handler()
498 if (qidx >= pfvf->hw.tx_queues) in otx2_tx_napi_handler()
499 qidx -= pfvf->hw.xdp_queues; in otx2_tx_napi_handler()
500 txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_tx_napi_handler()
505 netif_carrier_ok(pfvf->netdev)) in otx2_tx_napi_handler()
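Once completions have been reaped, descriptors are free again, so the handler maps the CQ back onto its netdev TX queue (the qidx arithmetic above skips the XDP queue range) and wakes it with the stock pattern:

	txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	if (netif_tx_queue_stopped(txq) && netif_carrier_ok(pfvf->netdev))
		netif_tx_wake_queue(txq);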
511 static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll) in otx2_adjust_adaptive_coalese() argument
523 dim_update_sample(pfvf->napi_events, in otx2_adjust_adaptive_coalese()
537 struct otx2_nic *pfvf; in otx2_napi_handler() local
541 pfvf = (struct otx2_nic *)cq_poll->dev; in otx2_napi_handler()
542 qset = &pfvf->qset; in otx2_napi_handler()
551 workdone += otx2_rx_napi_handler(pfvf, napi, in otx2_napi_handler()
554 workdone += otx2_tx_napi_handler(pfvf, cq, budget); in otx2_napi_handler()
559 filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); in otx2_napi_handler()
561 otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); in otx2_napi_handler()
565 if (pfvf->flags & OTX2_FLAG_INTF_DOWN) in otx2_napi_handler()
569 if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) in otx2_napi_handler()
570 otx2_adjust_adaptive_coalese(pfvf, cq_poll); in otx2_napi_handler()
576 work = &pfvf->refill_wrk[cq->cq_idx]; in otx2_napi_handler()
587 otx2_write64(pfvf, in otx2_napi_handler()
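otx2_napi_handler() is a conventional NAPI poll: drain the RX and TX CQs against the budget, refill the buffer pool, and only re-arm the completion interrupt when work stayed under budget and the interface is still up. The bare skeleton of that contract, with example_ names standing in for the driver's internals:

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct example_cq_poll *cq_poll =
			container_of(napi, struct example_cq_poll, napi);
		int workdone = example_process_cqs(cq_poll, budget);

		/* re-enable the queue interrupt only once polling is done */
		if (workdone < budget && napi_complete_done(napi, workdone))
			example_enable_cint(cq_poll);

		return workdone;
	}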
614 static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_sg() argument
640 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); in otx2_sqe_add_sg()
641 if (dma_mapping_error(pfvf->dev, dma_addr)) in otx2_sqe_add_sg()
659 static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_ext() argument
673 ext->lso_format = pfvf->hw.lso_tsov4_idx; in otx2_sqe_add_ext()
682 ext->lso_format = pfvf->hw.lso_tsov6_idx; in otx2_sqe_add_ext()
699 ext->lso_format = pfvf->hw.lso_udpv4_idx; in otx2_sqe_add_ext()
702 ext->lso_format = pfvf->hw.lso_udpv6_idx; in otx2_sqe_add_ext()
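The extension header selects one of the LSO formats programmed into the NIX at init time, keyed off the GSO type; a condensed view of the branches above (the UDP case additionally inspects the L3 protocol):

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		ext->lso_format = pfvf->hw.lso_tsov4_idx;
	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		ext->lso_format = pfvf->hw.lso_tsov6_idx;
	else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		ext->lso_format = vlan_get_protocol(skb) == htons(ETH_P_IP) ?
				  pfvf->hw.lso_udpv4_idx :
				  pfvf->hw.lso_udpv6_idx;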
750 static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sqe_add_hdr() argument
765 sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ? in otx2_sqe_add_hdr()
766 qidx + pfvf->hw.xdp_queues : qidx; in otx2_sqe_add_hdr()
798 static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf, in otx2_dma_map_tso_skb() argument
816 dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); in otx2_dma_map_tso_skb()
817 if (dma_mapping_error(pfvf->dev, dma_addr)) in otx2_dma_map_tso_skb()
827 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_dma_map_tso_skb()
880 static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, in otx2_sq_append_tso() argument
883 struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_sq_append_tso()
895 if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) { in otx2_sq_append_tso()
912 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); in otx2_sq_append_tso()
961 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_sq_append_tso()
965 static bool is_hw_tso_supported(struct otx2_nic *pfvf, in is_hw_tso_supported() argument
970 if (test_bit(HW_TSO, &pfvf->hw.cap_flag)) in is_hw_tso_supported()
974 if (!is_96xx_B0(pfvf->pdev)) in is_hw_tso_supported()
990 static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb) in otx2_get_sqe_count() argument
996 if (is_hw_tso_supported(pfvf, skb)) in otx2_get_sqe_count()
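The free-descriptor check in otx2_sq_append_skb() hinges on this count: a single SQE when there is no GSO or the hardware can segment, otherwise one SQE per software TSO segment. A plausible reconstruction; the exact body may differ:

	static int example_get_sqe_count(struct otx2_nic *pfvf,
					 struct sk_buff *skb)
	{
		if (!skb_shinfo(skb)->gso_size)
			return 1;
		if (is_hw_tso_supported(pfvf, skb))
			return 1;			/* HW TSO: one SQE */
		return skb_shinfo(skb)->gso_segs;	/* SW TSO: one per segment */
	}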
1078 static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, in otx2_set_txtstamp() argument
1092 if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC && in otx2_set_txtstamp()
1097 ts = ns_to_timespec64(pfvf->ptp->tstamp); in otx2_set_txtstamp()
1135 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt); in otx2_set_txtstamp()
1145 struct otx2_nic *pfvf = netdev_priv(netdev); in otx2_sq_append_skb() local
1156 if (free_desc < otx2_get_sqe_count(pfvf, skb)) in otx2_sq_append_skb()
1172 if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { in otx2_sq_append_skb()
1176 otx2_sq_append_tso(pfvf, sq, skb, qidx); in otx2_sq_append_skb()
1185 otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx); in otx2_sq_append_skb()
1189 otx2_sqe_add_ext(pfvf, sq, skb, &offset); in otx2_sq_append_skb()
1192 if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) { in otx2_sq_append_skb()
1193 otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); in otx2_sq_append_skb()
1197 otx2_set_txtstamp(pfvf, skb, sq, &offset); in otx2_sq_append_skb()
1204 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_sq_append_skb()
1210 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx) in otx2_cleanup_rx_cqes() argument
1218 if (pfvf->xdp_prog) in otx2_cleanup_rx_cqes()
1221 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_cleanup_rx_cqes()
1224 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); in otx2_cleanup_rx_cqes()
1225 pool = &pfvf->qset.pool[pool_id]; in otx2_cleanup_rx_cqes()
1235 otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); in otx2_cleanup_rx_cqes()
1240 otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize); in otx2_cleanup_rx_cqes()
1244 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_cleanup_rx_cqes()
1248 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) in otx2_cleanup_tx_cqes() argument
1259 qidx = cq->cq_idx - pfvf->hw.rx_queues; in otx2_cleanup_tx_cqes()
1260 sq = &pfvf->qset.sq[qidx]; in otx2_cleanup_tx_cqes()
1262 if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) in otx2_cleanup_tx_cqes()
1277 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_cleanup_tx_cqes()
1284 if (qidx >= pfvf->hw.tx_queues) in otx2_cleanup_tx_cqes()
1285 qidx -= pfvf->hw.xdp_queues; in otx2_cleanup_tx_cqes()
1286 txq = netdev_get_tx_queue(pfvf->netdev, qidx); in otx2_cleanup_tx_cqes()
1290 otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, in otx2_cleanup_tx_cqes()
1294 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) in otx2_rxtx_enable() argument
1299 mutex_lock(&pfvf->mbox.lock); in otx2_rxtx_enable()
1301 msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox); in otx2_rxtx_enable()
1303 msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox); in otx2_rxtx_enable()
1306 mutex_unlock(&pfvf->mbox.lock); in otx2_rxtx_enable()
1310 err = otx2_sync_mbox_msg(&pfvf->mbox); in otx2_rxtx_enable()
1311 mutex_unlock(&pfvf->mbox.lock); in otx2_rxtx_enable()
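otx2_rxtx_enable() is a straight instance of the driver's mailbox idiom: take the mbox mutex, allocate the start or stop request, send it synchronously, and drop the lock on every exit path. Reconstructed around the calls listed above:

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
	else
		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
	if (!msg) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;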
1315 void otx2_free_pending_sqe(struct otx2_nic *pfvf) in otx2_free_pending_sqe() argument
1324 for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) { in otx2_free_pending_sqe()
1325 sq = &pfvf->qset.sq[sq_idx]; in otx2_free_pending_sqe()
1332 otx2_dma_unmap_skb_frags(pfvf, sg); in otx2_free_pending_sqe()
1340 txq = netdev_get_tx_queue(pfvf->netdev, sq_idx); in otx2_free_pending_sqe()
1367 bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) in otx2_xdp_sq_append_pkt() argument
1373 sq = &pfvf->qset.sq[qidx]; in otx2_xdp_sq_append_pkt()
1395 pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); in otx2_xdp_sq_append_pkt()
1400 static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, in otx2_xdp_rcv_pkt_handler() argument
1415 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); in otx2_xdp_rcv_pkt_handler()
1418 xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); in otx2_xdp_rcv_pkt_handler()
1430 qidx += pfvf->hw.tx_queues; in otx2_xdp_rcv_pkt_handler()
1432 return otx2_xdp_sq_append_pkt(pfvf, iova, in otx2_xdp_rcv_pkt_handler()
1436 err = xdp_do_redirect(pfvf->netdev, &xdp, prog); in otx2_xdp_rcv_pkt_handler()
1438 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, in otx2_xdp_rcv_pkt_handler()
1447 bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); in otx2_xdp_rcv_pkt_handler()
1450 trace_xdp_exception(pfvf->netdev, prog, act); in otx2_xdp_rcv_pkt_handler()
1453 otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, in otx2_xdp_rcv_pkt_handler()
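Finally, the XDP receive hook is the canonical verdict dispatch: build an xdp_buff over the receive buffer, run the program, then transmit, redirect, or drop, unmapping the page whenever the buffer leaves driver control. A skeleton of that switch; headroom setup, lengths, and refcounting details are elided:

	u32 act = bpf_prog_run_xdp(prog, &xdp);

	switch (act) {
	case XDP_PASS:
		return false;			/* continue on the skb path */
	case XDP_TX:
		/* bounce the buffer onto a dedicated XDP send queue */
		return otx2_xdp_sq_append_pkt(pfvf, iova, len,
					      qidx + pfvf->hw.tx_queues);
	case XDP_REDIRECT:
		if (!xdp_do_redirect(pfvf->netdev, &xdp, prog)) {
			*need_xdp_flush = true;	/* xdp_do_flush() follows */
			return true;
		}
		break;				/* redirect failed: drop */
	case XDP_ABORTED:
		trace_xdp_exception(pfvf->netdev, prog, act);
		break;
	case XDP_DROP:
		break;
	default:
		bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
		break;
	}
	/* drop paths: release the DMA mapping back to the pool */
	otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
	return true;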