Lines matching refs:rx (identifier cross-reference output, grouped below by containing function; each matched line keeps its line number in the source file)
tsnep_rx_ring_cleanup():
 891:  static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 893:      struct device *dmadev = rx->adapter->dmadev;
 898:      entry = &rx->entry[i];
 899:      if (!rx->xsk_pool && entry->page)
 900:          page_pool_put_full_page(rx->page_pool, entry->page,
 902:      if (rx->xsk_pool && entry->xdp)
 908:      if (rx->page_pool)
 909:          page_pool_destroy(rx->page_pool);
 911:      memset(rx->entry, 0, sizeof(rx->entry));
 914:      if (rx->page[i]) {
 915:          dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
 916:                            rx->page_dma[i]);
 917:          rx->page[i] = NULL;
 918:          rx->page_dma[i] = 0;

tsnep_rx_ring_create():
 923:  static int tsnep_rx_ring_create(struct tsnep_rx *rx)
 925:      struct device *dmadev = rx->adapter->dmadev;
 933:      rx->page[i] =
 934:          dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
 936:      if (!rx->page[i]) {
 941:      entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
 943:          (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
 946:      entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
 958:      rx->page_pool = page_pool_create(&pp_params);
 959:      if (IS_ERR(rx->page_pool)) {
 960:          retval = PTR_ERR(rx->page_pool);
 961:          rx->page_pool = NULL;
 966:      entry = &rx->entry[i];
 967:      next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
 974:      tsnep_rx_ring_cleanup(rx);
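The create/cleanup pair above carves the descriptor ring out of DMA-coherent pages: TSNEP_RING_ENTRIES_PER_PAGE descriptors of TSNEP_DESC_SIZE bytes each per page, with every entry remembering both its CPU pointer and its DMA address. A minimal userspace sketch of that layout arithmetic, with assumed constant values (the real ones live in the driver's headers):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define TSNEP_DESC_SIZE 256                      /* assumed value */
#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
#define TSNEP_RING_PAGE_COUNT 4                  /* assumed value */

int main(void)
{
    /* Pretend DMA addresses of the coherent pages backing the ring. */
    uint64_t page_dma[TSNEP_RING_PAGE_COUNT] = {
        0x10000, 0x20000, 0x30000, 0x40000
    };

    for (int i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
        for (int j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
            int index = TSNEP_RING_ENTRIES_PER_PAGE * i + j;
            uint64_t desc_dma = page_dma[i] +
                                (uint64_t)TSNEP_DESC_SIZE * j;

            printf("entry %3d -> desc_dma 0x%llx\n", index,
                   (unsigned long long)desc_dma);
        }
    }
    return 0;
}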
tsnep_rx_init():
 978:  static void tsnep_rx_init(struct tsnep_rx *rx)
 982:      dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
 983:      iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
 984:      iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
 985:      rx->write = 0;
 986:      rx->read = 0;
 987:      rx->owner_counter = 1;
 988:      rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

tsnep_rx_enable():
 991:  static void tsnep_rx_enable(struct tsnep_rx *rx)
 996:      iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);

tsnep_rx_disable():
 999:  static void tsnep_rx_disable(struct tsnep_rx *rx)
1003:      iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
1004:      readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
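tsnep_rx_disable() writes the disable command and then busy-polls the control register via readx_poll_timeout() until the hardware acknowledges that RX DMA has stopped. A userspace model of the same poll-with-timeout pattern (the bit mask and retry count here are illustrative assumptions):

#include <stdio.h>

#define CONTROL_RX_ACTIVE 0x1   /* assumed status bit for illustration */

static unsigned int fake_control = CONTROL_RX_ACTIVE;

/* Stand-in for ioread32() on the queue's TSNEP_CONTROL register; a real
 * device clears the active bit once RX DMA has actually stopped. */
static unsigned int read_control(void)
{
    fake_control &= ~CONTROL_RX_ACTIVE;
    return fake_control;
}

int main(void)
{
    unsigned int val;
    int retries = 1000;     /* stand-in for readx_poll_timeout()'s timeout */

    /* iowrite32(TSNEP_CONTROL_RX_DISABLE, ...) would happen here */
    do {
        val = read_control();
    } while ((val & CONTROL_RX_ACTIVE) && --retries);

    printf(retries ? "RX disabled\n" : "timeout waiting for RX disable\n");
    return 0;
}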
tsnep_rx_desc_available():
1009:  static int tsnep_rx_desc_available(struct tsnep_rx *rx)
1011:      if (rx->read <= rx->write)
1012:          return TSNEP_RING_SIZE - rx->write + rx->read - 1;
1014:      return rx->read - rx->write - 1;
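This is the classic ring-buffer free-slot computation: one slot is always left unused so that read == write can unambiguously mean "empty". A standalone model with a few sanity checks (the TSNEP_RING_SIZE value is assumed):

#include <assert.h>
#include <stdio.h>

#define TSNEP_RING_SIZE 256   /* assumed power-of-two ring size */

static int rx_desc_available(int read, int write)
{
    if (read <= write)
        return TSNEP_RING_SIZE - write + read - 1;
    return read - write - 1;
}

int main(void)
{
    assert(rx_desc_available(0, 0) == TSNEP_RING_SIZE - 1);  /* empty */
    assert(rx_desc_available(10, 9) == 0);                   /* full  */
    assert(rx_desc_available(5, 250) == 10);                 /* wrap  */
    puts("ring arithmetic checks passed");
    return 0;
}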
tsnep_rx_free_page_buffer():
1017:  static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
1024:      page = rx->page_buffer;
1026:      page_pool_put_full_page(rx->page_pool, *page, false);

tsnep_rx_alloc_page_buffer():
1032:  static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
1040:      rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
1041:      if (!rx->page_buffer[i]) {
1042:          tsnep_rx_free_page_buffer(rx);

tsnep_rx_set_page():
1051:  static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
1057:      entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);

tsnep_rx_alloc_buffer():
1060:  static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
1062:      struct tsnep_rx_entry *entry = &rx->entry[index];
1065:      page = page_pool_dev_alloc_pages(rx->page_pool);
1068:      tsnep_rx_set_page(rx, entry, page);

tsnep_rx_reuse_buffer():
1073:  static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
1075:      struct tsnep_rx_entry *entry = &rx->entry[index];
1076:      struct tsnep_rx_entry *read = &rx->entry[rx->read];
1078:      tsnep_rx_set_page(rx, entry, read->page);

tsnep_rx_activate():
1082:  static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
1084:      struct tsnep_rx_entry *entry = &rx->entry[index];
1089:      if (index == rx->increment_owner_counter) {
1090:          rx->owner_counter++;
1091:          if (rx->owner_counter == 4)
1092:              rx->owner_counter = 1;
1093:          rx->increment_owner_counter--;
1094:          if (rx->increment_owner_counter < 0)
1095:              rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
1098:      (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
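tsnep_rx_activate() stamps each descriptor with an owner counter that cycles through 1..3 (0 appears to be reserved for reset; tsnep_rx_init() programs TSNEP_RESET_OWNER_COUNTER) and advances once per full traversal of the ring, letting the hardware distinguish a freshly activated descriptor from a stale one left over from the previous pass. A self-contained model of the counter bookkeeping (ring size assumed):

#include <stdio.h>

#define TSNEP_RING_SIZE 256   /* assumed value */

static int owner_counter = 1;
static int increment_owner_counter = TSNEP_RING_SIZE - 1;

/* Returns the counter value that would be stamped into entry 'index'. */
static int activate(int index)
{
    if (index == increment_owner_counter) {
        if (++owner_counter == 4)
            owner_counter = 1;         /* cycle 1 -> 2 -> 3 -> 1 */
        if (--increment_owner_counter < 0)
            increment_owner_counter = TSNEP_RING_SIZE - 1;
    }
    return owner_counter;
}

int main(void)
{
    int write = 0;

    /* Two full passes over the ring: the counter advances exactly once
     * per pass, at a trigger index that itself walks backwards. */
    for (int n = 0; n < 2 * TSNEP_RING_SIZE; n++) {
        activate(write);
        write = (write + 1) % TSNEP_RING_SIZE;
    }
    printf("owner counter after 2 passes: %d\n", owner_counter); /* 3 */
    return 0;
}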
tsnep_rx_alloc():
1109:  static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
1115:      index = (rx->write + i) & TSNEP_RING_MASK;
1117:      if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
1118:          rx->alloc_failed++;
1123:          tsnep_rx_reuse_buffer(rx, index);
1128:      tsnep_rx_activate(rx, index);
1132:      rx->write = (rx->write + i) & TSNEP_RING_MASK;
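tsnep_rx_alloc() fills up to count descriptors from the page pool; on an allocation failure it bumps rx->alloc_failed and, if reuse is allowed and nothing was allocated yet, resubmits the buffer at the read index so at least one descriptor stays hardware-owned and RX cannot stall completely. A userspace model of that fallback policy (buffer handling reduced to integers):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8
#define RING_MASK (RING_SIZE - 1)

static int pool_avail;               /* stand-in for the page pool */

static bool alloc_buffer(void)
{
    if (pool_avail <= 0)
        return false;
    pool_avail--;
    return true;
}

static int rx_alloc(int *write, int read, int count, bool reuse)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!alloc_buffer()) {
            /* reuse only if no other allocation was successful, so
             * the ring never runs completely dry */
            if (!i && reuse) {
                printf("reusing buffer at read index %d\n", read);
                i++;    /* one descriptor activated via reuse */
            }
            break;
        }
    }
    *write = (*write + i) & RING_MASK;
    return i;
}

int main(void)
{
    int write = 0;

    pool_avail = 0;                        /* simulate pool exhaustion */
    rx_alloc(&write, 0, 4, true);
    printf("write index now %d\n", write); /* 1 */
    return 0;
}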
tsnep_rx_refill():
1137:  static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
1141:      desc_refilled = tsnep_rx_alloc(rx, count, reuse);
1143:      tsnep_rx_enable(rx);

tsnep_rx_set_xdp():
1148:  static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
1154:      entry->desc->rx = __cpu_to_le64(entry->dma);

tsnep_rx_reuse_buffer_zc():
1157:  static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
1159:      struct tsnep_rx_entry *entry = &rx->entry[index];
1160:      struct tsnep_rx_entry *read = &rx->entry[rx->read];
1162:      tsnep_rx_set_xdp(rx, entry, read->xdp);

tsnep_rx_alloc_zc():
1166:  static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
1171:      allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
1173:      int index = (rx->write + i) & TSNEP_RING_MASK;
1174:      struct tsnep_rx_entry *entry = &rx->entry[index];
1176:      tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
1177:      tsnep_rx_activate(rx, index);
1180:      rx->alloc_failed++;
1183:      tsnep_rx_reuse_buffer_zc(rx, rx->write);
1184:      tsnep_rx_activate(rx, rx->write);
1189:      rx->write = (rx->write + i) & TSNEP_RING_MASK;

tsnep_rx_free_zc():
1194:  static void tsnep_rx_free_zc(struct tsnep_rx *rx)
1199:      struct tsnep_rx_entry *entry = &rx->entry[i];

tsnep_rx_refill_zc():
1207:  static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
1211:      desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
1213:      tsnep_rx_enable(rx);

tsnep_xdp_run_prog():
1218:  static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
1233:      if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
1238:      if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
1243:      bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
1247:      trace_xdp_exception(rx->adapter->netdev, prog, act);
1256:      page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),

tsnep_xdp_run_prog_zc():
1262:  static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
1273:      if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
1283:      if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
1288:      bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
1292:      trace_xdp_exception(rx->adapter->netdev, prog, act);
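Both run-prog helpers implement the standard XDP verdict dispatch: XDP_PASS falls through to the normal skb path, XDP_TX is bounced back out via tsnep_xdp_xmit_back(), XDP_REDIRECT goes through xdp_do_redirect(), and unknown, aborted, or dropped verdicts are warned, traced, and recycled. Judging by the line order, the zero-copy variant handles XDP_REDIRECT first, since redirect to the AF_XDP socket is its hot path. A compilable sketch of that dispatch, using the UAPI verdict enum and stub actions in place of the driver helpers:

#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>     /* enum xdp_action: XDP_PASS, XDP_TX, ... */

/* Stubs standing in for the driver's transmit/redirect/recycle helpers. */
static bool xmit_back(void)   { return true; }
static bool do_redirect(void) { return true; }
static void recycle(void)     { puts("recycled"); }

/* Returns true if XDP consumed the buffer, false if it goes to the stack. */
static bool run_prog(int act)
{
    switch (act) {
    case XDP_PASS:
        return false;
    case XDP_TX:
        if (!xmit_back())
            goto out_failure;
        return true;
    case XDP_REDIRECT:
        if (!do_redirect())
            goto out_failure;
        return true;
    default:
        /* bpf_warn_invalid_xdp_action() in the driver; fall through */
    case XDP_ABORTED:
out_failure:
        /* trace_xdp_exception() in the driver; fall through */
    case XDP_DROP:
        recycle();
        return true;
    }
}

int main(void)
{
    printf("XDP_PASS consumed: %d\n", run_prog(XDP_PASS));
    printf("XDP_DROP consumed: %d\n", run_prog(XDP_DROP));
    return 0;
}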
tsnep_build_skb():
1313:  static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
1326:      if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
1338:      skb_record_rx_queue(skb, rx->queue_index);
1339:      skb->protocol = eth_type_trans(skb, rx->adapter->netdev);

tsnep_rx_page():
1344:  static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
1349:      skb = tsnep_build_skb(rx, page, length);
1353:      rx->packets++;
1354:      rx->bytes += length;
1356:      rx->multicast++;
1360:      page_pool_recycle_direct(rx->page_pool, page);
1362:      rx->dropped++;

tsnep_rx_poll():
1366:  static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
1369:      struct device *dmadev = rx->adapter->dmadev;
1381:      desc_available = tsnep_rx_desc_available(rx);
1382:      dma_dir = page_pool_get_dma_dir(rx->page_pool);
1383:      prog = READ_ONCE(rx->adapter->xdp_prog);
1385:      tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
1386:                                  rx->tx_queue_index);
1387:      tx = &rx->adapter->tx[rx->tx_queue_index];
1389:      xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
1392:      while (likely(done < budget) && (rx->read != rx->write)) {
1393:          entry = &rx->entry[rx->read];
1403:          desc_available -= tsnep_rx_refill(rx, desc_available,
1410:          rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1413:          rx->dropped++;
1437:          rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1447:          consume = tsnep_xdp_run_prog(rx, prog, &xdp,
1450:          rx->packets++;
1451:          rx->bytes += length;
1459:          tsnep_rx_page(rx, napi, entry->page, length);
1464:      tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
1467:      tsnep_rx_refill(rx, desc_available, false);
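The poll loop drains completed descriptors until either the NAPI budget is spent or the ring is empty (rx->read == rx->write), refilling mid-loop once enough descriptors have been consumed and once more at the end. A standalone model of that budget/refill flow (the refill threshold and ring size are assumed):

#include <stdio.h>

#define RING_SIZE 16
#define RING_MASK (RING_SIZE - 1)
#define REFILL_THRESHOLD (RING_SIZE / 2)   /* assumed policy */

static int desc_available(int read, int write)
{
    if (read <= write)
        return RING_SIZE - write + read - 1;
    return read - write - 1;
}

int main(void)
{
    int read = 0, write = 12;   /* 12 completed descriptors pending */
    int budget = 8, done = 0;
    int avail = desc_available(read, write);

    while (done < budget && read != write) {
        /* ... process descriptor at 'read' (XDP and/or skb path) ... */
        read = (read + 1) & RING_MASK;
        avail++;
        done++;
        if (avail >= REFILL_THRESHOLD) {
            printf("mid-loop refill of %d descriptors\n", avail);
            avail = 0;
        }
    }
    if (avail)
        printf("final refill of %d descriptors\n", avail);
    printf("done %d/%d\n", done, budget);
    return 0;
}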
tsnep_rx_poll_zc():
1472:  static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
1485:      desc_available = tsnep_rx_desc_available(rx);
1486:      prog = READ_ONCE(rx->adapter->xdp_prog);
1488:      tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
1489:                                  rx->tx_queue_index);
1490:      tx = &rx->adapter->tx[rx->tx_queue_index];
1493:      while (likely(done < budget) && (rx->read != rx->write)) {
1494:          entry = &rx->entry[rx->read];
1504:          desc_available -= tsnep_rx_refill_zc(rx, desc_available,
1511:          rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1514:          rx->dropped++;
1529:          xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
1538:          rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1547:          consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
1550:          rx->packets++;
1551:          rx->bytes += length;
1559:          page = page_pool_dev_alloc_pages(rx->page_pool);
1564:          tsnep_rx_page(rx, napi, page, length);
1566:          rx->dropped++;
1573:      tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
1576:      desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
1578:      if (xsk_uses_need_wakeup(rx->xsk_pool)) {
1580:          xsk_set_rx_need_wakeup(rx->xsk_pool);
1582:          xsk_clear_rx_need_wakeup(rx->xsk_pool);
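The zero-copy poll ends with the AF_XDP need_wakeup handshake: if the final refill could not replenish every consumed descriptor (the fill queue ran dry), the driver sets the RX need-wakeup flag so user space knows it must produce fill entries and kick the kernel; otherwise the flag is cleared and polling proceeds without extra syscalls. A minimal model of that decision, with the flag plumbing reduced to a bool:

#include <stdbool.h>
#include <stdio.h>

static bool rx_need_wakeup;

/* 'missing' = descriptors we wanted to refill but could not, because
 * the AF_XDP fill queue had no free buffers. */
static void update_need_wakeup(int missing)
{
    if (missing)
        rx_need_wakeup = true;    /* xsk_set_rx_need_wakeup() */
    else
        rx_need_wakeup = false;   /* xsk_clear_rx_need_wakeup() */
}

int main(void)
{
    update_need_wakeup(4);
    printf("need wakeup: %d\n", rx_need_wakeup);  /* 1 */
    update_need_wakeup(0);
    printf("need wakeup: %d\n", rx_need_wakeup);  /* 0 */
    return 0;
}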
tsnep_rx_pending():
1590:  static bool tsnep_rx_pending(struct tsnep_rx *rx)
1594:      if (rx->read != rx->write) {
1595:          entry = &rx->entry[rx->read];

tsnep_rx_open():
1605:  static int tsnep_rx_open(struct tsnep_rx *rx)
1610:      retval = tsnep_rx_ring_create(rx);
1614:      tsnep_rx_init(rx);
1616:      desc_available = tsnep_rx_desc_available(rx);
1617:      if (rx->xsk_pool)
1618:          retval = tsnep_rx_alloc_zc(rx, desc_available, false);
1620:          retval = tsnep_rx_alloc(rx, desc_available, false);
1630:      if (rx->xsk_pool) {
1631:          retval = tsnep_rx_alloc_page_buffer(rx);
1639:      tsnep_rx_ring_cleanup(rx);

tsnep_rx_close():
1643:  static void tsnep_rx_close(struct tsnep_rx *rx)
1645:      if (rx->xsk_pool)
1646:          tsnep_rx_free_page_buffer(rx);
1648:      tsnep_rx_ring_cleanup(rx);

tsnep_rx_reopen():
1651:  static void tsnep_rx_reopen(struct tsnep_rx *rx)
1653:      struct page **page = rx->page_buffer;
1656:      tsnep_rx_init(rx);
1659:      struct tsnep_rx_entry *entry = &rx->entry[i];
1669:      tsnep_rx_set_page(rx, entry, *page);
1670:      tsnep_rx_activate(rx, rx->write);
1671:      rx->write++;

tsnep_rx_reopen_xsk():
1679:  static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
1681:      struct page **page = rx->page_buffer;
1685:      tsnep_rx_init(rx);
1691:      allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
1695:      struct tsnep_rx_entry *entry = &rx->entry[i];
1714:      tsnep_rx_set_xdp(rx, entry,
1715:                       rx->xdp_batch[allocated - 1]);
1716:      tsnep_rx_activate(rx, rx->write);
1717:      rx->write++;
1727:      if (xsk_uses_need_wakeup(rx->xsk_pool)) {
1728:          int desc_available = tsnep_rx_desc_available(rx);
1731:          xsk_set_rx_need_wakeup(rx->xsk_pool);
1733:          xsk_clear_rx_need_wakeup(rx->xsk_pool);

tsnep_pending():
1742:      if (queue->rx && tsnep_rx_pending(queue->rx))

tsnep_poll():
1762:      if (queue->rx) {
1763:          done = queue->rx->xsk_pool ?
1764:                 tsnep_rx_poll_zc(queue->rx, napi, budget) :
1765:                 tsnep_rx_poll(queue->rx, napi, budget);

tsnep_request_irq():
1802:      if (queue->tx && queue->rx)
1804:          name, queue->rx->queue_index);
1810:          name, queue->rx->queue_index);

tsnep_queue_close():
1842:      struct tsnep_rx *rx = queue->rx;
1846:      if (rx) {
1847:          if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
1848:              xdp_rxq_info_unreg(&rx->xdp_rxq);
1849:          if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
1850:              xdp_rxq_info_unreg(&rx->xdp_rxq_zc);

tsnep_queue_open():
1859:      struct tsnep_rx *rx = queue->rx;
1865:      if (rx) {
1868:          rx->tx_queue_index = tx->queue_index;
1869:      else if (rx->queue_index < adapter->num_tx_queues)
1870:          rx->tx_queue_index = rx->queue_index;
1872:          rx->tx_queue_index = 0;
1878:      retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
1879:                                rx->queue_index, queue->napi.napi_id);
1882:      retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1884:                                          rx->page_pool);
1887:      retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
1888:                                rx->queue_index, queue->napi.napi_id);
1891:      retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
1896:      if (rx->xsk_pool)
1897:          xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
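tsnep_queue_open() pins each RX queue to a TX queue for XDP_TX and then registers two xdp_rxq_info structs per queue: one backed by the page pool for copy mode and a separate one for zero-copy, presumably so that attaching or detaching an XSK pool does not require re-registering. The queue pairing logic, modeled in plain C (the -1 sentinel for "no paired TX queue" is an assumption of this sketch):

#include <stdio.h>

static int pick_tx_queue(int rx_index, int paired_tx_index, int num_tx_queues)
{
    if (paired_tx_index >= 0)
        return paired_tx_index;      /* queue pair shares an index */
    if (rx_index < num_tx_queues)
        return rx_index;             /* same-index TX queue exists */
    return 0;                        /* fall back to TX queue 0 */
}

int main(void)
{
    /* RX queue 2 with no pair and only 2 TX queues -> queue 0 */
    printf("%d\n", pick_tx_queue(2, -1, 2));
    return 0;
}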
tsnep_queue_enable():
1923:      if (queue->rx)
1924:          tsnep_rx_enable(queue->rx);

tsnep_queue_disable():
1938:      if (queue->rx)
1939:          tsnep_rx_disable(queue->rx);

tsnep_netdev_open():
1953:      if (adapter->queue[i].rx) {
1954:          retval = tsnep_rx_open(adapter->queue[i].rx);
1989:      if (adapter->queue[i].rx)
1990:          tsnep_rx_close(adapter->queue[i].rx);

tsnep_netdev_close():
2010:      if (adapter->queue[i].rx)
2011:          tsnep_rx_close(adapter->queue[i].rx);

tsnep_enable_xsk():
2028:      queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
2029:                                       sizeof(*queue->rx->page_buffer),
2031:      if (!queue->rx->page_buffer)
2033:      queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
2034:                                     sizeof(*queue->rx->xdp_batch),
2036:      if (!queue->rx->xdp_batch) {
2037:          kfree(queue->rx->page_buffer);
2038:          queue->rx->page_buffer = NULL;
2043:      xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);
2049:      queue->rx->xsk_pool = pool;
2052:      tsnep_rx_reopen_xsk(queue->rx);

tsnep_disable_xsk():
2066:      tsnep_rx_free_zc(queue->rx);
2068:      queue->rx->xsk_pool = NULL;
2072:      tsnep_rx_reopen(queue->rx);
2076:      kfree(queue->rx->xdp_batch);
2077:      queue->rx->xdp_batch = NULL;
2078:      kfree(queue->rx->page_buffer);
2079:      queue->rx->page_buffer = NULL;
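tsnep_enable_xsk() allocates two per-ring backing arrays (page_buffer, apparently to park page-pool pages while zero-copy is active, and xdp_batch for batched XSK allocations) and unwinds the first allocation if the second fails; tsnep_disable_xsk() frees both after the page-pool buffers are restored. A userspace model of that allocate-or-unwind pattern:

#include <stdlib.h>

struct rx_state {
    void **page_buffer;
    void **xdp_batch;
};

/* Returns 0 on success; on failure the state is left untouched. */
static int enable_xsk_bufs(struct rx_state *rx, size_t ring_size)
{
    rx->page_buffer = calloc(ring_size, sizeof(*rx->page_buffer));
    if (!rx->page_buffer)
        return -1;
    rx->xdp_batch = calloc(ring_size, sizeof(*rx->xdp_batch));
    if (!rx->xdp_batch) {
        /* unwind the first allocation before reporting failure */
        free(rx->page_buffer);
        rx->page_buffer = NULL;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct rx_state rx = { 0 };

    return enable_xsk_bufs(&rx, 256) ? 1 : 0;
}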
tsnep_netdev_get_stats64():
2134:      stats->rx_packets += adapter->rx[i].packets;
2135:      stats->rx_bytes += adapter->rx[i].bytes;
2136:      stats->rx_dropped += adapter->rx[i].dropped;
2137:      stats->multicast += adapter->rx[i].multicast;
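Each RX ring keeps its own packets/bytes/dropped/multicast counters (updated in the poll paths above), and the stats handler simply sums them across queues. A minimal model of that aggregation:

#include <stdint.h>
#include <stdio.h>

struct rx_ring_stats {
    uint64_t packets, bytes, dropped, multicast;
};

int main(void)
{
    struct rx_ring_stats rx[2] = {
        { .packets = 10, .bytes = 1500 },
        { .packets =  5, .bytes =  600, .dropped = 1 },
    };
    struct rx_ring_stats total = { 0 };

    for (int i = 0; i < 2; i++) {
        total.packets   += rx[i].packets;
        total.bytes     += rx[i].bytes;
        total.dropped   += rx[i].dropped;
        total.multicast += rx[i].multicast;
    }
    printf("rx_packets=%llu rx_bytes=%llu rx_dropped=%llu multicast=%llu\n",
           (unsigned long long)total.packets,
           (unsigned long long)total.bytes,
           (unsigned long long)total.dropped,
           (unsigned long long)total.multicast);
    return 0;
}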
tsnep_queue_init():
2441:      adapter->queue[0].rx = &adapter->rx[0];
2442:      adapter->queue[0].rx->adapter = adapter;
2443:      adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
2444:      adapter->queue[0].rx->queue_index = 0;
2472:      adapter->queue[i].rx = &adapter->rx[i];
2473:      adapter->queue[i].rx->adapter = adapter;
2474:      adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
2475:      adapter->queue[i].rx->queue_index = i;