
Searched refs: droq (Results 1 – 14 of 14) sorted by relevance

/linux-6.1.9/drivers/net/ethernet/cavium/liquidio/

octeon_droq.c
94 u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq) in octeon_droq_check_hw_for_pkts() argument
99 pkt_count = readl(droq->pkts_sent_reg); in octeon_droq_check_hw_for_pkts()
101 last_count = pkt_count - droq->pkt_count; in octeon_droq_check_hw_for_pkts()
102 droq->pkt_count = pkt_count; in octeon_droq_check_hw_for_pkts()
106 atomic_add(last_count, &droq->pkts_pending); in octeon_droq_check_hw_for_pkts()
111 static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq) in octeon_droq_compute_max_packet_bufs() argument
120 droq->max_empty_descs = 0; in octeon_droq_compute_max_packet_bufs()
123 droq->max_empty_descs++; in octeon_droq_compute_max_packet_bufs()
124 count += droq->buffer_size; in octeon_droq_compute_max_packet_bufs()
127 droq->max_empty_descs = droq->max_count - droq->max_empty_descs; in octeon_droq_compute_max_packet_bufs()
[all …]
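
The octeon_droq.c hits above show how the driver turns a free-running hardware packet counter into a pending-work count: it reads pkts_sent_reg, subtracts the previously seen value, stores the new reading, and adds the difference to an atomic pkts_pending counter. Below is a minimal userspace sketch of that pattern, not the driver code: struct droq_model, the fake CSR variable and main() are illustrative stand-ins, and readl() is replaced by a plain load.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct droq_model {
	volatile uint32_t *pkts_sent_reg;  /* stands in for the mapped CSR */
	uint32_t pkt_count;                /* last count read from the CSR */
	atomic_uint pkts_pending;          /* packets seen but not yet processed */
};

/* Mirrors the delta/accumulate pattern of octeon_droq_check_hw_for_pkts():
 * unsigned subtraction against the previous reading copes with counter wrap. */
static uint32_t droq_check_hw_for_pkts(struct droq_model *droq)
{
	uint32_t pkt_count = *droq->pkts_sent_reg;      /* readl() in the driver */
	uint32_t last_count = pkt_count - droq->pkt_count;

	droq->pkt_count = pkt_count;
	if (last_count)
		atomic_fetch_add(&droq->pkts_pending, last_count);

	return last_count;
}

int main(void)
{
	uint32_t fake_csr = 0;
	struct droq_model droq = { .pkts_sent_reg = &fake_csr };

	fake_csr = 5;                                   /* "hardware" delivers 5 packets */
	uint32_t fresh = droq_check_hw_for_pkts(&droq);
	printf("new=%u pending=%u\n", fresh, atomic_load(&droq.pkts_pending));

	fake_csr = 12;                                  /* 7 more arrive */
	fresh = droq_check_hw_for_pkts(&droq);
	printf("new=%u pending=%u\n", fresh, atomic_load(&droq.pkts_pending));
	return 0;
}
```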

lio_core.c
431 struct octeon_droq *droq) in octeon_schedule_rxq_oom_work() argument
435 struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no]; in octeon_schedule_rxq_oom_work()
447 struct octeon_droq *droq = oct->droq[q_no]; in octnet_poll_check_rxq_oom_status() local
449 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq) in octnet_poll_check_rxq_oom_status()
452 if (octeon_retry_droq_refill(droq)) in octnet_poll_check_rxq_oom_status()
453 octeon_schedule_rxq_oom_work(oct, droq); in octnet_poll_check_rxq_oom_status()
554 writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); in octeon_setup_droq()
577 struct octeon_droq *droq = in liquidio_push_packet() local
593 droq->stats.rx_dropped++; in liquidio_push_packet()
599 skb_record_rx_queue(skb, droq->q_no); in liquidio_push_packet()
[all …]
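
The lio_core.c hits sketch the out-of-memory refill path: a periodic poll checks each running queue, retries the descriptor refill in place, and only schedules per-queue workqueue work (lio->rxq_status_wq[q_no]) when the retry still leaves descriptors empty. A small model of that decision follows; retry_droq_refill() and schedule_oom_work() here are hypothetical stand-ins for the driver helpers, not their real implementations.

```c
#include <stdio.h>

struct droq_model {
	int q_no;
	int missing_bufs;   /* descriptors that still need a buffer */
};

/* Pretend refill: consumes whatever "memory" is available right now. */
static int retry_droq_refill(struct droq_model *droq, int available_bufs)
{
	int used = droq->missing_bufs < available_bufs ?
		   droq->missing_bufs : available_bufs;

	droq->missing_bufs -= used;
	return droq->missing_bufs;        /* non-zero: refill incomplete */
}

static void schedule_oom_work(struct droq_model *droq)
{
	/* The driver queues per-queue work instead of printing. */
	printf("droq %d: refill incomplete, deferring to workqueue\n", droq->q_no);
}

static void poll_check_rxq_oom_status(struct droq_model *droq, int available_bufs)
{
	if (retry_droq_refill(droq, available_bufs))
		schedule_oom_work(droq);
}

int main(void)
{
	struct droq_model droq = { .q_no = 0, .missing_bufs = 8 };

	poll_check_rxq_oom_status(&droq, 3);   /* partial refill -> defer */
	poll_check_rxq_oom_status(&droq, 16);  /* fully refilled -> no work */
	return 0;
}
```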

octeon_device.c
650 vfree(oct->droq[i]); in octeon_free_device_mem()
928 oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node); in octeon_setup_output_queues()
929 if (!oct->droq[0]) in octeon_setup_output_queues()
930 oct->droq[0] = vzalloc(sizeof(*oct->droq[0])); in octeon_setup_output_queues()
931 if (!oct->droq[0]) in octeon_setup_output_queues()
935 vfree(oct->droq[oq_no]); in octeon_setup_output_queues()
936 oct->droq[oq_no] = NULL; in octeon_setup_output_queues()
1280 return oct->droq[q_no]->max_count; in octeon_get_rx_qsize()
1428 void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) in lio_enable_irq() argument
1435 if (droq) { in lio_enable_irq()
[all …]
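
octeon_setup_output_queues() above shows a NUMA-aware allocation with a fallback: try vzalloc_node() on the device's node first, fall back to plain vzalloc(), and release with vfree() on the error and teardown paths. A hedged userspace sketch of the same try-local-then-anywhere shape; alloc_zeroed_on_node() is an invented stand-in that is allowed to fail so the fallback path is exercised.

```c
#include <stdlib.h>
#include <stdio.h>

struct droq_model { unsigned int q_no; };

/* Pretend node-local allocator; returns NULL when the node is "full". */
static void *alloc_zeroed_on_node(size_t size, int numa_node)
{
	if (numa_node < 0)                  /* model a node with no free memory */
		return NULL;
	return calloc(1, size);
}

static struct droq_model *setup_output_queue(unsigned int q_no, int numa_node)
{
	struct droq_model *droq;

	droq = alloc_zeroed_on_node(sizeof(*droq), numa_node); /* vzalloc_node() */
	if (!droq)
		droq = calloc(1, sizeof(*droq));               /* vzalloc() fallback */
	if (!droq)
		return NULL;                                   /* both failed: give up */

	droq->q_no = q_no;
	return droq;
}

int main(void)
{
	struct droq_model *droq = setup_output_queue(0, -1);  /* force the fallback */

	if (droq)
		printf("droq %u allocated via fallback path\n", droq->q_no);
	free(droq);                                            /* vfree() on teardown */
	return 0;
}
```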

cn66xx_device.c
304 struct octeon_droq *droq = oct->droq[oq_no]; in lio_cn6xxx_setup_oq_regs() local
307 droq->desc_ring_dma); in lio_cn6xxx_setup_oq_regs()
308 octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count); in lio_cn6xxx_setup_oq_regs()
311 droq->buffer_size); in lio_cn6xxx_setup_oq_regs()
314 droq->pkts_sent_reg = in lio_cn6xxx_setup_oq_regs()
316 droq->pkts_credit_reg = in lio_cn6xxx_setup_oq_regs()
508 struct octeon_droq *droq; in lio_cn6xxx_process_droq_intr_regs() local
529 droq = oct->droq[oq_no]; in lio_cn6xxx_process_droq_intr_regs()
530 pkt_count = octeon_droq_check_hw_for_pkts(droq); in lio_cn6xxx_process_droq_intr_regs()
533 if (droq->ops.poll_mode) { in lio_cn6xxx_process_droq_intr_regs()
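
lio_cn6xxx_setup_oq_regs() (and the cn23xx variants further down) follow the same per-queue CSR programming pattern: write the descriptor ring's DMA address, ring size and buffer size into that queue's registers, then cache pointers to its packets-sent and packets-credit registers for the data path. The sketch below models the register file as a plain array; the offsets and macro names are invented for illustration and do not correspond to the real CN6XXX register map.

```c
#include <stdint.h>
#include <stdio.h>

#define OQ_BASE(q)     (0x00 + (q) * 4)   /* made-up offsets, one block per queue */
#define OQ_SIZE(q)     (0x10 + (q) * 4)
#define OQ_BUFSZ(q)    (0x20 + (q) * 4)
#define OQ_SENT(q)     (0x30 + (q) * 4)
#define OQ_CREDIT(q)   (0x40 + (q) * 4)

static uint64_t csr_space[0x60];          /* stands in for the mapped BAR */

struct droq_model {
	uint64_t desc_ring_dma;
	uint32_t max_count;
	uint32_t buffer_size;
	volatile uint64_t *pkts_sent_reg;     /* saved for the hot path */
	volatile uint64_t *pkts_credit_reg;
};

static void setup_oq_regs(struct droq_model *droq, unsigned int oq_no)
{
	csr_space[OQ_BASE(oq_no)]  = droq->desc_ring_dma;  /* ring base address */
	csr_space[OQ_SIZE(oq_no)]  = droq->max_count;      /* number of descriptors */
	csr_space[OQ_BUFSZ(oq_no)] = droq->buffer_size;    /* RX buffer size */

	droq->pkts_sent_reg   = &csr_space[OQ_SENT(oq_no)];
	droq->pkts_credit_reg = &csr_space[OQ_CREDIT(oq_no)];
}

int main(void)
{
	struct droq_model droq = {
		.desc_ring_dma = 0x100000, .max_count = 1024, .buffer_size = 2048,
	};

	setup_oq_regs(&droq, 1);
	printf("oq1 size csr = %llu\n", (unsigned long long)csr_space[OQ_SIZE(1)]);
	return 0;
}
```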

octeon_droq.h
400 u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
406 struct octeon_droq *droq,
410 struct octeon_droq *droq, u32 budget);
414 int octeon_retry_droq_refill(struct octeon_droq *droq);

cn23xx_vf_device.c
247 struct octeon_droq *droq = oct->droq[oq_no]; in cn23xx_setup_vf_oq_regs() local
250 droq->desc_ring_dma); in cn23xx_setup_vf_oq_regs()
251 octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count); in cn23xx_setup_vf_oq_regs()
254 droq->buffer_size); in cn23xx_setup_vf_oq_regs()
257 droq->pkts_sent_reg = in cn23xx_setup_vf_oq_regs()
259 droq->pkts_credit_reg = in cn23xx_setup_vf_oq_regs()
491 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; in cn23xx_vf_msix_interrupt_handler() local
496 pkts_sent = readq(droq->pkts_sent_reg); in cn23xx_vf_msix_interrupt_handler()

lio_ethtool.c
972 rx_pending = oct->droq[0]->max_count; in lio_ethtool_get_ringparam()
1216 writel(oct->droq[i]->max_count, in lio_reset_queues()
1217 oct->droq[i]->pkts_credit_reg); in lio_reset_queues()
1279 rx_count_old = oct->droq[0]->max_count; in lio_ethtool_set_ringparam()
1687 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); in lio_get_ethtool_stats()
1690 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); in lio_get_ethtool_stats()
1692 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + in lio_get_ethtool_stats()
1693 oct_dev->droq[j]->stats.dropped_toomany + in lio_get_ethtool_stats()
1694 oct_dev->droq[j]->stats.rx_dropped); in lio_get_ethtool_stats()
1696 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); in lio_get_ethtool_stats()
[all …]
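
The lio_ethtool.c hits show how per-queue DROQ statistics are flattened into ethtool's data[] array, with the three drop counters (dropped_nomem, dropped_toomany, rx_dropped) also reported as a combined total. A simplified sketch of that flattening; the stats struct is trimmed to the fields named above and fill_oq_ethtool_stats() is a hypothetical helper, not the driver function.

```c
#include <stdint.h>
#include <stdio.h>

struct oq_stats {
	uint64_t rx_pkts_received;
	uint64_t rx_bytes_received;
	uint64_t dropped_nomem;
	uint64_t dropped_toomany;
	uint64_t rx_dropped;
};

/* Fill data[] with three entries per queue: packets, bytes, total drops. */
static size_t fill_oq_ethtool_stats(const struct oq_stats *stats,
				    size_t num_oqs, uint64_t *data)
{
	size_t i = 0;

	for (size_t j = 0; j < num_oqs; j++) {
		data[i++] = stats[j].rx_pkts_received;
		data[i++] = stats[j].rx_bytes_received;
		data[i++] = stats[j].dropped_nomem +
			    stats[j].dropped_toomany +
			    stats[j].rx_dropped;
	}
	return i;
}

int main(void)
{
	struct oq_stats stats[2] = {
		{ .rx_pkts_received = 10, .rx_bytes_received = 1500,
		  .dropped_nomem = 1, .dropped_toomany = 2, .rx_dropped = 3 },
		{ .rx_pkts_received = 4,  .rx_bytes_received = 600 },
	};
	uint64_t data[6];
	size_t n = fill_oq_ethtool_stats(stats, 2, data);

	for (size_t k = 0; k < n; k++)
		printf("data[%zu] = %llu\n", k, (unsigned long long)data[k]);
	return 0;
}
```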

cn23xx_pf_device.c
631 struct octeon_droq *droq = oct->droq[oq_no]; in cn23xx_setup_oq_regs() local
639 droq->desc_ring_dma); in cn23xx_setup_oq_regs()
640 octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count); in cn23xx_setup_oq_regs()
643 droq->buffer_size); in cn23xx_setup_oq_regs()
646 droq->pkts_sent_reg = in cn23xx_setup_oq_regs()
648 droq->pkts_credit_reg = in cn23xx_setup_oq_regs()
948 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; in cn23xx_pf_msix_interrupt_handler() local
952 if (!droq) { in cn23xx_pf_msix_interrupt_handler()
958 pkts_sent = readq(droq->pkts_sent_reg); in cn23xx_pf_msix_interrupt_handler()

octeon_mailbox.c
222 if (!oct->droq[i]) in get_vf_stats()
224 stats->rx_packets += oct->droq[i]->stats.rx_pkts_received; in get_vf_stats()
225 stats->rx_bytes += oct->droq[i]->stats.rx_bytes_received; in get_vf_stats()

octeon_main.h
76 struct octeon_droq *droq);

octeon_device.h
495 struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; member
889 void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);

octeon_network.h
490 static inline void octeon_fast_packet_next(struct octeon_droq *droq, in octeon_fast_packet_next() argument
495 skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer), in octeon_fast_packet_next()
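
octeon_fast_packet_next() in octeon_network.h appends the receive buffer behind descriptor idx to the sk_buff being assembled via skb_put_data(). The sketch below models that append with a flat byte buffer; pkt_model, recv_buf and append_data() are illustrative stand-ins rather than the kernel types.

```c
#include <string.h>
#include <stdio.h>

struct pkt_model {
	unsigned char data[4096];
	size_t len;
};

struct recv_buf { const unsigned char *buffer; size_t size; };

/* skb_put_data() equivalent: copy `len` bytes onto the packet tail. */
static void append_data(struct pkt_model *pkt, const void *src, size_t len)
{
	memcpy(pkt->data + pkt->len, src, len);
	pkt->len += len;
}

/* Copy the receive buffer at descriptor `idx` into the packet. */
static void fast_packet_next(struct pkt_model *pkt,
			     const struct recv_buf *recv_buf_list,
			     size_t copy_len, unsigned int idx)
{
	append_data(pkt, recv_buf_list[idx].buffer, copy_len);
}

int main(void)
{
	static const unsigned char frag[] = "payload-fragment";
	struct recv_buf ring[2] = { { frag, sizeof(frag) }, { frag, sizeof(frag) } };
	struct pkt_model pkt = { .len = 0 };

	fast_packet_next(&pkt, ring, sizeof(frag) - 1, 1);
	printf("packet length = %zu\n", pkt.len);
	return 0;
}
```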

lio_vf_main.c
87 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); in lio_wait_for_oq_pkts()
687 oct->droq[0]->ops.poll_mode = 0; in liquidio_destroy_nic_device()
926 oct->droq[0]->ops.poll_mode = 1; in liquidio_open()
993 oct->droq[0]->ops.poll_mode = 0; in liquidio_stop()
1211 oq_stats = &oct->droq[oq_no]->stats; in liquidio_get_stats64()
2414 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); in octeon_device_init()

lio_main.c
170 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], in octeon_droq_bh()
172 lio_enable_irq(oct->droq[q_no], NULL); in octeon_droq_bh()
205 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); in lio_wait_for_oq_pkts()
1240 oct->droq[0]->ops.poll_mode = 0; in liquidio_destroy_nic_device()
1789 oct->droq[0]->ops.poll_mode = 1; in liquidio_open()
1842 oct->droq[0]->ops.poll_mode = 0; in liquidio_open()
1906 oct->droq[0]->ops.poll_mode = 0; in liquidio_stop()
2079 oq_stats = &oct->droq[oq_no]->stats; in liquidio_get_stats64()
4241 writel(octeon_dev->droq[j]->max_count, in octeon_device_init()
4242 octeon_dev->droq[j]->pkts_credit_reg); in octeon_device_init()
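
The lio_main.c hits outline the receive bottom half: octeon_droq_bh() lets each output queue process up to a budget of packets, ORs the per-queue "more work left" results into a reschedule flag, and re-enables the queue interrupt via lio_enable_irq(), while liquidio_open()/liquidio_stop() flip ops.poll_mode so NAPI can take over queue 0. A minimal sketch of the budgeted loop; process_packets() and enable_irq() are stand-ins for octeon_droq_process_packets()/lio_enable_irq(), not the driver code.

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_OQS 2
#define BUDGET  4

struct droq_model {
	int q_no;
	int pending;          /* packets waiting in this queue */
};

/* Process at most `budget` packets; return non-zero if work remains. */
static int process_packets(struct droq_model *droq, int budget)
{
	int done = droq->pending < budget ? droq->pending : budget;

	droq->pending -= done;
	printf("q%d: processed %d, %d left\n", droq->q_no, done, droq->pending);
	return droq->pending != 0;
}

static void enable_irq(struct droq_model *droq)
{
	printf("q%d: interrupt re-enabled\n", droq->q_no);
}

int main(void)
{
	struct droq_model oqs[NUM_OQS] = { { 0, 6 }, { 1, 2 } };
	bool reschedule;

	do {
		reschedule = false;
		for (int q = 0; q < NUM_OQS; q++) {
			reschedule |= process_packets(&oqs[q], BUDGET);
			enable_irq(&oqs[q]);
		}
	} while (reschedule);    /* the driver re-queues its tasklet instead */
	return 0;
}
```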