/linux-6.1.9/drivers/net/ethernet/cavium/liquidio/
D | cn66xx_device.c | matches in lio_cn6xxx_soft_reset() (oct is an argument):
     29  int lio_cn6xxx_soft_reset(struct octeon_device *oct)
     31      octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
     33      dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");
     35      lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
     36      octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);
     38      lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
     39      lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
     44      if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
     45          dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
     49      dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
    [all …]
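The sequence above is liquidio's reset-verification trick: write a sentinel into SLI_SCRATCH1, pulse the soft-reset CSR, then check that the scratch register reads back as zero. Below is a minimal sketch re-assembling the visible lines into one routine; the mdelay() settle time and the return convention are illustrative assumptions, not lines quoted from the file.

    /* Sketch only: assumes the driver's usual helpers, direct CSR access
     * via octeon_read/write_csr64() and indirect PCI window access via
     * lio_pci_readq/writeq().  Needs <linux/delay.h> for mdelay().
     */
    static int soft_reset_sketch(struct octeon_device *oct)
    {
        octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
        lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);            /* enable BIST */
        octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL); /* sentinel */

        lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);     /* read back: flush posted writes */
        lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST); /* trigger the soft reset */

        mdelay(100);                                 /* assumed settle time */

        /* The reset clears SLI_SCRATCH1; a surviving sentinel means no reset. */
        if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
            dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
            return 1;
        }
        return 0;
    }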
|
D | cn23xx_pf_device.c | matches in cn23xx_dump_pf_initialized_regs() (oct is an argument):
     39  void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
     43      struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
     46      dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
     48              CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
     49      dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
     51              CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
     52      dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
     54              lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
     57      dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
     59              lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
    [all …]
|
D | cn68xx_device.c | matches in lio_cn68xx_set_dpi_regs() (oct is an argument):
     31  static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
     36      lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
     37      dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
     38              lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));
     45          lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
     46          lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
     47          dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
     48                  lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
     55      lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
     56      dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
    [all …]
|
D | octeon_device.c | matches in two helpers (oct is an argument in both):
    in __retrieve_octeon_config_info():
    568  static void *__retrieve_octeon_config_info(struct octeon_device *oct,
    571      u32 oct_id = oct->octeon_id;
    576      if (oct->chip_id == OCTEON_CN66XX) {
    578      } else if ((oct->chip_id == OCTEON_CN68XX) &&
    581      } else if ((oct->chip_id == OCTEON_CN68XX) &&
    584      } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
    586      } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
    in __verify_octeon_config_info():
    596  static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
    598      switch (oct->chip_id) {
    601          return lio_validate_cn6xxx_config_info(oct, conf);
    [all …]
|
D | cn23xx_vf_device.c | matches in two helpers (oct is an argument in both):
    in cn23xx_vf_get_oq_ticks():
     30  u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
     33      u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us;
    in cn23xx_vf_reset_io_queues():
     50  static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues)
     59      d64 = octeon_read_csr64(oct,
     62      octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
     68      u64 reg_val = octeon_read_csr64(oct,
     74                        oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
     78      dev_err(&oct->pci_dev->dev,
     85      octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
     89                        oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
    [all …]
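The reset helper reads the per-queue PKT_CONTROL CSR, sets the reset flag, writes it back, and then (in the elided lines) waits for the hardware to report the queue quiesced. A common shape for that wait is a bounded poll loop like the sketch below; the bit position and retry budget are placeholders, since the excerpt elides the actual values.

    /* Illustrative poll-with-timeout.  IQ_RST_BIT and the retry budget
     * are hypothetical; CN23XX_VF_SLI_IQ_PKT_CONTROL64() is from the
     * excerpt above.  Needs <linux/bits.h> and <linux/errno.h>.
     */
    #define IQ_RST_BIT  BIT_ULL(0)   /* placeholder reset-flag position */

    static int wait_iq_reset_done(struct octeon_device *oct, u32 q_no)
    {
        int loops = 1000;
        u64 val;

        do {
            val = octeon_read_csr64(oct,
                                    CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
        } while ((val & IQ_RST_BIT) && --loops);

        if (!loops) {
            dev_err(&oct->pci_dev->dev, "IQ %u reset timed out\n", q_no);
            return -EBUSY;
        }
        return 0;
    }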
|
D | octeon_console.c | forward declarations and bootmem helpers:
     35  static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
     38  static int octeon_console_read(struct octeon_device *oct, u32 console_num,
    in __cvmx_bootmem_desc_get() (oct is an argument):
    147  static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct,
    155          return octeon_read_device_mem32(oct, base);
    157          return octeon_read_device_mem64(oct, base);
    in CVMX_BOOTMEM_NAMED_GET_NAME() (oct is an argument):
    173  static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
    179      octeon_pci_read_core_mem(oct, addr, (u8 *)str, len);
    in __cvmx_bootmem_check_version() (oct is an argument):
    195  static int __cvmx_bootmem_check_version(struct octeon_device *oct,
    201      if (!oct->bootmem_desc_addr)
    202          oct->bootmem_desc_addr =
    [all …]
|
D | request_manager.c | forward declaration, mode helpers, and queue init:
     39  static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
    in IQ_INSTR_MODE_64B() (oct is an argument):
     43  static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
     46          (struct octeon_instr_queue *)oct->instr_queue[iq_no];
    macro (oct is an argument):
     50  #define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no))
    in octeon_init_instr_queue() (oct is an argument):
     56  int octeon_init_instr_queue(struct octeon_device *oct,
     65      int numa_node = dev_to_node(&oct->pci_dev->dev);
     67      if (OCTEON_CN6XXX(oct))
     68          conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
     69      else if (OCTEON_CN23XX_PF(oct))
     70          conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
    [all …]
|
D | octeon_mem_ops.c | BAR1 swap-mode toggle and fast PCI copy paths:
    in octeon_toggle_bar1_swapmode() (oct is an argument):
     31  octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
     35      mask = oct->fn_list.bar1_idx_read(oct, idx);
     37      oct->fn_list.bar1_idx_write(oct, idx, mask);
    fallback empty definition (oct is an argument):
     40  #define octeon_toggle_bar1_swapmode(oct, idx)
    in octeon_pci_fastwrite() (oct is an argument):
     44  octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
     52      octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
     61      octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
    in octeon_pci_fastread() (oct is an argument):
     68  octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
     76      octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
     85      octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
    [all …]
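octeon_pci_fastwrite() and octeon_pci_fastread() bracket a word-by-word copy over BAR1 with the swap-mode toggle above, which compiles to nothing on builds where the empty #define on line 40 applies. A sketch of just the 64-bit copy core, leaving out the toggle and the unaligned head/tail bytes the real helpers also handle:

    #include <linux/io.h>

    /* Copy core only: the real fastwrite additionally toggles BAR1 swap
     * mode around this loop and copies any leading or trailing bytes
     * that do not fill a whole u64.
     */
    static void copy64_to_bar(u8 __iomem *dst, const u64 *src, u32 nwords)
    {
        u32 i;

        for (i = 0; i < nwords; i++)
            writeq(src[i], dst + i * 8);
    }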
|
D | lio_core.c | matches in three functions:
    in lio_setup_glists() (oct is an argument):
     81  int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
    116      int numa_node = dev_to_node(&oct->pci_dev->dev);
    123          lio_dma_alloc(oct,
    in liquidio_set_feature() (oct is a local):
    161      struct octeon_device *oct = lio->oct_dev;
    176          dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
    in liquidio_link_ctrl_cmd_completion() (oct is a local):
    256      struct octeon_device *oct = lio->oct_dev;
    297          dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
    301          dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
    306          dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
    311          dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
    [all …]
|
D | lio_vf_rep.c | matches in two functions:
    in lio_vf_rep_send_soft_command() (oct is an argument):
     55  lio_vf_rep_send_soft_command(struct octeon_device *oct,
     66      octeon_alloc_soft_command(oct, req_size,
     82      octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
     85      err = octeon_send_soft_command(oct, sc);
     89      err = wait_for_sc_completion_timeout(oct, sc, 0);
     95          dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
    103      octeon_free_soft_command(oct, sc);
    in lio_vf_rep_open() (oct is a local):
    113      struct octeon_device *oct;
    116      oct = vf_rep->oct;
    123      ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
    [all …]
|
D | lio_vf_main.c | forward declaration plus two helpers:
     70  static int octeon_device_init(struct octeon_device *oct);
    in lio_wait_for_oq_pkts() (oct is an argument):
     73  static int lio_wait_for_oq_pkts(struct octeon_device *oct)
     76          (struct octeon_device_priv *)oct->priv;
     84      for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
     85          if (!(oct->io_qmask.oq & BIT_ULL(i)))
     87          pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
    in pcierror_quiesce_device() (oct is an argument):
    105  static void pcierror_quiesce_device(struct octeon_device *oct)
    117      if (wait_for_pending_requests(oct))
    118          dev_err(&oct->pci_dev->dev, "There were pending requests\n");
    121      for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
    [all …]
|
D | lio_main.c | forward declaration plus the DROQ bottom half:
    145  static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
    in octeon_droq_bh() (oct is a local):
    165      struct octeon_device *oct = oct_priv->dev;
    167      for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
    168          if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
    170          reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
    172          lio_enable_irq(oct->droq[q_no], NULL);
    174      if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
    178              int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
    181                  oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
    184                  oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
    [all …]
|
D | lio_ethtool.c | matches in lio_get_link_ksettings() (oct is a local):
    219      struct octeon_device *oct = lio->oct_dev;
    247          dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
    250          dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
    260      if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
    261          oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
    262          if (OCTEON_CN23XX_PF(oct)) {
    270              if (oct->no_speed_setting == 0) {
    282              if (oct->no_speed_setting == 0) {
    286                  oct->speed_setting = 25;
    289              if (oct->speed_setting == 10) {
    [all …]
|
D | cn66xx_device.h | prototypes (oct is an argument throughout):
     68  int lio_cn6xxx_soft_reset(struct octeon_device *oct);
     69  void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct);
     70  void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
     72  void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
     74  void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
     75  void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
     76  void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
     77  void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
     78  int lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
     79  void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
    [all …]
|
D | octeon_main.h | prototypes and inline BAR helpers:
     73  void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);
     75  void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
    in octeon_unmap_pci_barx() (oct is an argument):
     93  static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
     95      dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
     98      if (oct->mmio[baridx].done)
     99          iounmap(oct->mmio[baridx].hw_addr);
    101      if (oct->mmio[baridx].start)
    102          pci_release_region(oct->pci_dev, baridx * 2);
    in octeon_map_pci_barx() (oct is an argument):
    111  static inline int octeon_map_pci_barx(struct octeon_device *oct,
    116      if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
    [all …]
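The map/unmap pair follows the standard PCI BAR lifecycle: pci_request_region() to claim the region, ioremap() for a CPU pointer, then iounmap() and pci_release_region() in reverse on teardown. The baridx * 2 scaling is consistent with these being 64-bit BARs, since a 64-bit BAR occupies two consecutive BAR slots, putting logical BAR n at PCI resource 2n. A generic sketch of the same pairing (map_barx() is a made-up name, not the driver's):

    #include <linux/pci.h>
    #include <linux/io.h>

    /* Claim and map one 64-bit BAR; returns NULL on failure.  The caller
     * tears down with iounmap() + pci_release_region(), mirroring this
     * function in reverse, as the inline helpers above do.
     */
    static void __iomem *map_barx(struct pci_dev *pdev, int baridx,
                                  const char *name)
    {
        void __iomem *regs;

        if (pci_request_region(pdev, baridx * 2, name))
            return NULL;

        regs = ioremap(pci_resource_start(pdev, baridx * 2),
                       pci_resource_len(pdev, baridx * 2));
        if (!regs)
            pci_release_region(pdev, baridx * 2);

        return regs;
    }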
|
D | octeon_nic.c | matches in three functions (oct is an argument in each):
    in octeon_alloc_soft_command_resp():
     30  octeon_alloc_soft_command_resp(struct octeon_device *oct,
     41      octeon_alloc_soft_command(oct, 0, rdatasize, 0);
     52      if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
     68      rdp->pcie_port = oct->pcie_port;
     73      if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
    in octnet_send_nic_data_pkt():
     83  int octnet_send_nic_data_pkt(struct octeon_device *oct,
     89      return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
    in octnic_alloc_ctrl_pkt_sc():
     95  *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
    109      octeon_alloc_soft_command(oct, datasize, rdatasize, 0);
    127      octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
    [all …]
|
D | octeon_droq.c | matches in three functions (oct is an argument in each):
    in octeon_droq_destroy_ring_buffers():
    140  octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
    152          lio_unmap_ring(oct->pci_dev,
    in octeon_droq_setup_ring_buffers():
    167  octeon_droq_setup_ring_buffers(struct octeon_device *oct,
    175          buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);
    178              dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
    in octeon_delete_droq():
    198  int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
    200      struct octeon_droq *droq = oct->droq[q_no];
    202      dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
    204      octeon_droq_destroy_ring_buffers(oct, droq);
    208      lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
    [all …]
|
D | octeon_device.h | typedef, struct member, chip-test macros, and prototypes:
    212  typedef int (*octeon_console_print_fn)(struct octeon_device *oct,
    423      struct octeon_device *oct;           (struct member)
    macros (oct is an argument):
    621  #define OCTEON_CN6XXX(oct) ({ \
    622      typeof(oct) _oct = (oct); \
    625  #define OCTEON_CN23XX_PF(oct) ((oct)->chip_id == OCTEON_CN23XX_PF_VID)
    626  #define OCTEON_CN23XX_VF(oct) ((oct)->chip_id == OCTEON_CN23XX_VF_VID)
    627  #define CHIP_CONF(oct, TYPE) \
    628      (((struct octeon_ ## TYPE *)((oct)->chip))->conf)
    prototypes:
    638  void octeon_free_device_mem(struct octeon_device *oct);
    655  int octeon_register_device(struct octeon_device *oct,
    [all …]
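OCTEON_CN6XXX() is the interesting macro here: it is a GCC statement expression that copies its argument into a typeof temporary, so oct is evaluated exactly once even when the caller passes an expression with side effects, while the CN23XX checks get away with plain parenthesization because they mention (oct) only once. CHIP_CONF() token-pastes the chip name into a struct type to cast the opaque chip pointer. A small illustration of the single-evaluation idiom (the two-ID body is illustrative, not the header's exact text):

    /* A naive
     *   #define IS_CN6XXX(oct) ((oct)->chip_id == A || (oct)->chip_id == B)
     * would expand "oct" twice; the ({ ... }) form below does not.
     */
    #define IS_CN6XXX(oct) ({                                    \
        typeof(oct) _oct = (oct);  /* argument evaluated once */ \
        (_oct->chip_id == OCTEON_CN66XX ||                       \
         _oct->chip_id == OCTEON_CN68XX);                        \
    })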
|
D | response_manager.c | matches in octeon_setup_response_list() (oct is an argument):
     30  int octeon_setup_response_list(struct octeon_device *oct)
     36          INIT_LIST_HEAD(&oct->response_list[i].head);
     37          spin_lock_init(&oct->response_list[i].lock);
     38          atomic_set(&oct->response_list[i].pending_req_count, 0);
     40      spin_lock_init(&oct->cmd_resp_wqlock);
     42      oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
     43      if (!oct->dma_comp_wq.wq) {
     44          dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
     48      cwq = &oct->dma_comp_wq;
     50      cwq->wk.ctxptr = oct;
    [all …]
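The setup follows the usual two-phase pattern: initialize the containers first (list heads, spinlocks, atomic counters), then create the workqueue that will poll them. WQ_MEM_RECLAIM gives the queue a rescuer thread, which matters because response completion sits on the packet path and must make progress under memory pressure. A stand-alone sketch of the same shape, with a plain delayed_work in place of the driver's private wrapper (poll_responses() is hypothetical):

    #include <linux/workqueue.h>

    static struct workqueue_struct *comp_wq;
    static struct delayed_work comp_work;

    /* Poll callback: in the real driver this walks the response lists,
     * completes finished soft commands, and re-arms itself.
     */
    static void poll_responses(struct work_struct *work)
    {
        /* ... check the response lists, complete what is done ... */
        queue_delayed_work(comp_wq, &comp_work, msecs_to_jiffies(100));
    }

    static int setup_comp_wq(void)
    {
        comp_wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
        if (!comp_wq)
            return -ENOMEM;

        INIT_DELAYED_WORK(&comp_work, poll_responses);
        queue_delayed_work(comp_wq, &comp_work, msecs_to_jiffies(100));
        return 0;
    }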
|
/linux-6.1.9/drivers/net/ethernet/marvell/octeon_ep/
D | octep_cn9k_pf.c | matches in cn93_dump_regs() (oct is an argument):
     37  static void cn93_dump_regs(struct octep_device *oct, int qno)
     39      struct device *dev = &oct->pdev->dev;
     44              octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
     47              octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
     50              octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
     53              octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
     56              octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno)));
     59              octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno)));
     62              octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno)));
     65              octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno)));
    [all …]
|
D | octep_main.c | matches in octep_alloc_ioq_vectors() (oct is an argument):
     47  static int octep_alloc_ioq_vectors(struct octep_device *oct)
     52      for (i = 0; i < oct->num_oqs; i++) {
     53          oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
     54          if (!oct->ioq_vector[i])
     57          ioq_vector = oct->ioq_vector[i];
     58          ioq_vector->iq = oct->iq[i];
     59          ioq_vector->oq = oct->oq[i];
     60          ioq_vector->octep_dev = oct;
     63      dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
     69          vfree(oct->ioq_vector[i]);
    [all …]
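Line 54's failure branch jumps (in the elided lines) to an unwind path, and line 69 is that path: free, in reverse, only what earlier iterations managed to allocate. The idiom in isolation, with a placeholder type standing in for the driver's ioq_vector:

    #include <linux/vmalloc.h>

    struct ioq_vec { int irq; /* placeholder for the real vector type */ };

    static int alloc_vectors(struct ioq_vec **vecs, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            vecs[i] = vzalloc(sizeof(*vecs[i]));
            if (!vecs[i])
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0) {      /* free only what this loop allocated */
            vfree(vecs[i]);
            vecs[i] = NULL;
        }
        return -ENOMEM;
    }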
|
D | octep_main.h | per-chip hardware-ops function pointers (oct as callback argument):
     62  void (*setup_iq_regs)(struct octep_device *oct, int q);
     63  void (*setup_oq_regs)(struct octep_device *oct, int q);
     64  void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
     68  int (*soft_reset)(struct octep_device *oct);
     69  void (*reinit_regs)(struct octep_device *oct);
     72  void (*enable_interrupts)(struct octep_device *oct);
     73  void (*disable_interrupts)(struct octep_device *oct);
     75  void (*enable_io_queues)(struct octep_device *oct);
     76  void (*disable_io_queues)(struct octep_device *oct);
     77  void (*enable_iq)(struct octep_device *oct, int q);
    [all …]
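This is the ops-table pattern: common code calls through these pointers (octep_tx.c's oct->hw_ops.update_iq_read_idx(iq) below is one such call site), and each silicon variant installs its own implementations at probe time, so the fast path never tests chip IDs. In miniature, under made-up names:

    struct my_hw_ops {
        int  (*soft_reset)(struct octep_device *oct);
        void (*enable_io_queues)(struct octep_device *oct);
    };

    static int cn93_soft_reset(struct octep_device *oct)
    {
        /* chip-specific reset sequence goes here */
        return 0;
    }

    static void cn93_enable_io_queues(struct octep_device *oct)
    {
        /* chip-specific queue-enable register writes go here */
    }

    /* Installed once at probe; afterwards common code just calls
     * ops->soft_reset(oct) without knowing which chip it drives.
     */
    static const struct my_hw_ops cn93_ops = {
        .soft_reset       = cn93_soft_reset,
        .enable_io_queues = cn93_enable_io_queues,
    };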
|
D | octep_tx.c | matches in three functions:
    in octep_iq_process_completions() (oct is a local):
     36      struct octep_device *oct = iq->octep_dev;
     46      iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
    in octep_clean_iqs() (oct is an argument):
    160  void octep_clean_iqs(struct octep_device *oct)
    164      for (i = 0; i < oct->num_iqs; i++) {
    165          octep_iq_free_pending(oct->iq[i]);
    166          octep_iq_reset_indices(oct->iq[i]);
    in octep_setup_iq() (oct is an argument):
    178  static int octep_setup_iq(struct octep_device *oct, int q_no)
    187      oct->iq[q_no] = iq;
    189      iq->octep_dev = oct;
    190      iq->netdev = oct->netdev;
    [all …]
|
D | octep_ctrl_net.c | matches in five mailbox wrappers (oct is an argument in each):
    in octep_get_link_status():
     16  int octep_get_link_status(struct octep_device *oct)
     29      err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
    in octep_set_link_status():
     37  void octep_set_link_status(struct octep_device *oct, bool up)
     49      octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
    in octep_set_rx_state():
     52  void octep_set_rx_state(struct octep_device *oct, bool up)
     64      octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
    in octep_get_mac_addr():
     67  int octep_get_mac_addr(struct octep_device *oct, u8 *addr)
     80      err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
    in octep_set_mac_addr():
     90  int octep_set_mac_addr(struct octep_device *oct, u8 *addr)
    103      return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
    [all …]
|
D | octep_rx.c | matches in two functions (oct is an argument in both):
    in octep_oq_refill():
     79  static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
    in octep_setup_oq():
    123  static int octep_setup_oq(struct octep_device *oct, int q_no)
    131      oct->oq[q_no] = oq;
    133      oq->octep_dev = oct;
    134      oq->netdev = oct->netdev;
    135      oq->dev = &oct->pdev->dev;
    137      oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
    139      oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
    146      if (oct->caps_enabled)
    149      oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
    [all …]
|