Searched refs:sriov_info (Results 1 – 13 of 13) sorted by relevance
/linux-6.6.21/drivers/net/ethernet/huawei/hinic/
hinic_sriov.c
    520   return &nic_dev->sriov_info;  in hinic_get_sriov_info_by_pcidev()
    601   struct hinic_sriov_info *sriov_info;  in hinic_ndo_get_vf_config() (local)
    603   sriov_info = &nic_dev->sriov_info;  in hinic_ndo_get_vf_config()
    604   if (vf >= sriov_info->num_vfs)  in hinic_ndo_get_vf_config()
    607   hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi);  in hinic_ndo_get_vf_config()
    645   struct hinic_sriov_info *sriov_info;  in hinic_ndo_set_vf_mac() (local)
    648   sriov_info = &nic_dev->sriov_info;  in hinic_ndo_set_vf_mac()
    649   if (!is_valid_ether_addr(mac) || vf >= sriov_info->num_vfs)  in hinic_ndo_set_vf_mac()
    652   err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac);  in hinic_ndo_set_vf_mac()
    760   dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d old vlan %d\n",  in set_hw_vf_vlan()
    [all …]
hinic_dev.h
    111   struct hinic_sriov_info sriov_info;  (member)
hinic_main.c
    1214  nic_dev->sriov_info.hwdev = hwdev;  in nic_dev_init()
    1215  nic_dev->sriov_info.pdev = pdev;  in nic_dev_init()
    1396  struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;  in wait_sriov_cfg_complete() (local)
    1399  set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);  in wait_sriov_cfg_complete()
    1403  if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&  in wait_sriov_cfg_complete()
    1404  !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))  in wait_sriov_cfg_complete()
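The hinic hits above share one pattern: a struct hinic_sriov_info is embedded in the driver's private hinic_dev, and each VF-facing ndo callback bounds-checks the VF index against num_vfs before handing the hwdev handle and the translated hardware VF id to a helper. The standalone sketch below models only that flow; the sketch_ names, the +1 VF-id translation, and the error value are illustrative assumptions, not the kernel's definitions.

    /* Minimal sketch of the hinic access pattern; not the kernel source. */
    #include <stdbool.h>
    #include <stdio.h>

    struct sketch_sriov_info {
        void *hwdev;          /* opaque handle passed to hardware helpers */
        unsigned int num_vfs; /* number of VFs currently enabled */
    };

    struct sketch_nic_dev {
        struct sketch_sriov_info sriov_info; /* embedded, as in hinic_dev.h:111 */
    };

    /* Assumed translation: OS numbers VFs from 0, hardware from 1 here.
     * The real OS_VF_ID_TO_HW() macro may differ. */
    #define SKETCH_OS_VF_ID_TO_HW(vf) ((vf) + 1)

    static int sketch_ndo_set_vf_mac(struct sketch_nic_dev *nic_dev, int vf,
                                     bool mac_is_valid)
    {
        struct sketch_sriov_info *sriov_info = &nic_dev->sriov_info;

        /* Reject bad MACs and out-of-range VF indices before touching HW. */
        if (!mac_is_valid || vf < 0 || (unsigned int)vf >= sriov_info->num_vfs)
            return -1; /* the driver returns -EINVAL at this point */

        printf("set MAC for HW VF %d via hwdev %p\n",
               SKETCH_OS_VF_ID_TO_HW(vf), sriov_info->hwdev);
        return 0;
    }

    int main(void)
    {
        static int dummy_hwdev;
        struct sketch_nic_dev nic_dev = {
            .sriov_info = { .hwdev = &dummy_hwdev, .num_vfs = 2 },
        };

        return sketch_ndo_set_vf_mac(&nic_dev, 1, true);
    }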
/linux-6.6.21/drivers/net/ethernet/cavium/liquidio/
cn23xx_pf_device.c
    327   (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);  in cn23xx_setup_global_mac_regs()
    329   temp = oct->sriov_info.rings_per_vf & 0xff;  in cn23xx_setup_global_mac_regs()
    333   temp = oct->sriov_info.max_vfs & 0xff;  in cn23xx_setup_global_mac_regs()
    352   srn = oct->sriov_info.pf_srn;  in cn23xx_reset_io_queues()
    353   ern = srn + oct->sriov_info.num_pf_rings;  in cn23xx_reset_io_queues()
    411   srn = oct->sriov_info.pf_srn;  in cn23xx_pf_setup_global_input_regs()
    412   ern = srn + oct->sriov_info.num_pf_rings;  in cn23xx_pf_setup_global_input_regs()
    426   if (q_no < oct->sriov_info.pf_srn) {  in cn23xx_pf_setup_global_input_regs()
    427   vf_num = q_no / oct->sriov_info.rings_per_vf;  in cn23xx_pf_setup_global_input_regs()
    483   srn = oct->sriov_info.pf_srn;  in cn23xx_pf_setup_global_output_regs()
    [all …]
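Taken together, the cn23xx_pf_device.c hits suggest the CN23XX queue layout: VF rings occupy the low queue numbers in blocks of rings_per_vf, while the PF's own rings start at pf_srn and span num_pf_rings entries. The sketch below models only that arithmetic; the field names mirror octeon_sriov_info, but the sketch_ wrappers and the example numbers are assumptions.

    /* Sketch of the queue-to-owner arithmetic used by the PF setup code. */
    #include <stdio.h>

    struct sketch_sriov_layout {
        unsigned int rings_per_vf;  /* rings carved out per VF */
        unsigned int max_vfs;       /* VFs the PF can expose */
        unsigned int pf_srn;        /* starting ring number of the PF */
        unsigned int num_pf_rings;  /* rings kept by the PF itself */
    };

    /* Returns the owning VF for q_no, or -1 if the queue belongs to the PF,
     * i.e. q_no falls in [pf_srn, pf_srn + num_pf_rings). */
    static int sketch_queue_owner(const struct sketch_sriov_layout *si,
                                  unsigned int q_no)
    {
        if (q_no < si->pf_srn)
            return (int)(q_no / si->rings_per_vf); /* as at line 427 above */
        return -1;
    }

    int main(void)
    {
        /* Assumed example layout: 2 rings per VF, 4 VFs, PF rings 8..15. */
        struct sketch_sriov_layout si = {
            .rings_per_vf = 2, .max_vfs = 4, .pf_srn = 8, .num_pf_rings = 8,
        };
        unsigned int srn = si.pf_srn;
        unsigned int ern = srn + si.num_pf_rings; /* mirrors lines 352-353 */

        printf("PF rings: [%u, %u), queue 5 owned by VF %d\n",
               srn, ern, sketch_queue_owner(&si, 5));
        return 0;
    }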
lio_main.c
    178   int adjusted_q_no = q_no + oct->sriov_info.pf_srn;  in octeon_droq_bh()
    736   max_vfs = oct->sriov_info.max_vfs;  in disable_all_vf_links()
    804   vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);  in liquidio_watchdog()
    805   vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);  in liquidio_watchdog()
    1098  if (oct->sriov_info.sriov_enabled)  in octeon_destroy_resources()
    1370  oct->sriov_info.max_vfs);  in octeon_chip_specific_setup()
    2822  if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)  in __liquidio_set_vf_mac()
    2843  oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];  in __liquidio_set_vf_mac()
    2858  if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)  in liquidio_set_vf_mac()
    2882  if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {  in liquidio_set_vf_spoofchk()
    [all …]
cn23xx_vf_device.c
    108   if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf))  in cn23xx_vf_setup_global_input_regs()
    111   for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {  in cn23xx_vf_setup_global_input_regs()
    156   for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {  in cn23xx_vf_setup_global_output_regs()
    645   if (oct->sriov_info.rings_per_vf > rings_per_vf) {  in cn23xx_setup_octeon_vf_device()
    648   oct->sriov_info.rings_per_vf, rings_per_vf,  in cn23xx_setup_octeon_vf_device()
    650   oct->sriov_info.rings_per_vf = rings_per_vf;  in cn23xx_setup_octeon_vf_device()
    658   oct->sriov_info.rings_per_vf =  in cn23xx_setup_octeon_vf_device()
    661   oct->sriov_info.rings_per_vf = rings_per_vf;  in cn23xx_setup_octeon_vf_device()
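The cn23xx_vf_device.c hits at lines 645-650 show the VF driver clamping its configured rings_per_vf when it exceeds the count the device actually grants; the later assignments at lines 658-661 handle other branches not reproduced here. Below is a minimal sketch of just that clamp, with the granted count passed in as a plain parameter (in the driver it comes from the device configuration); the warn-and-clamp behavior is an assumption drawn from these hits.

    /* Sketch of the rings_per_vf clamp performed during VF device setup. */
    #include <stdio.h>

    static unsigned int sketch_clamp_rings_per_vf(unsigned int configured,
                                                  unsigned int granted)
    {
        if (configured > granted) {
            fprintf(stderr, "requested %u rings per VF, only %u available\n",
                    configured, granted);
            return granted; /* mirrors the assignment at line 650 */
        }
        return configured;
    }

    int main(void)
    {
        printf("rings_per_vf = %u\n", sketch_clamp_rings_per_vf(16, 8));
        return 0;
    }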
lio_ethtool.c
    512   if (oct->sriov_info.sriov_enabled) {  in lio_ethtool_get_channels()
    621   if (oct->sriov_info.sriov_enabled) {  in lio_ethtool_set_channels()
    1019  if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;  in lio_23xx_reconfigure_queue_count()
    1020  if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;  in lio_23xx_reconfigure_queue_count()
    1021  if_cfg.s.base_queue = oct->sriov_info.pf_srn;  in lio_23xx_reconfigure_queue_count()
    1147  if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)  in lio_reset_queues()
    1166  !oct->sriov_info.sriov_enabled) {  in lio_reset_queues()
    1167  oct->sriov_info.num_pf_rings = num_qs;  in lio_reset_queues()
    1174  num_qs = oct->sriov_info.num_pf_rings;  in lio_reset_queues()
    1194  if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {  in lio_reset_queues()
    [all …]
lio_vf_rep.c
    499   if (!oct->sriov_info.sriov_enabled)  in lio_vf_rep_create()
    502   num_vfs = oct->sriov_info.num_vfs_alloced;  in lio_vf_rep_create()
    585   if (!oct->sriov_info.sriov_enabled)  in lio_vf_rep_destroy()
lio_vf_main.c
    1979  if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;  in setup_nic_devices()
    1980  if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;  in setup_nic_devices()
    2358  if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {  in octeon_device_init()
    2365  oct->sriov_info.rings_per_vf);  in octeon_device_init()
    2368  if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))  in octeon_device_init()
octeon_mailbox.c
    263   pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);  in octeon_mbox_process_cmd()
octeon_device.h
    566   struct octeon_sriov_info sriov_info;  (member)
octeon_device.c
    855   ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;  in octeon_allocate_ioq_vector()
    968   for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {  in octeon_set_io_queues_off()
lio_core.c
    1071  oct->sriov_info.pf_srn + i;  in octeon_setup_interrupt()
    1074  oct->sriov_info.trs;  in octeon_setup_interrupt()
    1472  oct_dev->sriov_info.num_vfs_alloced) {  in lio_fetch_stats()