/linux-6.6.21/drivers/net/ethernet/huawei/hinic/ |
D | hinic_sriov.c |
     52  static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id,    in hinic_notify_vf_link_status() argument
     60  if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) {    in hinic_notify_vf_link_status()
     62  link.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;    in hinic_notify_vf_link_status()
     64  vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT,    in hinic_notify_vf_link_status()
     70  HW_VF_ID_TO_OS(vf_id), err,    in hinic_notify_vf_link_status()
     89  static u16 hinic_vf_info_vlanprio(struct hinic_hwdev *hwdev, int vf_id)    in hinic_vf_info_vlanprio() argument
     95  pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;    in hinic_vf_info_vlanprio()
     96  pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;    in hinic_vf_info_vlanprio()
    103  u8 qos, int vf_id)    in hinic_set_vf_vlan() argument
    114  vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;    in hinic_set_vf_vlan()
    [all …]
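Note: the hinic matches show vf_id doing two jobs — indexing the PF's per-VF bookkeeping array through HW_VF_ID_TO_OS(), and forming the VF's global function ID as the PF/VF offset plus vf_id. A minimal standalone sketch of that arithmetic follows; the offset value, and the assumption that hardware VF numbering is 1-based so HW_VF_ID_TO_OS() subtracts one, are illustrative rather than taken from the driver.

/* Illustrative sketch only, not hinic code.  Assumes 1-based hardware VF
 * numbering (so the OS-side array index is vf_id - 1) and a made-up
 * PF/VF function-ID offset. */
#include <stdio.h>
#include <stdint.h>

#define HW_VF_ID_TO_OS(vf_id)	((vf_id) - 1)	/* assumed definition */

static uint16_t glb_func_id(uint16_t pf_vf_offset, uint16_t vf_id)
{
	/* mirrors: link.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id */
	return pf_vf_offset + vf_id;
}

int main(void)
{
	uint16_t pf_vf_offset = 16;	/* hypothetical value read from the HWIF */
	uint16_t vf_id = 3;		/* hardware VF number */

	printf("vf_infos[] index: %d\n", HW_VF_ID_TO_OS(vf_id));
	printf("global func id:   %d\n", glb_func_id(pf_vf_offset, vf_id));
	return 0;
}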
|
D | hinic_hw_mbox.h |
     67  typedef int (*hinic_pf_mbox_cb)(void *handle, u16 vf_id, u8 cmd, void *buf_in,
    126  int (*cmd_msg_handler)(void *hwdev, u16 vf_id,
    136  u16 vf_id, u8 cmd, void *buf_in,
    167  enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
|
/linux-6.6.21/drivers/net/ethernet/intel/ice/ |
D | ice_sriov.h |
     31  int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
     33  ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
     39  ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
     43  ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
     46  int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
     48  int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
     50  int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
     55  ice_get_vf_stats(struct net_device *netdev, int vf_id,
     81  int __always_unused vf_id, u8 __always_unused *mac)    in ice_set_vf_mac() argument
     88  int __always_unused vf_id,    in ice_get_vf_cfg() argument
    [all …]
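Note: lines 31-55 are the ice .ndo_set_vf_*/.ndo_get_vf_* handlers, which take the VF index supplied over rtnetlink, while lines 81 and 88 come from the inline stubs used when SR-IOV support is compiled out. A hedged sketch of that header pattern follows; the CONFIG_PCI_IOV guard and the -EOPNOTSUPP return value are assumptions, not text copied from ice_sriov.h.

/* Sketch of the prototype-vs-stub pattern suggested by the matches above.
 * The config symbol and return value are assumptions for illustration. */
#include <linux/errno.h>
#include <linux/netdevice.h>

#ifdef CONFIG_PCI_IOV
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
#else
static inline int
ice_set_vf_mac(struct net_device __always_unused *netdev,
	       int __always_unused vf_id, u8 __always_unused *mac)
{
	return -EOPNOTSUPP;	/* SR-IOV compiled out: reject the request */
}
#endif /* CONFIG_PCI_IOV */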
|
D | ice_sriov.c |
    101  wr32(hw, VPINT_ALLOC(vf->vf_id), 0);    in ice_dis_vf_mappings()
    102  wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);    in ice_dis_vf_mappings()
    117  wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);    in ice_dis_vf_mappings()
    122  wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);    in ice_dis_vf_mappings()
    192  reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;    in ice_free_vfs()
    193  bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;    in ice_free_vfs()
    261  return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;    in ice_calc_vf_first_vector_idx()
    289  device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;    in ice_ena_vf_msix_mappings()
    295  wr32(hw, VPINT_ALLOC(vf->vf_id), reg);    in ice_ena_vf_msix_mappings()
    301  wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);    in ice_ena_vf_msix_mappings()
    [all …]
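Note: most of the ice_sriov.c matches are index arithmetic: the device-wide VF number is vf_base_id + vf_id, that number selects a 32-bit register (n / 32) and a bit inside it (n % 32), and a VF's first MSI-X vector is sriov_base_vector + vf_id * num_msix_per. A standalone sketch with invented example values:

/* Standalone illustration of the index math above; all constants are
 * made-up examples, not values from the ice driver. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t vf_base_id = 64;		/* hypothetical first device-wide VF number */
	uint32_t vf_id = 5;			/* PF-relative VF index */
	uint32_t sriov_base_vector = 100;	/* hypothetical */
	uint32_t num_msix_per = 17;		/* hypothetical MSI-X vectors per VF */

	uint32_t abs_vf = vf_base_id + vf_id;	/* device-based VF number */
	uint32_t reg_idx = abs_vf / 32;		/* which 32-bit register */
	uint32_t bit_idx = abs_vf % 32;		/* which bit inside it */
	uint32_t first_vector = sriov_base_vector + vf_id * num_msix_per;

	printf("abs_vf=%u reg_idx=%u bit_idx=%u first_vector=%u\n",
	       abs_vf, reg_idx, bit_idx, first_vector);
	return 0;
}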
|
D | ice_vf_lib.c |
     25  struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)    in ice_get_vf_by_id() argument
     30  hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {    in ice_get_vf_by_id()
     31  if (vf->vf_id == vf_id) {    in ice_get_vf_by_id()
    268  vf->vf_id, err);    in ice_vf_recreate_vsi()
    294  vf->vf_id);    in ice_vf_rebuild_vsi()
    324  vf->vf_id, err);    in ice_vf_rebuild_host_vlan_cfg()
    336  ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);    in ice_vf_rebuild_host_vlan_cfg()
    343  vf->vf_id, vsi->idx, err);    in ice_vf_rebuild_host_vlan_cfg()
    368  vf->min_tx_rate, vf->vf_id, err);    in ice_vf_rebuild_host_tx_rate_cfg()
    377  vf->max_tx_rate, vf->vf_id, err);    in ice_vf_rebuild_host_tx_rate_cfg()
    [all …]
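Note: ice_get_vf_by_id() (lines 25-31) walks an RCU-protected hashtable keyed by vf_id and re-checks vf->vf_id because distinct IDs can hash to the same bucket. A hedged, generic sketch of that lookup pattern; the structure, table size and function names are illustrative, not ice's, and the real driver also takes a reference before returning.

/* Generic sketch of an RCU-protected hashtable lookup keyed by vf_id. */
#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_vf {
	struct hlist_node entry;
	u16 vf_id;
};

static DEFINE_HASHTABLE(demo_vf_table, 4);	/* 16 buckets, for illustration */

static struct demo_vf *demo_get_vf_by_id(u16 vf_id)
{
	struct demo_vf *vf;

	rcu_read_lock();
	hash_for_each_possible_rcu(demo_vf_table, vf, entry, vf_id) {
		if (vf->vf_id == vf_id) {	/* IDs can share a bucket */
			rcu_read_unlock();
			return vf;	/* real code would take a reference first */
		}
	}
	rcu_read_unlock();
	return NULL;
}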
|
D | ice_virtchnl_fdir.c |
    144  vf->vf_id);    in ice_vf_start_ctrl_vsi()
    151  vf->vf_id);    in ice_vf_start_ctrl_vsi()
    312  dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);    in ice_vc_fdir_set_flow_fld()
    405  flow, vf->vf_id);    in ice_vc_fdir_set_flow_hdr()
    465  flow, vf->vf_id);    in ice_vc_fdir_set_flow_hdr()
    499  dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);    in ice_vc_fdir_rem_prof()
    669  vf->vf_id);    in ice_vc_fdir_write_flow_prof()
    676  vf->vf_id);    in ice_vc_fdir_write_flow_prof()
    691  flow, vf->vf_id);    in ice_vc_fdir_write_flow_prof()
    700  flow, vf->vf_id);    in ice_vc_fdir_write_flow_prof()
    [all …]
|
D | ice_virtchnl.c |
    194  ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,    in ice_vc_vf_broadcast()
    245  ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,    in ice_vc_notify_vf_link_state()
    305  aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,    in ice_vc_send_msg_to_vf()
    309  vf->vf_id, aq_ret,    in ice_vc_send_msg_to_vf()
    780  vf->vf_id);    in ice_vc_handle_rss_cfg()
    787  vf->vf_id);    in ice_vc_handle_rss_cfg()
    801  vf->vf_id);    in ice_vc_handle_rss_cfg()
    884  vf->vf_id, status);    in ice_vc_handle_rss_cfg()
   1026  vf->vf_id);    in ice_vc_cfg_promiscuous_mode_msg()
   1072  vf->vf_id, ret);    in ice_vc_cfg_promiscuous_mode_msg()
    [all …]
|
D | ice_repr.c |
     38  repr->vf->vf_id);    in ice_repr_get_phys_port_name()
    153  int vf_id = np->repr->vf->vf_id;    in ice_repr_sp_stats64() local
    158  tx_ring = np->vsi->tx_rings[vf_id];    in ice_repr_sp_stats64()
    165  rx_ring = np->vsi->rx_rings[vf_id];    in ice_repr_sp_stats64()
|
/linux-6.6.21/drivers/vfio/pci/pds/ |
D | cmds.c |
     94  .vf_id = cpu_to_le16(pds_vfio->vf_id),    in pds_vfio_suspend_wait_device_cmd()
    117  pds_vfio->vf_id, jiffies_to_msecs(time_done - time_start));    in pds_vfio_suspend_wait_device_cmd()
    122  pds_vfio->vf_id);    in pds_vfio_suspend_wait_device_cmd()
    134  .vf_id = cpu_to_le16(pds_vfio->vf_id),    in pds_vfio_suspend_device_cmd()
    142  dev_dbg(dev, "vf%u: Suspend device\n", pds_vfio->vf_id);    in pds_vfio_suspend_device_cmd()
    151  dev_err(dev, "vf%u: Suspend failed: %pe\n", pds_vfio->vf_id,    in pds_vfio_suspend_device_cmd()
    168  .vf_id = cpu_to_le16(pds_vfio->vf_id),    in pds_vfio_resume_device_cmd()
    175  dev_dbg(dev, "vf%u: Resume device\n", pds_vfio->vf_id);    in pds_vfio_resume_device_cmd()
    185  .vf_id = cpu_to_le16(pds_vfio->vf_id),    in pds_vfio_get_lm_state_size_cmd()
    192  dev_dbg(dev, "vf%u: Get migration status\n", pds_vfio->vf_id);    in pds_vfio_get_lm_state_size_cmd()
    [all …]
|
D | vfio_dev.c |
    146  int err, vf_id, pci_id;    in pds_vfio_init_device() local
    148  vf_id = pci_iov_vf_id(pdev);    in pds_vfio_init_device()
    149  if (vf_id < 0)    in pds_vfio_init_device()
    150  return vf_id;    in pds_vfio_init_device()
    156  pds_vfio->vf_id = vf_id;    in pds_vfio_init_device()
    168  __func__, pci_dev_id(pci_physfn(pdev)), pci_id, vf_id,    in pds_vfio_init_device()
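Note: pds_vfio_init_device() gets the VF's index under its PF from pci_iov_vf_id(), which returns the index or a negative errno, and caches it in pds_vfio->vf_id. A standalone sketch of the arithmetic such a helper performs; the offset/stride values are invented, whereas the real helper reads them from the PF's SR-IOV capability.

/* Standalone sketch: deriving a VF index from PCI routing IDs plus the
 * SR-IOV "First VF Offset" and "VF Stride" fields.  Values are invented. */
#include <stdio.h>

static int vf_index(int vf_routing_id, int pf_routing_id,
		    int first_vf_offset, int vf_stride)
{
	if (vf_stride <= 0)
		return -1;	/* stand-in for a negative errno */
	return (vf_routing_id - (pf_routing_id + first_vf_offset)) / vf_stride;
}

int main(void)
{
	/* e.g. PF at routing ID 0x00, first VF at offset 0x80, stride 4 */
	int vf_id = vf_index(0x88, 0x00, 0x80, 4);

	if (vf_id < 0)		/* mirrors: if (vf_id < 0) return vf_id; */
		return 1;
	printf("vf_id = %d\n", vf_id);	/* -> 2 */
	return 0;
}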
|
/linux-6.6.21/drivers/net/ethernet/intel/i40e/ |
D | i40e_virtchnl_pf.h |
     70  s16 vf_id;    member
    118  int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
    126  int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
    127  int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
    129  int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
    131  int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
    133  int vf_id, struct ifla_vf_info *ivi);
    134  int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
    135  int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
    142  int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
|
D | i40e_virtchnl_pf.c |
     28  int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;    in i40e_vc_vf_broadcast()
    114  int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;    in i40e_vc_notify_vf_link_state()
    158  u16 vf_id;    in i40e_restore_all_vfs_msi_state() local
    172  pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);    in i40e_restore_all_vfs_msi_state()
    173  while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {    in i40e_restore_all_vfs_msi_state()
    193  if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)    in i40e_vc_notify_vf_reset()
    201  abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;    in i40e_vc_notify_vf_reset()
    244  vf->vf_id);    in i40e_vc_reset_vf()
    248  vf->vf_id);    in i40e_vc_reset_vf()
    263  return (vsi && (vsi->vf_id == vf->vf_id));    in i40e_vc_isvalid_vsi_id()
    [all …]
|
D | i40e_client.c |
     17  u32 vf_id, u8 *msg, u16 len);
     29  bool is_vf, u32 vf_id,
     83  i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)    in i40e_notify_client_of_vf_msg() argument
    100  vf_id, msg, len);    in i40e_notify_client_of_vf_msg()
    196  void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)    in i40e_notify_client_of_vf_reset() argument
    211  cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id);    in i40e_notify_client_of_vf_reset()
    248  int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id)    in i40e_vf_client_capable() argument
    265  vf_id);    in i40e_vf_client_capable()
    540  u32 vf_id, u8 *msg, u16 len)    in i40e_client_virtchnl_send() argument
    546  err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_RDMA,    in i40e_client_virtchnl_send()
    [all …]
|
/linux-6.6.21/drivers/vdpa/pds/ |
D | cmds.c |
     23  .vdpa_init.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),    in pds_vdpa_init_hw()
     45  .vdpa.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),    in pds_vdpa_cmd_reset()
     65  .vdpa_status.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),    in pds_vdpa_cmd_set_status()
     86  .vdpa_setattr.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),    in pds_vdpa_cmd_set_mac()
    109  .vdpa_setattr.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),    in pds_vdpa_cmd_set_max_vq_pairs()
    133  .vdpa_vq_init.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),    in pds_vdpa_cmd_init_vq()
    167  .vdpa_vq_reset.vf_id = cpu_to_le16(pdsv->vdpa_aux->vf_id),    in pds_vdpa_cmd_reset_vq()
|
/linux-6.6.21/include/linux/pds/ |
D | pds_adminq.h |
    631  __le16 vf_id;    member
    643  __le16 vf_id;    member
    678  __le16 vf_id;    member
    693  __le16 vf_id;    member
    720  __le16 vf_id;    member
    746  __le16 vf_id;    member
    783  __le16 vf_id;    member
    816  __le16 vf_id;    member
    857  __le16 vf_id;    member
    870  __le16 vf_id;    member
    [all …]
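Note: these adminq structs declare vf_id as __le16, which is why the pds_vfio and pds_vdpa call sites above fill the field with cpu_to_le16(). A standalone illustration of the same conversion, using a hypothetical command layout and userspace htole16() as a stand-in for cpu_to_le16():

/* Illustration only: little-endian wire fields need an explicit
 * conversion on big-endian hosts.  The struct layout is hypothetical. */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

struct demo_adminq_cmd {
	uint8_t  opcode;
	uint8_t  rsvd;
	uint16_t vf_id;		/* little-endian on the wire */
};

int main(void)
{
	uint16_t vf_id = 7;
	struct demo_adminq_cmd cmd = {
		.opcode = 0x42,			/* made-up opcode */
		.vf_id  = htole16(vf_id),	/* stand-in for cpu_to_le16() */
	};
	const uint8_t *b = (const uint8_t *)&cmd.vf_id;

	printf("wire vf_id bytes: %02x %02x\n", b[0], b[1]);
	return 0;
}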
|
/linux-6.6.21/drivers/scsi/bfa/ |
D | bfad_bsg.h |
    176  u16 vf_id;    member
    292  u16 vf_id;    member
    300  u16 vf_id;    member
    308  u16 vf_id;    member
    316  u16 vf_id;    member
    326  u16 vf_id;    member
    337  u16 vf_id;    member
    346  u16 vf_id;    member
    358  u16 vf_id;    member
    366  u16 vf_id;    member
    [all …]
|
/linux-6.6.21/drivers/crypto/marvell/octeontx2/ |
D | otx2_cptpf_mbox.c |
     57  "AF not responding to VF%d messages\n", vf->vf_id);    in forward_to_af()
     72  otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id,    in handle_msg_get_caps()
     96  otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));    in handle_msg_get_eng_grp_num()
    117  otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));    in handle_msg_kvf_limits()
    293  otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);    in cptpf_handle_vf_req()
    294  otx2_mbox_msg_send(&cptpf->vfpf_mbox, vf->vf_id);    in cptpf_handle_vf_req()
    344  mdev = &mbox->dev[vf->vf_id];    in otx2_cptpf_vfpf_mbox_handler()
    354  ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);    in otx2_cptpf_vfpf_mbox_handler()
    372  otx2_mbox_msg_send(mbox, vf->vf_id);    in otx2_cptpf_vfpf_mbox_handler()
    462  int vf_id, int size)    in forward_to_vf() argument
    [all …]
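Note: line 354 reflects the RVU convention that a VF's function number within the 16-bit pcifunc is vf_id + 1, with function 0 reserved for the PF itself. A hedged sketch of that encoding follows; the field widths below are assumptions for illustration.

/* Hedged sketch of an RVU-style pcifunc encoding: PF number in the high
 * bits, function number (0 = PF, VFs from 1) in the low bits.  The shift
 * and mask are assumed values. */
#include <stdio.h>

#define DEMO_FUNC_MASK	0x3ff	/* assumed width of the func field */
#define DEMO_PF_SHIFT	10	/* assumed position of the PF field */

static unsigned int demo_pcifunc(unsigned int pf, unsigned int vf_id)
{
	return (pf << DEMO_PF_SHIFT) | ((vf_id + 1) & DEMO_FUNC_MASK);
}

int main(void)
{
	printf("pf=2 vf_id=0 -> pcifunc=0x%04x\n", demo_pcifunc(2, 0));
	printf("pf=2 vf_id=5 -> pcifunc=0x%04x\n", demo_pcifunc(2, 5));
	return 0;
}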
|
/linux-6.6.21/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_sriov.c |
     56  static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)    in bnxt_vf_ndo_prep() argument
     62  if (vf_id >= bp->pf.active_vfs) {    in bnxt_vf_ndo_prep()
     63  netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);    in bnxt_vf_ndo_prep()
     69  int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)    in bnxt_set_vf_spoofchk() argument
     81  rc = bnxt_vf_ndo_prep(bp, vf_id);    in bnxt_set_vf_spoofchk()
     85  vf = &bp->pf.vf[vf_id];    in bnxt_set_vf_spoofchk()
    161  int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)    in bnxt_set_vf_trust() argument
    166  if (bnxt_vf_ndo_prep(bp, vf_id))    in bnxt_set_vf_trust()
    169  vf = &bp->pf.vf[vf_id];    in bnxt_set_vf_trust()
    179  int bnxt_get_vf_config(struct net_device *dev, int vf_id,    in bnxt_get_vf_config() argument
    [all …]
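Note: bnxt_vf_ndo_prep() (lines 56-63) is the usual guard in the .ndo_set_vf_* paths: the caller-supplied vf_id is rejected unless it refers to an active VF, and only then is it used to index the PF's per-VF array. A standalone sketch of the pattern with simplified types:

/* Simplified, standalone sketch of the validate-then-index pattern. */
#include <stdio.h>
#include <errno.h>

#define DEMO_MAX_VFS 8

struct demo_vf_info { int spoofchk; };

static struct demo_vf_info demo_vf[DEMO_MAX_VFS];
static int demo_active_vfs = 4;

static int demo_vf_ndo_prep(int vf_id)
{
	if (vf_id < 0 || vf_id >= demo_active_vfs) {
		fprintf(stderr, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

static int demo_set_vf_spoofchk(int vf_id, int setting)
{
	int rc = demo_vf_ndo_prep(vf_id);

	if (rc)
		return rc;
	demo_vf[vf_id].spoofchk = setting;	/* safe: vf_id already validated */
	return 0;
}

int main(void)
{
	printf("set vf 2: %d\n", demo_set_vf_spoofchk(2, 1));
	printf("set vf 9: %d\n", demo_set_vf_spoofchk(9, 1));
	return 0;
}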
|
/linux-6.6.21/drivers/net/ethernet/cavium/thunder/ |
D | nic.h |
    285  u8 vf_id;    member
    427  u8 vf_id;    member
    463  u8 vf_id;    member
    470  u8 vf_id;    member
    477  u8 vf_id;    member
    485  u8 vf_id;    member
    492  u8 vf_id;    member
    502  u8 vf_id;    member
    520  u8 vf_id;    member
    526  u8 vf_id;    member
    [all …]
|
D | nic_main.c |
    165  mbx.nic_cfg.vf_id = vf;    in nic_mbx_send_ready()
    236  bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);    in nic_get_bgx_stats()
    237  lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);    in nic_get_bgx_stats()
    240  mbx.bgx_stats.vf_id = bgx->vf_id;    in nic_get_bgx_stats()
    249  nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);    in nic_get_bgx_stats()
    480  vnic = cfg->vf_id;    in nic_config_cpi()
    504  qset = cfg->vf_id;    in nic_config_cpi()
    545  nic->cpi_base[cfg->vf_id] = cpi_base;    in nic_config_cpi()
    546  nic->rssi_base[cfg->vf_id] = rssi_base;    in nic_config_cpi()
    571  rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;    in nic_config_rss()
    [all …]
|
/linux-6.6.21/include/linux/net/intel/ |
D | i40e_client.h |
    116  u32 vf_id, u8 *msg, u16 len);
    129  bool is_vf, u32 vf_id,
    152  struct i40e_client *client, u32 vf_id,
    157  struct i40e_client *client, u32 vf_id);
    165  struct i40e_client *client, u32 vf_id);
|
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/ |
D | sriov.c |
    337  int vf_id,    in mlx5_sriov_blocking_notifier_unregister() argument
    344  if (WARN_ON(vf_id < 0 || vf_id >= sriov->num_vfs))    in mlx5_sriov_blocking_notifier_unregister()
    347  vfs_ctx = &sriov->vfs_ctx[vf_id];    in mlx5_sriov_blocking_notifier_unregister()
    363  int vf_id,    in mlx5_sriov_blocking_notifier_register() argument
    370  if (vf_id < 0 || vf_id >= sriov->num_vfs)    in mlx5_sriov_blocking_notifier_register()
    373  vfs_ctx = &sriov->vfs_ctx[vf_id];    in mlx5_sriov_blocking_notifier_register()
|
/linux-6.6.21/drivers/net/ethernet/amd/pds_core/ |
D | devlink.c |
     45  int vf_id;    in pdsc_dl_enable_set() local
     55  for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {    in pdsc_dl_enable_set()
     56  struct pdsc *vf = pdsc->vfs[vf_id].vf;    in pdsc_dl_enable_set()
|
/linux-6.6.21/include/linux/qed/ |
D | qed_iov_if.h |
     20  int (*get_config) (struct qed_dev *cdev, int vf_id,
     23  int (*set_link_state) (struct qed_dev *cdev, int vf_id,
|
/linux-6.6.21/drivers/crypto/marvell/octeontx/ |
D | otx_cptvf_mbox.c |
     60  static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)    in dump_mbox_msg() argument
     66  if (vf_id >= 0)    in dump_mbox_msg()
     68  get_mbox_opcode_str(mbox_msg->msg), vf_id,    in dump_mbox_msg()
|