Lines matching refs: bp
(Cross-reference listing from the Linux bnxt_en driver's SR-IOV code, bnxt_sriov.c. Each entry gives the source line number, the matching source line, and the enclosing function; "argument" or "local" marks whether "bp" is a parameter or a local variable there.)
27 static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, in bnxt_hwrm_fwd_async_event_cmpl() argument
34 rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL); in bnxt_hwrm_fwd_async_event_cmpl()
48 rc = hwrm_req_send(bp, req); in bnxt_hwrm_fwd_async_event_cmpl()
51 netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", in bnxt_hwrm_fwd_async_event_cmpl()
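
Note: the four hits above trace the request lifecycle used throughout this file: hwrm_req_init() allocates a typed request, the caller fills it in, hwrm_req_send() issues it, and failures are logged against bp->dev. A minimal sketch of the whole function follows; the encap field names and the 0xffff broadcast target are assumptions, since only the init/send/log lines appear in the listing:

    static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                              struct bnxt_vf_info *vf,
                                              u16 event_id)
    {
        struct hwrm_fwd_async_event_cmpl_input *req;
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
        if (rc)
            goto exit;

        /* vf == NULL is taken to mean "broadcast to every VF" */
        if (vf)
            req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
            req->encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl = (struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        rc = hwrm_req_send(bp, req);
    exit:
        if (rc)
            netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", rc);
        return rc;
    }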
56 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) in bnxt_vf_ndo_prep() argument
58 if (!bp->pf.active_vfs) { in bnxt_vf_ndo_prep()
59 netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); in bnxt_vf_ndo_prep()
62 if (vf_id >= bp->pf.active_vfs) { in bnxt_vf_ndo_prep()
63 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); in bnxt_vf_ndo_prep()
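
Note: lines 56-63 show almost the entire validation helper that every VF ndo below calls first. A plausible completion; the -EINVAL return values are an assumption, since the listing only shows the checks and error messages:

    static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
    {
        if (!bp->pf.active_vfs) {
            netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
            return -EINVAL;
        }
        if (vf_id >= bp->pf.active_vfs) {
            netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
            return -EINVAL;
        }
        return 0;
    }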
71 struct bnxt *bp = netdev_priv(dev); in bnxt_set_vf_spoofchk() local
78 if (bp->hwrm_spec_code < 0x10701) in bnxt_set_vf_spoofchk()
81 rc = bnxt_vf_ndo_prep(bp, vf_id); in bnxt_set_vf_spoofchk()
85 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_spoofchk()
98 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); in bnxt_set_vf_spoofchk()
102 rc = hwrm_req_send(bp, req); in bnxt_set_vf_spoofchk()
113 static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) in bnxt_hwrm_func_qcfg_flags() argument
119 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); in bnxt_hwrm_func_qcfg_flags()
123 req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff); in bnxt_hwrm_func_qcfg_flags()
124 resp = hwrm_req_hold(bp, req); in bnxt_hwrm_func_qcfg_flags()
125 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_qcfg_flags()
128 hwrm_req_drop(bp, req); in bnxt_hwrm_func_qcfg_flags()
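
Note: bnxt_hwrm_func_qcfg_flags() (lines 119-128) shows the read-response variant of the lifecycle: hwrm_req_hold() pins the response buffer so it can still be read after hwrm_req_send() returns, and hwrm_req_drop() releases both buffers. A sketch, assuming the function caches resp->flags in the vf struct (what is actually read from the response is not visible in the listing):

    static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
    {
        struct hwrm_func_qcfg_output *resp;
        struct hwrm_func_qcfg_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
        if (rc)
            return rc;

        /* a PF queries the VF's fid; a VF queries itself (0xffff) */
        req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
        resp = hwrm_req_hold(bp, req);   /* keep resp valid past send */
        rc = hwrm_req_send(bp, req);
        if (!rc)
            vf->func_qcfg_flags = le16_to_cpu(resp->flags);   /* assumption */
        hwrm_req_drop(bp, req);          /* release request and response */
        return rc;
    }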
132 bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) in bnxt_is_trusted_vf() argument
134 if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) in bnxt_is_trusted_vf()
137 bnxt_hwrm_func_qcfg_flags(bp, vf); in bnxt_is_trusted_vf()
141 static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) in bnxt_hwrm_set_trusted_vf() argument
146 if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) in bnxt_hwrm_set_trusted_vf()
149 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); in bnxt_hwrm_set_trusted_vf()
158 return hwrm_req_send(bp, req); in bnxt_hwrm_set_trusted_vf()
163 struct bnxt *bp = netdev_priv(dev); in bnxt_set_vf_trust() local
166 if (bnxt_vf_ndo_prep(bp, vf_id)) in bnxt_set_vf_trust()
169 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_trust()
175 bnxt_hwrm_set_trusted_vf(bp, vf); in bnxt_set_vf_trust()
182 struct bnxt *bp = netdev_priv(dev); in bnxt_get_vf_config() local
186 rc = bnxt_vf_ndo_prep(bp, vf_id); in bnxt_get_vf_config()
191 vf = &bp->pf.vf[vf_id]; in bnxt_get_vf_config()
205 ivi->trusted = bnxt_is_trusted_vf(bp, vf); in bnxt_get_vf_config()
218 struct bnxt *bp = netdev_priv(dev); in bnxt_set_vf_mac() local
223 rc = bnxt_vf_ndo_prep(bp, vf_id); in bnxt_set_vf_mac()
233 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_mac()
235 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); in bnxt_set_vf_mac()
244 return hwrm_req_send(bp, req); in bnxt_set_vf_mac()
250 struct bnxt *bp = netdev_priv(dev); in bnxt_set_vf_vlan() local
256 if (bp->hwrm_spec_code < 0x10201) in bnxt_set_vf_vlan()
262 rc = bnxt_vf_ndo_prep(bp, vf_id); in bnxt_set_vf_vlan()
272 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_vlan()
277 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); in bnxt_set_vf_vlan()
282 rc = hwrm_req_send(bp, req); in bnxt_set_vf_vlan()
292 struct bnxt *bp = netdev_priv(dev); in bnxt_set_vf_bw() local
298 rc = bnxt_vf_ndo_prep(bp, vf_id); in bnxt_set_vf_bw()
302 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_bw()
303 pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); in bnxt_set_vf_bw()
305 netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", in bnxt_set_vf_bw()
311 netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", in bnxt_set_vf_bw()
317 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); in bnxt_set_vf_bw()
324 rc = hwrm_req_send(bp, req); in bnxt_set_vf_bw()
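
Note: bnxt_set_vf_bw() validates the requested rates against the PF's current link speed before issuing HWRM_FUNC_CFG (lines 302-311). A sketch of those checks, assuming max_tx_rate/min_tx_rate are the ndo parameters and that both failures return -EINVAL (only the log messages are visible in the listing):

    pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
    if (max_tx_rate > pf_link_speed) {
        netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
                    max_tx_rate, vf_id);
        return -EINVAL;
    }
    if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
        netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                    min_tx_rate, vf_id);
        return -EINVAL;
    }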
335 struct bnxt *bp = netdev_priv(dev); in bnxt_set_vf_link_state() local
339 rc = bnxt_vf_ndo_prep(bp, vf_id); in bnxt_set_vf_link_state()
343 vf = &bp->pf.vf[vf_id]; in bnxt_set_vf_link_state()
357 netdev_err(bp->dev, "Invalid link option\n"); in bnxt_set_vf_link_state()
362 rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf, in bnxt_set_vf_link_state()
367 static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) in bnxt_set_vf_attr() argument
373 vf = &bp->pf.vf[i]; in bnxt_set_vf_attr()
379 static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) in bnxt_hwrm_func_vf_resource_free() argument
382 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_func_vf_resource_free()
385 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE); in bnxt_hwrm_func_vf_resource_free()
389 hwrm_req_hold(bp, req); in bnxt_hwrm_func_vf_resource_free()
392 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_vf_resource_free()
396 hwrm_req_drop(bp, req); in bnxt_hwrm_func_vf_resource_free()
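
Note: lines 385-396 show the one-request/many-sends pattern: the request is held once, re-targeted at each VF in a loop, and dropped at the end. A sketch, assuming VF fids run upward from pf->first_vf_id (the loop bounds are not visible in the listing):

    rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
    if (rc)
        return rc;

    hwrm_req_hold(bp, req);   /* reuse one request for every VF */
    for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
        req->vf_id = cpu_to_le16(i);
        rc = hwrm_req_send(bp, req);
        if (rc)
            break;
    }
    hwrm_req_drop(bp, req);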
400 static void bnxt_free_vf_resources(struct bnxt *bp) in bnxt_free_vf_resources() argument
402 struct pci_dev *pdev = bp->pdev; in bnxt_free_vf_resources()
405 kfree(bp->pf.vf_event_bmap); in bnxt_free_vf_resources()
406 bp->pf.vf_event_bmap = NULL; in bnxt_free_vf_resources()
409 if (bp->pf.hwrm_cmd_req_addr[i]) { in bnxt_free_vf_resources()
411 bp->pf.hwrm_cmd_req_addr[i], in bnxt_free_vf_resources()
412 bp->pf.hwrm_cmd_req_dma_addr[i]); in bnxt_free_vf_resources()
413 bp->pf.hwrm_cmd_req_addr[i] = NULL; in bnxt_free_vf_resources()
417 bp->pf.active_vfs = 0; in bnxt_free_vf_resources()
418 kfree(bp->pf.vf); in bnxt_free_vf_resources()
419 bp->pf.vf = NULL; in bnxt_free_vf_resources()
422 static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) in bnxt_alloc_vf_resources() argument
424 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_vf_resources()
427 bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); in bnxt_alloc_vf_resources()
428 if (!bp->pf.vf) in bnxt_alloc_vf_resources()
431 bnxt_set_vf_attr(bp, num_vfs); in bnxt_alloc_vf_resources()
439 bp->pf.hwrm_cmd_req_addr[i] = in bnxt_alloc_vf_resources()
441 &bp->pf.hwrm_cmd_req_dma_addr[i], in bnxt_alloc_vf_resources()
444 if (!bp->pf.hwrm_cmd_req_addr[i]) in bnxt_alloc_vf_resources()
448 struct bnxt_vf_info *vf = &bp->pf.vf[k]; in bnxt_alloc_vf_resources()
450 vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + in bnxt_alloc_vf_resources()
453 bp->pf.hwrm_cmd_req_dma_addr[i] + j * in bnxt_alloc_vf_resources()
460 bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); in bnxt_alloc_vf_resources()
461 if (!bp->pf.vf_event_bmap) in bnxt_alloc_vf_resources()
464 bp->pf.hwrm_cmd_req_pages = nr_pages; in bnxt_alloc_vf_resources()
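
Note: lines 427-464 allocate one bnxt_vf_info per VF, then carve per-VF firmware command buffers out of coherent DMA pages: each page holds several fixed-size request slots, and each VF is handed one slot (CPU and DMA address). A sketch of the carving; the constant names BNXT_PAGE_SIZE, BNXT_HWRM_REQ_MAX_SIZE and BNXT_HWRM_REQS_PER_PAGE are assumptions, as none of them appear in the listing:

    for (i = 0, k = 0; i < nr_pages; i++) {
        bp->pf.hwrm_cmd_req_addr[i] =
            dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                               &bp->pf.hwrm_cmd_req_dma_addr[i], GFP_KERNEL);
        if (!bp->pf.hwrm_cmd_req_addr[i])
            return -ENOMEM;

        /* hand out one fixed-size slot per VF within this page */
        for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++, k++) {
            struct bnxt_vf_info *vf = &bp->pf.vf[k];

            vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                    j * BNXT_HWRM_REQ_MAX_SIZE;
            vf->hwrm_cmd_req_dma_addr = bp->pf.hwrm_cmd_req_dma_addr[i] +
                                        j * BNXT_HWRM_REQ_MAX_SIZE;
        }
    }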
468 static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) in bnxt_hwrm_func_buf_rgtr() argument
473 rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR); in bnxt_hwrm_func_buf_rgtr()
477 req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); in bnxt_hwrm_func_buf_rgtr()
480 req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); in bnxt_hwrm_func_buf_rgtr()
481 req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); in bnxt_hwrm_func_buf_rgtr()
482 req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); in bnxt_hwrm_func_buf_rgtr()
483 req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); in bnxt_hwrm_func_buf_rgtr()
485 return hwrm_req_send(bp, req); in bnxt_hwrm_func_buf_rgtr()
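
Note: bnxt_hwrm_func_buf_rgtr() hands firmware the DMA addresses of those request pages (four address fields, lines 480-483) so that commands forwarded from VFs can be written directly into PF memory. Assuming 4 KiB pages and 128-byte request slots (neither constant is shown in this listing), the four page-address fields bound the scheme at 4 * 4096 / 128 = 128 VFs.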
488 static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) in __bnxt_set_vf_params() argument
494 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); in __bnxt_set_vf_params()
498 vf = &bp->pf.vf[vf_id]; in __bnxt_set_vf_params()
518 return hwrm_req_send(bp, req); in __bnxt_set_vf_params()
524 static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) in bnxt_hwrm_func_vf_resc_cfg() argument
527 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_hwrm_func_vf_resc_cfg()
530 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_func_vf_resc_cfg()
535 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG); in bnxt_hwrm_func_vf_resc_cfg()
539 if (bp->flags & BNXT_FLAG_CHIP_P5) { in bnxt_hwrm_func_vf_resc_cfg()
540 vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); in bnxt_hwrm_func_vf_resc_cfg()
543 vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; in bnxt_hwrm_func_vf_resc_cfg()
545 vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp); in bnxt_hwrm_func_vf_resc_cfg()
546 vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp); in bnxt_hwrm_func_vf_resc_cfg()
547 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_hwrm_func_vf_resc_cfg()
548 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; in bnxt_hwrm_func_vf_resc_cfg()
550 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; in bnxt_hwrm_func_vf_resc_cfg()
551 vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; in bnxt_hwrm_func_vf_resc_cfg()
552 vf_vnics = hw_resc->max_vnics - bp->nr_vnics; in bnxt_hwrm_func_vf_resc_cfg()
554 vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; in bnxt_hwrm_func_vf_resc_cfg()
569 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) in bnxt_hwrm_func_vf_resc_cfg()
597 if (bp->flags & BNXT_FLAG_CHIP_P5) in bnxt_hwrm_func_vf_resc_cfg()
600 hwrm_req_hold(bp, req); in bnxt_hwrm_func_vf_resc_cfg()
603 __bnxt_set_vf_params(bp, i); in bnxt_hwrm_func_vf_resc_cfg()
606 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_vf_resc_cfg()
625 if (bp->flags & BNXT_FLAG_CHIP_P5) in bnxt_hwrm_func_vf_resc_cfg()
630 hwrm_req_drop(bp, req); in bnxt_hwrm_func_vf_resc_cfg()
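
Note: bnxt_hwrm_func_vf_resc_cfg() implements the resource-manager model: everything the PF is not using itself is offered to firmware as per-VF minimums and maximums, and on P5 chips the budget is computed from NQs/MSI-X rather than ring groups (lines 539-554). A worked example with made-up numbers: if hw_resc->max_rx_rings = 64 and the PF runs rx_nr_rings = 8 with aggregation rings enabled, the PF consumes 8 * 2 = 16 RX slots (line 548), so vf_rx_rings = 64 - 16 = 48 remain for all VFs; with 8 VFs, firmware may grant each VF up to 48 rings but can only guarantee 48 / 8 = 6 each under a strict minimum policy.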
637 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) in bnxt_hwrm_func_cfg() argument
640 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_hwrm_func_cfg()
641 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_func_cfg()
648 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); in bnxt_hwrm_func_cfg()
653 vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; in bnxt_hwrm_func_cfg()
654 vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs; in bnxt_hwrm_func_cfg()
655 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_hwrm_func_cfg()
656 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / in bnxt_hwrm_func_cfg()
659 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) / in bnxt_hwrm_func_cfg()
661 vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; in bnxt_hwrm_func_cfg()
662 vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs; in bnxt_hwrm_func_cfg()
663 vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; in bnxt_hwrm_func_cfg()
677 mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; in bnxt_hwrm_func_cfg()
692 hwrm_req_hold(bp, req); in bnxt_hwrm_func_cfg()
697 rc = hwrm_req_send(bp, req); in bnxt_hwrm_func_cfg()
702 rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, in bnxt_hwrm_func_cfg()
708 hwrm_req_drop(bp, req); in bnxt_hwrm_func_cfg()
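
Note: the legacy (non-resource-manager) path in bnxt_hwrm_func_cfg() divides leftover resources evenly up front: each pool is (maximum minus what the PF uses) / num_vfs, computed once and then programmed into every VF through a single held HWRM_FUNC_CFG request (lines 653-663 and 692-697). For example, with max_tx_rings = 64, tx_nr_rings = 8 and num_vfs = 7, each VF is configured with (64 - 8) / 7 = 8 TX rings.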
722 static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset) in bnxt_func_cfg() argument
724 if (BNXT_NEW_RM(bp)) in bnxt_func_cfg()
725 return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset); in bnxt_func_cfg()
727 return bnxt_hwrm_func_cfg(bp, num_vfs); in bnxt_func_cfg()
730 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) in bnxt_cfg_hw_sriov() argument
735 rc = bnxt_hwrm_func_buf_rgtr(bp); in bnxt_cfg_hw_sriov()
740 rc = bnxt_func_cfg(bp, *num_vfs, reset); in bnxt_cfg_hw_sriov()
743 netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n"); in bnxt_cfg_hw_sriov()
747 netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", in bnxt_cfg_hw_sriov()
752 bnxt_ulp_sriov_cfg(bp, *num_vfs); in bnxt_cfg_hw_sriov()
756 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) in bnxt_sriov_enable() argument
760 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; in bnxt_sriov_enable()
770 avail_cp = bnxt_get_avail_cp_rings_for_en(bp); in bnxt_sriov_enable()
771 avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp); in bnxt_sriov_enable()
779 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in bnxt_sriov_enable()
780 if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >= in bnxt_sriov_enable()
784 if (hw_resc->max_rx_rings - bp->rx_nr_rings >= in bnxt_sriov_enable()
788 if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings || in bnxt_sriov_enable()
792 if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings && in bnxt_sriov_enable()
796 if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >= in bnxt_sriov_enable()
807 netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n"); in bnxt_sriov_enable()
812 netdev_info(bp->dev, "Requested VFs %d, can enable %d\n", in bnxt_sriov_enable()
817 rc = bnxt_alloc_vf_resources(bp, *num_vfs); in bnxt_sriov_enable()
821 rc = bnxt_cfg_hw_sriov(bp, num_vfs, false); in bnxt_sriov_enable()
825 rc = pci_enable_sriov(bp->pdev, *num_vfs); in bnxt_sriov_enable()
827 bnxt_ulp_sriov_cfg(bp, 0); in bnxt_sriov_enable()
835 bnxt_hwrm_func_vf_resource_free(bp, *num_vfs); in bnxt_sriov_enable()
838 bnxt_hwrm_func_qcaps(bp); in bnxt_sriov_enable()
841 bnxt_free_vf_resources(bp); in bnxt_sriov_enable()
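
Note: lines 817-841 show the enable sequence and its unwind order: host memory first, then firmware reservations, then pci_enable_sriov(); on failure the teardown runs in reverse. A sketch of that control flow (the label names are assumptions):

    rc = bnxt_alloc_vf_resources(bp, *num_vfs);
    if (rc)
        goto err_out1;

    rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
    if (rc)
        goto err_out2;

    rc = pci_enable_sriov(bp->pdev, *num_vfs);
    if (rc) {
        bnxt_ulp_sriov_cfg(bp, 0);
        goto err_out2;
    }
    return 0;

    err_out2:
        /* firmware-side reservations back out first */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
        bnxt_hwrm_func_qcaps(bp);   /* PF caps changed; re-read them */
    err_out1:
        bnxt_free_vf_resources(bp);
        return rc;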
846 void bnxt_sriov_disable(struct bnxt *bp) in bnxt_sriov_disable() argument
848 u16 num_vfs = pci_num_vf(bp->pdev); in bnxt_sriov_disable()
854 devl_lock(bp->dl); in bnxt_sriov_disable()
855 bnxt_vf_reps_destroy(bp); in bnxt_sriov_disable()
857 if (pci_vfs_assigned(bp->pdev)) { in bnxt_sriov_disable()
859 bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); in bnxt_sriov_disable()
860 netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", in bnxt_sriov_disable()
863 pci_disable_sriov(bp->pdev); in bnxt_sriov_disable()
865 bnxt_hwrm_func_vf_resource_free(bp, num_vfs); in bnxt_sriov_disable()
867 devl_unlock(bp->dl); in bnxt_sriov_disable()
869 bnxt_free_vf_resources(bp); in bnxt_sriov_disable()
873 bnxt_restore_pf_fw_resources(bp); in bnxt_sriov_disable()
876 bnxt_ulp_sriov_cfg(bp, 0); in bnxt_sriov_disable()
882 struct bnxt *bp = netdev_priv(dev); in bnxt_sriov_configure() local
884 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { in bnxt_sriov_configure()
895 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { in bnxt_sriov_configure()
900 bp->sriov_cfg = true; in bnxt_sriov_configure()
903 if (pci_vfs_assigned(bp->pdev)) { in bnxt_sriov_configure()
910 if (num_vfs && num_vfs == bp->pf.active_vfs) in bnxt_sriov_configure()
914 bnxt_sriov_disable(bp); in bnxt_sriov_configure()
918 bnxt_sriov_enable(bp, &num_vfs); in bnxt_sriov_configure()
921 bp->sriov_cfg = false; in bnxt_sriov_configure()
922 wake_up(&bp->sriov_cfg_wait); in bnxt_sriov_configure()
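
Note: bnxt_sriov_configure() brackets all reconfiguration with bp->sriov_cfg, which other paths test and then sleep on via sriov_cfg_wait until SR-IOV (re)configuration finishes (lines 900-922). A sketch of the guard, assuming rtnl_lock() protects the flag update as is usual for ndo-adjacent state:

    rtnl_lock();
    bp->sriov_cfg = true;
    rtnl_unlock();

    /* reconfigure here: bnxt_sriov_disable(), then
     * bnxt_sriov_enable() if a non-zero count was requested */

    bp->sriov_cfg = false;
    wake_up(&bp->sriov_cfg_wait);   /* unblock anyone waiting on us */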
927 static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, in bnxt_hwrm_fwd_resp() argument
937 rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); in bnxt_hwrm_fwd_resp()
947 rc = hwrm_req_send(bp, req); in bnxt_hwrm_fwd_resp()
950 netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); in bnxt_hwrm_fwd_resp()
954 static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, in bnxt_hwrm_fwd_err_resp() argument
963 rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP); in bnxt_hwrm_fwd_err_resp()
970 rc = hwrm_req_send(bp, req); in bnxt_hwrm_fwd_err_resp()
973 netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); in bnxt_hwrm_fwd_err_resp()
977 static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, in bnxt_hwrm_exec_fwd_resp() argument
986 rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP); in bnxt_hwrm_exec_fwd_resp()
993 rc = hwrm_req_send(bp, req); in bnxt_hwrm_exec_fwd_resp()
996 netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); in bnxt_hwrm_exec_fwd_resp()
1000 static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) in bnxt_vf_configure_mac() argument
1010 bool trust = bnxt_is_trusted_vf(bp, vf); in bnxt_vf_configure_mac()
1016 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); in bnxt_vf_configure_mac()
1018 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); in bnxt_vf_configure_mac()
1020 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); in bnxt_vf_configure_mac()
1023 static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) in bnxt_vf_validate_set_mac() argument
1031 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); in bnxt_vf_validate_set_mac()
1038 if (bnxt_is_trusted_vf(bp, vf)) { in bnxt_vf_validate_set_mac()
1056 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); in bnxt_vf_validate_set_mac()
1057 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); in bnxt_vf_validate_set_mac()
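
Note: bnxt_vf_configure_mac() and bnxt_vf_validate_set_mac() (lines 1000-1057) decide whether a MAC-related command forwarded by a VF is executed (bnxt_hwrm_exec_fwd_resp) or rejected (bnxt_hwrm_fwd_err_resp). The policy implied by the hits: a trusted VF (bnxt_is_trusted_vf) may program any valid MAC, while an untrusted VF is only allowed the MAC the PF assigned it. A hedged sketch of the core check; the exact conditions are not all visible in the listing:

    if (!is_valid_ether_addr(addr))
        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

    if (bnxt_is_trusted_vf(bp, vf) || ether_addr_equal(addr, vf->mac_addr))
        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);

    return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);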
1060 static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) in bnxt_vf_set_link() argument
1067 bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); in bnxt_vf_set_link()
1074 mutex_lock(&bp->link_lock); in bnxt_vf_set_link()
1075 memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, in bnxt_vf_set_link()
1077 mutex_unlock(&bp->link_lock); in bnxt_vf_set_link()
1106 rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, in bnxt_vf_set_link()
1114 static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) in bnxt_vf_req_validate_snd() argument
1122 rc = bnxt_vf_configure_mac(bp, vf); in bnxt_vf_req_validate_snd()
1125 rc = bnxt_vf_validate_set_mac(bp, vf); in bnxt_vf_req_validate_snd()
1132 bp, vf, sizeof(struct hwrm_func_cfg_input)); in bnxt_vf_req_validate_snd()
1135 rc = bnxt_vf_set_link(bp, vf); in bnxt_vf_req_validate_snd()
1143 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) in bnxt_hwrm_exec_fwd_req() argument
1145 u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id; in bnxt_hwrm_exec_fwd_req()
1149 vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i); in bnxt_hwrm_exec_fwd_req()
1153 clear_bit(vf_id, bp->pf.vf_event_bmap); in bnxt_hwrm_exec_fwd_req()
1154 bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]); in bnxt_hwrm_exec_fwd_req()
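
Note: bnxt_hwrm_exec_fwd_req() drains the PF's VF-event bitmap: firmware sets one bit per VF that has a forwarded command waiting, and the PF scans, clears, and dispatches until no bits remain (lines 1145-1154). A plausible completion of the loop:

    void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
    {
        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

        while (1) {
            vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
            if (vf_id >= active_vfs)
                break;   /* no more pending VF commands */

            clear_bit(vf_id, bp->pf.vf_event_bmap);
            bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
            i = vf_id + 1;
        }
    }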
1159 int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict) in bnxt_approve_mac() argument
1164 if (!BNXT_VF(bp)) in bnxt_approve_mac()
1167 if (bp->hwrm_spec_code < 0x10202) { in bnxt_approve_mac()
1168 if (is_valid_ether_addr(bp->vf.mac_addr)) in bnxt_approve_mac()
1173 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); in bnxt_approve_mac()
1180 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT); in bnxt_approve_mac()
1181 rc = hwrm_req_send(bp, req); in bnxt_approve_mac()
1185 netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", in bnxt_approve_mac()
1192 void bnxt_update_vf_mac(struct bnxt *bp) in bnxt_update_vf_mac() argument
1198 if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS)) in bnxt_update_vf_mac()
1203 resp = hwrm_req_hold(bp, req); in bnxt_update_vf_mac()
1204 if (hwrm_req_send(bp, req)) in bnxt_update_vf_mac()
1214 if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) { in bnxt_update_vf_mac()
1215 memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN); in bnxt_update_vf_mac()
1219 if (!is_valid_ether_addr(bp->vf.mac_addr)) in bnxt_update_vf_mac()
1224 if (is_valid_ether_addr(bp->vf.mac_addr)) in bnxt_update_vf_mac()
1225 eth_hw_addr_set(bp->dev, bp->vf.mac_addr); in bnxt_update_vf_mac()
1227 hwrm_req_drop(bp, req); in bnxt_update_vf_mac()
1229 bnxt_approve_mac(bp, bp->dev->dev_addr, false); in bnxt_update_vf_mac()
1234 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) in bnxt_cfg_hw_sriov() argument
1241 void bnxt_sriov_disable(struct bnxt *bp) in bnxt_sriov_disable() argument
1245 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) in bnxt_hwrm_exec_fwd_req() argument
1247 netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n"); in bnxt_hwrm_exec_fwd_req()
1250 void bnxt_update_vf_mac(struct bnxt *bp) in bnxt_update_vf_mac() argument
1254 int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict) in bnxt_approve_mac() argument