Lines matching refs: bp
Identifier cross-reference over the Linux bnx2x driver's PF-side SR-IOV code (bnx2x_sriov.c). Each entry gives the source line number, the matching text, and the enclosing function; a trailing "argument" or "local" notes how bp is bound on that line.
29 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
35 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, in storm_memset_vf_to_pf() argument
38 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
40 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
42 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
44 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), in storm_memset_vf_to_pf()
48 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, in storm_memset_func_en() argument
51 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
53 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
55 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
57 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), in storm_memset_func_en()
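The two storm_memset_* helpers above share one pattern: the same per-function byte is mirrored into the X-, C-, T- and U-storm internal memories, indexed by the function's absolute FID. A minimal userspace sketch of that pattern; reg_wr8(), the BAR_* bases and the offset macro are placeholders, not the driver's real values:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for REG_WR8(): just log the write. */
    static void reg_wr8(uint32_t addr, uint8_t val)
    {
        printf("WR8 0x%08x <- 0x%02x\n", addr, val);
    }

    /* Hypothetical storm intmem bases and per-FID offset. */
    enum { BAR_XSTORM = 0x1000, BAR_CSTORM = 0x2000,
           BAR_TSTORM = 0x3000, BAR_USTORM = 0x4000 };
    #define VF_TO_PF_OFFSET(fid) (0x40u + (fid))

    static void storm_memset_vf_to_pf(uint16_t abs_fid, uint8_t pf_id)
    {
        reg_wr8(BAR_XSTORM + VF_TO_PF_OFFSET(abs_fid), pf_id);
        reg_wr8(BAR_CSTORM + VF_TO_PF_OFFSET(abs_fid), pf_id);
        reg_wr8(BAR_TSTORM + VF_TO_PF_OFFSET(abs_fid), pf_id);
        reg_wr8(BAR_USTORM + VF_TO_PF_OFFSET(abs_fid), pf_id);
    }

    int main(void)
    {
        storm_memset_vf_to_pf(17, 1); /* VF with abs FID 17 now reports to PF 1 */
        return 0;
    }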
61 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) in bnx2x_vf_idx_by_abs_fid() argument
65 for_each_vf(bp, idx) in bnx2x_vf_idx_by_abs_fid()
66 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) in bnx2x_vf_idx_by_abs_fid()
72 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) in bnx2x_vf_by_abs_fid() argument
74 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); in bnx2x_vf_by_abs_fid()
75 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; in bnx2x_vf_by_abs_fid()
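bnx2x_vf_by_abs_fid() above is a bounds-checked linear scan of the VF table; on a miss, the index comparison against BNX2X_NR_VIRTFN() turns into a NULL return. A sketch with the struct reduced to the one field the lookup needs:

    #include <stddef.h>
    #include <stdint.h>

    struct bnx2x_virtf { uint16_t abs_vfid; /* ... many more fields ... */ };

    static struct bnx2x_virtf *vf_by_abs_fid(struct bnx2x_virtf *vfs,
                                             unsigned int nr_vfs,
                                             uint16_t abs_vfid)
    {
        for (unsigned int i = 0; i < nr_vfs; i++)
            if (vfs[i].abs_vfid == abs_vfid)
                return &vfs[i];
        return NULL; /* mirrors the idx < BNX2X_NR_VIRTFN(bp) guard */
    }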
78 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_igu_ack_sb() argument
102 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); in bnx2x_vf_igu_ack_sb()
107 REG_WR(bp, igu_addr_ctl, ctl); in bnx2x_vf_igu_ack_sb()
111 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, in bnx2x_validate_vf_sp_objs() argument
126 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_tx() argument
142 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfop_qctor_dump_rx() argument
166 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, in bnx2x_vfop_qctor_prep() argument
234 static int bnx2x_vf_queue_create(struct bnx2x *bp, in bnx2x_vf_queue_create() argument
248 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == in bnx2x_vf_queue_create()
256 rc = bnx2x_queue_state_change(bp, q_params); in bnx2x_vf_queue_create()
263 rc = bnx2x_queue_state_change(bp, q_params); in bnx2x_vf_queue_create()
268 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), in bnx2x_vf_queue_create()
274 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_queue_destroy() argument
290 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == in bnx2x_vf_queue_destroy()
299 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_vf_queue_destroy()
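Queue construction above is two separate ramrods driven through the queue state machine (INIT, then SETUP), with an early exit when the queue is already active; destruction walks the state back down. A simplified model of that flow; the states and commands are reduced stand-ins for the bnx2x_queue_* API, which also has HALT and other intermediate steps:

    enum q_state { Q_RESET, Q_INITIALIZED, Q_ACTIVE };
    enum q_cmd   { Q_CMD_INIT, Q_CMD_SETUP, Q_CMD_TERMINATE, Q_CMD_CFC_DEL };

    static int queue_state_change(enum q_state *st, enum q_cmd cmd)
    {
        switch (cmd) {
        case Q_CMD_INIT:
            if (*st != Q_RESET) return -1;
            *st = Q_INITIALIZED; return 0;
        case Q_CMD_SETUP:
            if (*st != Q_INITIALIZED) return -1;
            *st = Q_ACTIVE; return 0;
        case Q_CMD_TERMINATE:
            if (*st != Q_ACTIVE) return -1;
            *st = Q_INITIALIZED; return 0;
        case Q_CMD_CFC_DEL:
            if (*st != Q_INITIALIZED) return -1;
            *st = Q_RESET; return 0;
        }
        return -1;
    }

    static int vf_queue_create(enum q_state *st)
    {
        if (*st == Q_ACTIVE) /* already constructed: nothing to do */
            return 0;
        if (queue_state_change(st, Q_CMD_INIT))
            return -1;
        return queue_state_change(st, Q_CMD_SETUP);
    }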
316 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) in bnx2x_vf_set_igu_info() argument
318 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_set_igu_info()
321 if (!BP_VFDB(bp)->first_vf_igu_entry) in bnx2x_vf_set_igu_info()
322 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; in bnx2x_vf_set_igu_info()
331 BP_VFDB(bp)->vf_sbs_pool++; in bnx2x_vf_set_igu_info()
334 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_vlan_mac_clear() argument
364 rc = ramrod.vlan_mac_obj->delete_all(bp, in bnx2x_vf_vlan_mac_clear()
378 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, in bnx2x_vf_mac_vlan_config() argument
416 rc = bnx2x_config_vlan_mac(bp, &ramrod); in bnx2x_vf_mac_vlan_config()
434 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mac_vlan_config_list() argument
442 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_vf_mac_vlan_config_list()
447 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, in bnx2x_vf_mac_vlan_config_list()
461 bnx2x_vf_mac_vlan_config(bp, vf, qid, in bnx2x_vf_mac_vlan_config_list()
473 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, in bnx2x_vf_queue_setup() argument
480 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); in bnx2x_vf_queue_setup()
485 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, in bnx2x_vf_queue_setup()
493 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_queue_flr() argument
502 bnx2x_validate_vf_sp_objs(bp, vf, false)) { in bnx2x_vf_queue_flr()
503 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, in bnx2x_vf_queue_flr()
507 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, in bnx2x_vf_queue_flr()
511 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, in bnx2x_vf_queue_flr()
526 rc = bnx2x_queue_state_change(bp, &qstate); in bnx2x_vf_queue_flr()
537 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_mcast() argument
572 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET); in bnx2x_vf_mcast()
577 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); in bnx2x_vf_mcast()
587 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, in bnx2x_vf_prep_rx_mode() argument
597 ramrod->rx_mode_obj = &bp->rx_mode_obj; in bnx2x_vf_prep_rx_mode()
608 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); in bnx2x_vf_prep_rx_mode()
609 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); in bnx2x_vf_prep_rx_mode()
612 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_rxmode() argument
619 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); in bnx2x_vf_rxmode()
622 return bnx2x_config_rx_mode(bp, &ramrod); in bnx2x_vf_rxmode()
625 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) in bnx2x_vf_queue_teardown() argument
633 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); in bnx2x_vf_queue_teardown()
638 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { in bnx2x_vf_queue_teardown()
639 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, in bnx2x_vf_queue_teardown()
644 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, in bnx2x_vf_queue_teardown()
649 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, in bnx2x_vf_queue_teardown()
654 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); in bnx2x_vf_queue_teardown()
661 rc = bnx2x_vf_queue_destroy(bp, vf, qid); in bnx2x_vf_queue_teardown()
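The teardown above is ordered deliberately: rx-mode goes to zero first so nothing new is classified, then MAC, VLAN and multicast filters are removed, and only then is the queue itself destroyed. A sketch of that ordering with stubbed helpers; all names here are illustrative:

    struct vf { int abs_vfid; };

    /* Stubs standing in for the bnx2x_vf_* calls used above. */
    static int set_rxmode(struct vf *vf, int qid, unsigned long accept)
    { (void)vf; (void)qid; (void)accept; return 0; }
    static int clear_macs(struct vf *vf, int qid)  { (void)vf; (void)qid; return 0; }
    static int clear_vlans(struct vf *vf, int qid) { (void)vf; (void)qid; return 0; }
    static int clear_mcast(struct vf *vf)          { (void)vf; return 0; }
    static int queue_destroy(struct vf *vf, int qid) { (void)vf; (void)qid; return 0; }

    static int vf_queue_teardown(struct vf *vf, int qid)
    {
        int rc;

        rc = set_rxmode(vf, qid, 0);   /* accept nothing first */
        if (rc) return rc;
        rc = clear_macs(vf, qid);      /* then drop classification */
        if (rc) return rc;
        rc = clear_vlans(vf, qid);
        if (rc) return rc;
        rc = clear_mcast(vf);
        if (rc) return rc;
        return queue_destroy(vf, qid); /* the queue itself goes last */
    }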
679 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) in bnx2x_vf_enable_internal() argument
681 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); in bnx2x_vf_enable_internal()
685 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_semi_clear_err() argument
687 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
688 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
689 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
690 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); in bnx2x_vf_semi_clear_err()
693 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_pglue_clear_err() argument
695 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; in bnx2x_vf_pglue_clear_err()
712 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); in bnx2x_vf_pglue_clear_err()
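bnx2x_vf_pglue_clear_err() picks one of several 32-bit "was error" registers and clears a single VF bit in it; the group and the bit both come from the absolute VF id, as lines 695 and 712 above show. The same arithmetic, stand-alone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int path = 1, abs_vfid = 37;

        /* Same math as line 695 above: 32 VF bits per register, with the
         * engine/path folded into the group selection. */
        uint32_t group = (2 * path + abs_vfid) >> 5;
        uint32_t bit   = 1u << (abs_vfid & 0x1f);

        printf("was_err group %u, bit mask 0x%08x\n", group, bit);
        return 0;
    }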
715 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_igu_reset() argument
721 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_igu_reset()
723 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); in bnx2x_vf_igu_reset()
724 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); in bnx2x_vf_igu_reset()
725 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); in bnx2x_vf_igu_reset()
726 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); in bnx2x_vf_igu_reset()
727 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); in bnx2x_vf_igu_reset()
728 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); in bnx2x_vf_igu_reset()
730 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); in bnx2x_vf_igu_reset()
733 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; in bnx2x_vf_igu_reset()
734 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); in bnx2x_vf_igu_reset()
740 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_igu_reset()
747 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); in bnx2x_vf_igu_reset()
750 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, in bnx2x_vf_igu_reset()
754 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, in bnx2x_vf_igu_reset()
759 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_enable_access() argument
766 storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp)); in bnx2x_vf_enable_access()
767 storm_memset_func_en(bp, abs_fid, 1); in bnx2x_vf_enable_access()
770 if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI) in bnx2x_vf_enable_access()
771 REG_WR8(bp, BAR_XSTRORM_INTMEM + in bnx2x_vf_enable_access()
775 bnx2x_vf_semi_clear_err(bp, abs_vfid); in bnx2x_vf_enable_access()
776 bnx2x_vf_pglue_clear_err(bp, abs_vfid); in bnx2x_vf_enable_access()
779 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); in bnx2x_vf_enable_access()
781 bnx2x_vf_enable_internal(bp, true); in bnx2x_vf_enable_access()
782 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_enable_access()
785 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_enable_traffic() argument
788 bnx2x_vf_igu_reset(bp, vf); in bnx2x_vf_enable_traffic()
791 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_enable_traffic()
792 REG_WR(bp, PBF_REG_DISABLE_VF, 0); in bnx2x_vf_enable_traffic()
793 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_enable_traffic()
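bnx2x_vf_enable_traffic() (and several other helpers above) uses the GRC "pretend" bracket: the PF temporarily assumes the VF's HW function handle, performs the per-function register access, and then must restore its own identity. A sketch of the bracket with stand-in helpers:

    /* Stand-ins: pretend_func() programs the GRC pretend register so that
     * subsequent register accesses are attributed to the given function id. */
    static void pretend_func(unsigned int fid) { (void)fid; }
    static void reg_wr(unsigned int reg, unsigned int val) { (void)reg; (void)val; }

    #define PBF_REG_DISABLE_VF 0x1000u /* placeholder address */

    static void vf_enable_traffic(unsigned int pf_abs_func, unsigned int vf_hw_handle)
    {
        pretend_func(vf_hw_handle);    /* register writes now hit the VF */
        reg_wr(PBF_REG_DISABLE_VF, 0); /* un-gate the VF in the packet buffer */
        pretend_func(pf_abs_func);     /* always restore the PF identity */
    }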
796 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_is_pcie_pending() argument
798 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_is_pcie_pending()
814 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) in bnx2x_vf_flr_clnup_epilog() argument
817 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) in bnx2x_vf_flr_clnup_epilog()
827 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_iov_static_resc() argument
846 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_free_resc() argument
849 bnx2x_iov_static_resc(bp, vf); in bnx2x_vf_free_resc()
853 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_flr_clnup_hw() argument
855 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); in bnx2x_vf_flr_clnup_hw()
858 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_flr_clnup_hw()
859 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, in bnx2x_vf_flr_clnup_hw()
862 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_flr_clnup_hw()
865 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), in bnx2x_vf_flr_clnup_hw()
870 bnx2x_tx_hw_flushed(bp, poll_cnt); in bnx2x_vf_flr_clnup_hw()
873 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_flr() argument
883 rc = bnx2x_vf_queue_flr(bp, vf, i); in bnx2x_vf_flr()
889 bnx2x_vf_mcast(bp, vf, NULL, 0, true); in bnx2x_vf_flr()
892 bnx2x_vf_flr_clnup_hw(bp, vf); in bnx2x_vf_flr()
895 bnx2x_vf_free_resc(bp, vf); in bnx2x_vf_flr()
900 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); in bnx2x_vf_flr()
907 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) in bnx2x_vf_flr_clnup() argument
912 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { in bnx2x_vf_flr_clnup()
914 if (bnx2x_vf(bp, i, state) != VF_RESET || in bnx2x_vf_flr_clnup()
915 !bnx2x_vf(bp, i, flr_clnup_stage)) in bnx2x_vf_flr_clnup()
919 i, BNX2X_NR_VIRTFN(bp)); in bnx2x_vf_flr_clnup()
921 vf = BP_VF(bp, i); in bnx2x_vf_flr_clnup()
924 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); in bnx2x_vf_flr_clnup()
927 bnx2x_vf_flr(bp, vf); in bnx2x_vf_flr_clnup()
931 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); in bnx2x_vf_flr_clnup()
942 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); in bnx2x_vf_flr_clnup()
944 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], in bnx2x_vf_flr_clnup()
945 bp->vfdb->flrd_vfs[i]); in bnx2x_vf_flr_clnup()
947 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); in bnx2x_vf_flr_clnup()
953 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); in bnx2x_vf_flr_clnup()
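The cleanup loop above only services VFs that the FLR event marked (state VF_RESET with flr_clnup_stage set), takes the VF-PF channel lock around the per-VF work so a concurrent mailbox request cannot interleave, and afterwards acks the handled bitmap back to the management firmware via SHMEM and DRV_MSG_CODE_VF_DISABLED_DONE. The control flow, reduced to a sketch with stand-in types and helpers:

    enum vf_state { VF_FREE, VF_ACQUIRED, VF_ENABLED, VF_RESET };
    struct vf { enum vf_state state; int flr_clnup_stage; };

    static void lock_vf_pf_channel(struct vf *vf)   { (void)vf; }
    static void unlock_vf_pf_channel(struct vf *vf) { (void)vf; }
    static void vf_flr(struct vf *vf)               { vf->flr_clnup_stage = 0; }
    static void ack_flrd_vfs_to_mfw(void)           { /* SHMEM write + DONE cmd */ }

    static void vf_flr_clnup(struct vf *vfs, unsigned int nr_vfs)
    {
        for (unsigned int i = 0; i < nr_vfs; i++) {
            struct vf *vf = &vfs[i];

            if (vf->state != VF_RESET || !vf->flr_clnup_stage)
                continue;            /* not marked by the FLR event */

            lock_vf_pf_channel(vf);  /* block concurrent VF requests */
            vf_flr(vf);              /* queues, mcast, HW cleanup, resources */
            unlock_vf_pf_channel(vf);
        }
        ack_flrd_vfs_to_mfw();       /* tell the MFW which VFs were handled */
    }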
956 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) in bnx2x_vf_handle_flr_event() argument
962 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); in bnx2x_vf_handle_flr_event()
966 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); in bnx2x_vf_handle_flr_event()
968 for_each_vf(bp, i) { in bnx2x_vf_handle_flr_event()
969 struct bnx2x_virtf *vf = BP_VF(bp, i); in bnx2x_vf_handle_flr_event()
973 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); in bnx2x_vf_handle_flr_event()
975 reset = bp->vfdb->flrd_vfs[1] & in bnx2x_vf_handle_flr_event()
990 bnx2x_vf_flr_clnup(bp); in bnx2x_vf_handle_flr_event()
994 void bnx2x_iov_init_dq(struct bnx2x *bp) in bnx2x_iov_init_dq() argument
996 if (!IS_SRIOV(bp)) in bnx2x_iov_init_dq()
1000 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); in bnx2x_iov_init_dq()
1001 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); in bnx2x_iov_init_dq()
1006 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); in bnx2x_iov_init_dq()
1009 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); in bnx2x_iov_init_dq()
1014 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); in bnx2x_iov_init_dq()
1020 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); in bnx2x_iov_init_dq()
1021 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); in bnx2x_iov_init_dq()
1022 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); in bnx2x_iov_init_dq()
1023 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); in bnx2x_iov_init_dq()
1028 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); in bnx2x_iov_init_dq()
1031 void bnx2x_iov_init_dmae(struct bnx2x *bp) in bnx2x_iov_init_dmae() argument
1033 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) in bnx2x_iov_init_dmae()
1034 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); in bnx2x_iov_init_dmae()
1037 static int bnx2x_vf_domain(struct bnx2x *bp, int vfid) in bnx2x_vf_domain() argument
1039 struct pci_dev *dev = bp->pdev; in bnx2x_vf_domain()
1044 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) in bnx2x_vf_bus() argument
1046 struct pci_dev *dev = bp->pdev; in bnx2x_vf_bus()
1047 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_bus()
1053 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) in bnx2x_vf_devfn() argument
1055 struct pci_dev *dev = bp->pdev; in bnx2x_vf_devfn()
1056 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_devfn()
1061 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_set_bars() argument
1064 struct pci_dev *dev = bp->pdev; in bnx2x_vf_set_bars()
1065 struct bnx2x_sriov *iov = &bp->vfdb->sriov; in bnx2x_vf_set_bars()
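bnx2x_vf_bus()/bnx2x_vf_devfn() above are the standard SR-IOV routing-ID arithmetic: VF RID = PF RID + First VF Offset + VF Stride * vf index, with offset and stride read from the PF's SR-IOV capability; bits above the low eight spill into the bus number. Stand-alone, with example values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t  pf_bus = 0x04, pf_devfn = 0x00; /* PF at 04:00.0, example */
        uint16_t offset = 0x80, stride = 0x02;   /* from VF_OFFSET / VF_STRIDE */
        unsigned int vfid = 5;

        uint16_t rid   = pf_devfn + offset + stride * vfid;
        uint8_t  bus   = pf_bus + (rid >> 8);    /* carry spills into the bus */
        uint8_t  devfn = rid & 0xff;

        printf("VF%u -> %02x:%02x.%u\n", vfid, bus, devfn >> 3, devfn & 7);
        return 0;
    }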
1078 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) in bnx2x_get_vf_igu_cam_info() argument
1086 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); in bnx2x_get_vf_igu_cam_info()
1092 else if (current_pf == BP_FUNC(bp)) in bnx2x_get_vf_igu_cam_info()
1093 bnx2x_vf_set_igu_info(bp, sb_id, in bnx2x_get_vf_igu_cam_info()
1101 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); in bnx2x_get_vf_igu_cam_info()
1102 return BP_VFDB(bp)->vf_sbs_pool; in bnx2x_get_vf_igu_cam_info()
1105 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) in __bnx2x_iov_free_vfdb() argument
1107 if (bp->vfdb) { in __bnx2x_iov_free_vfdb()
1108 kfree(bp->vfdb->vfqs); in __bnx2x_iov_free_vfdb()
1109 kfree(bp->vfdb->vfs); in __bnx2x_iov_free_vfdb()
1110 kfree(bp->vfdb); in __bnx2x_iov_free_vfdb()
1112 bp->vfdb = NULL; in __bnx2x_iov_free_vfdb()
1115 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) in bnx2x_sriov_pci_cfg_info() argument
1118 struct pci_dev *dev = bp->pdev; in bnx2x_sriov_pci_cfg_info()
1140 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) in bnx2x_sriov_info() argument
1148 if (bnx2x_sriov_pci_cfg_info(bp, iov)) in bnx2x_sriov_info()
1155 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); in bnx2x_sriov_info()
1157 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); in bnx2x_sriov_info()
1161 BP_FUNC(bp), in bnx2x_sriov_info()
1169 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, in bnx2x_iov_init_one() argument
1174 struct pci_dev *dev = bp->pdev; in bnx2x_iov_init_one()
1176 bp->vfdb = NULL; in bnx2x_iov_init_one()
1179 if (IS_VF(bp)) in bnx2x_iov_init_one()
1187 if (CHIP_IS_E1x(bp)) in bnx2x_iov_init_one()
1195 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { in bnx2x_iov_init_one()
1197 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); in bnx2x_iov_init_one()
1209 if (!pci_ari_enabled(bp->pdev->bus)) { in bnx2x_iov_init_one()
1215 if (CHIP_INT_MODE_IS_BC(bp)) { in bnx2x_iov_init_one()
1221 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); in bnx2x_iov_init_one()
1222 if (!bp->vfdb) { in bnx2x_iov_init_one()
1233 iov = &(bp->vfdb->sriov); in bnx2x_iov_init_one()
1234 err = bnx2x_sriov_info(bp, iov); in bnx2x_iov_init_one()
1250 bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp), in bnx2x_iov_init_one()
1253 if (!bp->vfdb->vfs) { in bnx2x_iov_init_one()
1260 for_each_vf(bp, i) { in bnx2x_iov_init_one()
1261 bnx2x_vf(bp, i, index) = i; in bnx2x_iov_init_one()
1262 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; in bnx2x_iov_init_one()
1263 bnx2x_vf(bp, i, state) = VF_FREE; in bnx2x_iov_init_one()
1264 mutex_init(&bnx2x_vf(bp, i, op_mutex)); in bnx2x_iov_init_one()
1265 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; in bnx2x_iov_init_one()
1267 bnx2x_vf(bp, i, spoofchk) = 1; in bnx2x_iov_init_one()
1271 if (!bnx2x_get_vf_igu_cam_info(bp)) { in bnx2x_iov_init_one()
1278 bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES, in bnx2x_iov_init_one()
1282 if (!bp->vfdb->vfqs) { in bnx2x_iov_init_one()
1289 mutex_init(&bp->vfdb->event_mutex); in bnx2x_iov_init_one()
1291 mutex_init(&bp->vfdb->bulletin_mutex); in bnx2x_iov_init_one()
1293 if (SHMEM2_HAS(bp, sriov_switch_mode)) in bnx2x_iov_init_one()
1294 SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB); in bnx2x_iov_init_one()
1299 __bnx2x_iov_free_vfdb(bp); in bnx2x_iov_init_one()
1303 void bnx2x_iov_remove_one(struct bnx2x *bp) in bnx2x_iov_remove_one() argument
1308 if (!IS_SRIOV(bp)) in bnx2x_iov_remove_one()
1311 bnx2x_disable_sriov(bp); in bnx2x_iov_remove_one()
1314 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { in bnx2x_iov_remove_one()
1315 bnx2x_pretend_func(bp, in bnx2x_iov_remove_one()
1316 HW_VF_HANDLE(bp, in bnx2x_iov_remove_one()
1317 bp->vfdb->sriov.first_vf_in_pf + in bnx2x_iov_remove_one()
1320 bp->vfdb->sriov.first_vf_in_pf + vf_idx); in bnx2x_iov_remove_one()
1321 bnx2x_vf_enable_internal(bp, 0); in bnx2x_iov_remove_one()
1322 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_iov_remove_one()
1326 __bnx2x_iov_free_vfdb(bp); in bnx2x_iov_remove_one()
1329 void bnx2x_iov_free_mem(struct bnx2x *bp) in bnx2x_iov_free_mem() argument
1333 if (!IS_SRIOV(bp)) in bnx2x_iov_free_mem()
1338 struct hw_dma *cxt = &bp->vfdb->context[i]; in bnx2x_iov_free_mem()
1342 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, in bnx2x_iov_free_mem()
1343 BP_VFDB(bp)->sp_dma.mapping, in bnx2x_iov_free_mem()
1344 BP_VFDB(bp)->sp_dma.size); in bnx2x_iov_free_mem()
1346 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, in bnx2x_iov_free_mem()
1347 BP_VF_MBX_DMA(bp)->mapping, in bnx2x_iov_free_mem()
1348 BP_VF_MBX_DMA(bp)->size); in bnx2x_iov_free_mem()
1350 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, in bnx2x_iov_free_mem()
1351 BP_VF_BULLETIN_DMA(bp)->mapping, in bnx2x_iov_free_mem()
1352 BP_VF_BULLETIN_DMA(bp)->size); in bnx2x_iov_free_mem()
1355 int bnx2x_iov_alloc_mem(struct bnx2x *bp) in bnx2x_iov_alloc_mem() argument
1360 if (!IS_SRIOV(bp)) in bnx2x_iov_alloc_mem()
1364 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * in bnx2x_iov_alloc_mem()
1368 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); in bnx2x_iov_alloc_mem()
1383 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); in bnx2x_iov_alloc_mem()
1384 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, in bnx2x_iov_alloc_mem()
1386 if (!BP_VFDB(bp)->sp_dma.addr) in bnx2x_iov_alloc_mem()
1388 BP_VFDB(bp)->sp_dma.size = tot_size; in bnx2x_iov_alloc_mem()
1391 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; in bnx2x_iov_alloc_mem()
1392 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, in bnx2x_iov_alloc_mem()
1394 if (!BP_VF_MBX_DMA(bp)->addr) in bnx2x_iov_alloc_mem()
1397 BP_VF_MBX_DMA(bp)->size = tot_size; in bnx2x_iov_alloc_mem()
1400 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; in bnx2x_iov_alloc_mem()
1401 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, in bnx2x_iov_alloc_mem()
1403 if (!BP_VF_BULLETIN_DMA(bp)->addr) in bnx2x_iov_alloc_mem()
1406 BP_VF_BULLETIN_DMA(bp)->size = tot_size; in bnx2x_iov_alloc_mem()
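Each DMA area above is sized as a per-VF constant times the VF count (struct bnx2x_vf_sp for slow-path data, MBX_MSG_ALIGNED_SIZE for mailboxes, BULLETIN_CONTENT_SIZE for bulletins), and a VF's slot is found by plain indexing into the one coherent buffer, as the mailbox setup at source lines 1597-1601 below does. A sketch of that slot math; the size constant here is a placeholder:

    #include <stddef.h>
    #include <stdint.h>

    #define MBX_MSG_ALIGNED_SIZE 0x400u /* placeholder, not the real constant */

    /* VF 'vfid' owns one fixed-size slot inside the single coherent area;
     * the same offset applies to both the CPU address and the DMA mapping. */
    static void *vf_mbx_slot(void *base, unsigned int vfid)
    {
        return (uint8_t *)base + (size_t)vfid * MBX_MSG_ALIGNED_SIZE;
    }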
1414 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vfq_init() argument
1425 bnx2x_init_queue_obj(bp, &q->sp_obj, in bnx2x_vfq_init()
1427 bnx2x_vf_sp(bp, vf, q_data), in bnx2x_vfq_init()
1428 bnx2x_vf_sp_map(bp, vf, q_data), in bnx2x_vfq_init()
1439 static int bnx2x_max_speed_cap(struct bnx2x *bp) in bnx2x_max_speed_cap() argument
1441 u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)]; in bnx2x_max_speed_cap()
1450 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) in bnx2x_iov_link_update_vf() argument
1452 struct bnx2x_link_report_data *state = &bp->last_reported_link; in bnx2x_iov_link_update_vf()
1459 rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false); in bnx2x_iov_link_update_vf()
1463 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_iov_link_update_vf()
1489 bulletin->link_speed = bnx2x_max_speed_cap(bp); in bnx2x_iov_link_update_vf()
1501 rc = bnx2x_post_vf_bulletin(bp, idx); in bnx2x_iov_link_update_vf()
1509 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_iov_link_update_vf()
1515 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_link_state() local
1516 struct bnx2x_virtf *vf = BP_VF(bp, idx); in bnx2x_set_vf_link_state()
1526 return bnx2x_iov_link_update_vf(bp, idx); in bnx2x_set_vf_link_state()
1529 void bnx2x_iov_link_update(struct bnx2x *bp) in bnx2x_iov_link_update() argument
1533 if (!IS_SRIOV(bp)) in bnx2x_iov_link_update()
1536 for_each_vf(bp, vfid) in bnx2x_iov_link_update()
1537 bnx2x_iov_link_update_vf(bp, vfid); in bnx2x_iov_link_update()
1541 int bnx2x_iov_nic_init(struct bnx2x *bp) in bnx2x_iov_nic_init() argument
1545 if (!IS_SRIOV(bp)) { in bnx2x_iov_nic_init()
1550 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); in bnx2x_iov_nic_init()
1556 for_each_vf(bp, vfid) { in bnx2x_iov_nic_init()
1557 struct bnx2x_virtf *vf = BP_VF(bp, vfid); in bnx2x_iov_nic_init()
1559 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * in bnx2x_iov_nic_init()
1563 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + in bnx2x_iov_nic_init()
1572 bnx2x_iov_static_resc(bp, vf); in bnx2x_iov_nic_init()
1576 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); in bnx2x_iov_nic_init()
1588 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, in bnx2x_iov_nic_init()
1590 bnx2x_vf_sp(bp, vf, mcast_rdata), in bnx2x_iov_nic_init()
1591 bnx2x_vf_sp_map(bp, vf, mcast_rdata), in bnx2x_iov_nic_init()
1597 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) in bnx2x_iov_nic_init()
1598 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * in bnx2x_iov_nic_init()
1601 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + in bnx2x_iov_nic_init()
1605 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); in bnx2x_iov_nic_init()
1609 for_each_vf(bp, vfid) { in bnx2x_iov_nic_init()
1610 struct bnx2x_virtf *vf = BP_VF(bp, vfid); in bnx2x_iov_nic_init()
1613 vf->domain = bnx2x_vf_domain(bp, vfid); in bnx2x_iov_nic_init()
1614 vf->bus = bnx2x_vf_bus(bp, vfid); in bnx2x_iov_nic_init()
1615 vf->devfn = bnx2x_vf_devfn(bp, vfid); in bnx2x_iov_nic_init()
1616 bnx2x_vf_set_bars(bp, vf); in bnx2x_iov_nic_init()
1630 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) in bnx2x_iov_chip_cleanup() argument
1634 if (!IS_SRIOV(bp)) in bnx2x_iov_chip_cleanup()
1638 for_each_vf(bp, i) in bnx2x_iov_chip_cleanup()
1639 bnx2x_vf_release(bp, BP_VF(bp, i)); in bnx2x_iov_chip_cleanup()
1645 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) in bnx2x_iov_init_ilt() argument
1648 struct bnx2x_ilt *ilt = BP_ILT(bp); in bnx2x_iov_init_ilt()
1650 if (!IS_SRIOV(bp)) in bnx2x_iov_init_ilt()
1655 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); in bnx2x_iov_init_ilt()
1664 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) in bnx2x_iov_is_vf_cid() argument
1671 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, in bnx2x_vf_handle_classification_eqe() argument
1684 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, in bnx2x_vf_handle_classification_eqe()
1688 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, in bnx2x_vf_handle_classification_eqe()
1702 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, in bnx2x_vf_handle_mcast_eqe() argument
1713 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_vf_handle_mcast_eqe()
1721 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, in bnx2x_vf_handle_filters_eqe() argument
1729 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, in bnx2x_vf_handle_rss_update_eqe() argument
1735 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) in bnx2x_iov_eq_sp_event() argument
1742 if (!IS_SRIOV(bp)) in bnx2x_iov_eq_sp_event()
1778 if (!bnx2x_iov_is_vf_cid(bp, cid)) { in bnx2x_iov_eq_sp_event()
1790 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_iov_eq_sp_event()
1802 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, in bnx2x_iov_eq_sp_event()
1810 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); in bnx2x_iov_eq_sp_event()
1815 bnx2x_vf_handle_mcast_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1820 bnx2x_vf_handle_filters_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1825 bnx2x_vf_handle_rss_update_eqe(bp, vf); in bnx2x_iov_eq_sp_event()
1838 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) in bnx2x_vf_by_cid() argument
1845 return bnx2x_vf_by_abs_fid(bp, abs_vfid); in bnx2x_vf_by_cid()
1848 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, in bnx2x_iov_set_queue_sp_obj() argument
1853 if (!IS_SRIOV(bp)) in bnx2x_iov_set_queue_sp_obj()
1856 vf = bnx2x_vf_by_cid(bp, vf_cid); in bnx2x_iov_set_queue_sp_obj()
1870 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) in bnx2x_iov_adjust_stats_req() argument
1878 if (!IS_SRIOV(bp)) in bnx2x_iov_adjust_stats_req()
1881 if (!NO_FCOE(bp)) in bnx2x_iov_adjust_stats_req()
1885 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; in bnx2x_iov_adjust_stats_req()
1891 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, in bnx2x_iov_adjust_stats_req()
1894 cur_query_entry = &bp->fw_stats_req-> in bnx2x_iov_adjust_stats_req()
1897 for_each_vf(bp, i) { in bnx2x_iov_adjust_stats_req()
1899 struct bnx2x_virtf *vf = BP_VF(bp, i); in bnx2x_iov_adjust_stats_req()
1924 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == in bnx2x_iov_adjust_stats_req()
1951 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; in bnx2x_iov_adjust_stats_req()
1955 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, in bnx2x_vf_qtbl_set_q() argument
1961 REG_WR(bp, reg, val); in bnx2x_vf_qtbl_set_q()
1964 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_clr_qtbl() argument
1969 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, in bnx2x_vf_clr_qtbl()
1973 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_igu_disable() argument
1978 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); in bnx2x_vf_igu_disable()
1979 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); in bnx2x_vf_igu_disable()
1982 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); in bnx2x_vf_igu_disable()
1983 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_vf_igu_disable()
1986 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_max_queue_cnt() argument
1993 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_chk_avail_resc() argument
1996 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_chk_avail_resc()
1997 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_chk_avail_resc()
2007 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_acquire() argument
2010 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * in bnx2x_vf_acquire()
2014 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + in bnx2x_vf_acquire()
2027 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { in bnx2x_vf_acquire()
2046 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { in bnx2x_vf_acquire()
2055 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_acquire()
2056 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); in bnx2x_vf_acquire()
2086 bnx2x_vfq_init(bp, vf, q); in bnx2x_vf_acquire()
2092 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) in bnx2x_vf_init() argument
2101 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, in bnx2x_vf_init()
2115 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) in bnx2x_vf_init()
2119 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); in bnx2x_vf_init()
2122 func_init.pf_id = BP_FUNC(bp); in bnx2x_vf_init()
2124 bnx2x_func_init(bp, &func_init); in bnx2x_vf_init()
2127 bnx2x_vf_enable_access(bp, vf->abs_vfid); in bnx2x_vf_init()
2128 bnx2x_vf_enable_traffic(bp, vf); in bnx2x_vf_init()
2132 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, in bnx2x_vf_init()
2138 bnx2x_post_vf_bulletin(bp, vf->index); in bnx2x_vf_init()
2155 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_close() argument
2163 rc = bnx2x_vf_queue_teardown(bp, vf, i); in bnx2x_vf_close()
2170 bnx2x_vf_igu_disable(bp, vf); in bnx2x_vf_close()
2174 bnx2x_vf_clr_qtbl(bp, vf); in bnx2x_vf_close()
2185 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); in bnx2x_vf_close()
2202 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_free() argument
2215 rc = bnx2x_vf_close(bp, vf); in bnx2x_vf_free()
2221 bnx2x_vf_free_resc(bp, vf); in bnx2x_vf_free()
2235 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_rss_update() argument
2240 return bnx2x_config_rss(bp, rss); in bnx2x_vf_rss_update()
2243 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_vf_tpa_update() argument
2266 rc = bnx2x_queue_state_change(bp, &qstate); in bnx2x_vf_tpa_update()
2282 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) in bnx2x_vf_release() argument
2287 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); in bnx2x_vf_release()
2289 rc = bnx2x_vf_free(bp, vf); in bnx2x_vf_release()
2294 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); in bnx2x_vf_release()
2298 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_lock_vf_pf_channel() argument
2318 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_unlock_vf_pf_channel() argument
2349 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) in bnx2x_set_pf_tx_switching() argument
2356 prev_flags = bp->flags; in bnx2x_set_pf_tx_switching()
2358 bp->flags |= TX_SWITCHING; in bnx2x_set_pf_tx_switching()
2360 bp->flags &= ~TX_SWITCHING; in bnx2x_set_pf_tx_switching()
2361 if (prev_flags == bp->flags) in bnx2x_set_pf_tx_switching()
2365 if ((bp->state != BNX2X_STATE_OPEN) || in bnx2x_set_pf_tx_switching()
2366 (bnx2x_get_q_logical_state(bp, in bnx2x_set_pf_tx_switching()
2367 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != in bnx2x_set_pf_tx_switching()
2385 for_each_eth_queue(bp, i) { in bnx2x_set_pf_tx_switching()
2386 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_pf_tx_switching()
2390 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; in bnx2x_set_pf_tx_switching()
2397 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_set_pf_tx_switching()
2411 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); in bnx2x_sriov_configure() local
2413 if (!IS_SRIOV(bp)) { in bnx2x_sriov_configure()
2419 num_vfs_param, BNX2X_NR_VIRTFN(bp)); in bnx2x_sriov_configure()
2422 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_sriov_configure()
2428 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { in bnx2x_sriov_configure()
2430 num_vfs_param, BNX2X_NR_VIRTFN(bp)); in bnx2x_sriov_configure()
2431 num_vfs_param = BNX2X_NR_VIRTFN(bp); in bnx2x_sriov_configure()
2434 bp->requested_nr_virtfn = num_vfs_param; in bnx2x_sriov_configure()
2436 bnx2x_set_pf_tx_switching(bp, false); in bnx2x_sriov_configure()
2437 bnx2x_disable_sriov(bp); in bnx2x_sriov_configure()
2440 return bnx2x_enable_sriov(bp); in bnx2x_sriov_configure()
2446 int bnx2x_enable_sriov(struct bnx2x *bp) in bnx2x_enable_sriov() argument
2448 int rc = 0, req_vfs = bp->requested_nr_virtfn; in bnx2x_enable_sriov()
2456 first_vf = bp->vfdb->sriov.first_vf_in_pf; in bnx2x_enable_sriov()
2460 BP_VFDB(bp)->vf_sbs_pool / req_vfs); in bnx2x_enable_sriov()
2464 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); in bnx2x_enable_sriov()
2467 vf_sb_count(BP_VF(bp, vf_idx)) = 0; in bnx2x_enable_sriov()
2469 bp->vfdb->vf_sbs_pool = 0; in bnx2x_enable_sriov()
2472 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; in bnx2x_enable_sriov()
2481 REG_WR(bp, address, igu_entry); in bnx2x_enable_sriov()
2488 bnx2x_get_vf_igu_cam_info(bp); in bnx2x_enable_sriov()
2491 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); in bnx2x_enable_sriov()
2494 for_each_vf(bp, vf_idx) { in bnx2x_enable_sriov()
2495 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); in bnx2x_enable_sriov()
2498 vf->vfqs = &bp->vfdb->vfqs[qcount]; in bnx2x_enable_sriov()
2500 bnx2x_iov_static_resc(bp, vf); in bnx2x_enable_sriov()
2508 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); in bnx2x_enable_sriov()
2509 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, in bnx2x_enable_sriov()
2514 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); in bnx2x_enable_sriov()
2520 bnx2x_disable_sriov(bp); in bnx2x_enable_sriov()
2522 rc = bnx2x_set_pf_tx_switching(bp, true); in bnx2x_enable_sriov()
2526 rc = pci_enable_sriov(bp->pdev, req_vfs); in bnx2x_enable_sriov()
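bnx2x_enable_sriov() above re-slices the IGU status-block pool evenly across the requested number of VFs before enabling them: each VF gets vf_sbs_pool / req_vfs status blocks, capped at a per-VF maximum, and its queues point into the shared vfqs array at a running offset. The slicing as arithmetic; the cap value is illustrative:

    #include <stdio.h>

    #define VF_MAX_QUEUES 16u /* placeholder per-VF cap */

    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

    int main(void)
    {
        unsigned int vf_sbs_pool = 100, req_vfs = 8;

        /* Evenly divide the IGU SB pool; the per-VF cap wins if smaller. */
        unsigned int num_vf_queues = min_u(VF_MAX_QUEUES, vf_sbs_pool / req_vfs);

        /* Each VF's queues live at a running offset in one shared array. */
        for (unsigned int vf = 0, qcount = 0; vf < req_vfs; vf++) {
            printf("VF%u: %u queues at vfqs[%u]\n", vf, num_vf_queues, qcount);
            qcount += num_vf_queues;
        }
        return 0;
    }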
2535 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) in bnx2x_pf_set_vfs_vlan() argument
2541 for_each_vf(bp, vfidx) { in bnx2x_pf_set_vfs_vlan()
2542 bulletin = BP_VF_BULLETIN(bp, vfidx); in bnx2x_pf_set_vfs_vlan()
2544 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0, in bnx2x_pf_set_vfs_vlan()
2549 void bnx2x_disable_sriov(struct bnx2x *bp) in bnx2x_disable_sriov() argument
2551 if (pci_vfs_assigned(bp->pdev)) { in bnx2x_disable_sriov()
2557 pci_disable_sriov(bp->pdev); in bnx2x_disable_sriov()
2560 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, in bnx2x_vf_op_prep() argument
2565 if (bp->state != BNX2X_STATE_OPEN) { in bnx2x_vf_op_prep()
2570 if (!IS_SRIOV(bp)) { in bnx2x_vf_op_prep()
2575 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { in bnx2x_vf_op_prep()
2577 vfidx, BNX2X_NR_VIRTFN(bp)); in bnx2x_vf_op_prep()
2582 *vf = BP_VF(bp, vfidx); in bnx2x_vf_op_prep()
2583 *bulletin = BP_VF_BULLETIN(bp, vfidx); in bnx2x_vf_op_prep()
2608 struct bnx2x *bp = netdev_priv(dev); in bnx2x_get_vf_config() local
2616 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_get_vf_config()
2635 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { in bnx2x_get_vf_config()
2636 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, in bnx2x_get_vf_config()
2638 vlan_obj->get_n_elements(bp, vlan_obj, 1, in bnx2x_get_vf_config()
2643 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_get_vf_config()
2660 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_get_vf_config()
2685 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_mac() local
2696 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_set_vf_mac()
2700 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_mac()
2709 rc = bnx2x_post_vf_bulletin(bp, vfidx); in bnx2x_set_vf_mac()
2712 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_mac()
2720 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); in bnx2x_set_vf_mac()
2728 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_set_vf_mac()
2732 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); in bnx2x_set_vf_mac()
2736 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); in bnx2x_set_vf_mac()
2744 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); in bnx2x_set_vf_mac()
2753 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, in bnx2x_set_vf_mac()
2757 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); in bnx2x_set_vf_mac()
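bnx2x_set_vf_mac() above is two-phase: the new MAC is first published through the bulletin board (under bulletin_mutex, then re-posted to the VF), and only if the VF's leading queue is already live does the PF also rewrite the classification on the VF's behalf, under the VF-PF channel lock: delete all existing ETH and UC-list MACs, then add the new one. The flow condensed into a sketch; helpers are stand-ins:

    struct vf { int queue_live; };

    static int  post_bulletin_mac(struct vf *vf, const unsigned char *mac)
    { (void)vf; (void)mac; return 0; }
    static int  del_all_macs(struct vf *vf) { (void)vf; return 0; }
    static int  set_mac_one(struct vf *vf, const unsigned char *mac)
    { (void)vf; (void)mac; return 0; }
    static void lock_channel(struct vf *vf)   { (void)vf; }
    static void unlock_channel(struct vf *vf) { (void)vf; }

    static int set_vf_mac(struct vf *vf, const unsigned char *mac)
    {
        int rc = post_bulletin_mac(vf, mac); /* phase 1: publish via bulletin */
        if (rc || !vf->queue_live)
            return rc;                       /* VF not up: bulletin is enough */

        lock_channel(vf);                    /* phase 2: rewrite classification */
        rc = del_all_macs(vf);               /* drop stale ETH + UC-list MACs */
        if (!rc)
            rc = set_mac_one(vf, mac);
        unlock_channel(vf);
        return rc;
    }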
2763 static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp, in bnx2x_set_vf_vlan_acceptance() argument
2776 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, in bnx2x_set_vf_vlan_acceptance()
2779 bnx2x_config_rx_mode(bp, &rx_ramrod); in bnx2x_set_vf_vlan_acceptance()
2782 static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf, in bnx2x_set_vf_vlan_filter() argument
2797 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); in bnx2x_set_vf_vlan_filter()
2810 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_vlan() local
2829 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); in bnx2x_set_vf_vlan()
2839 mutex_lock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_vlan()
2848 rc = bnx2x_post_vf_bulletin(bp, vfidx); in bnx2x_set_vf_vlan()
2851 mutex_unlock(&bp->vfdb->bulletin_mutex); in bnx2x_set_vf_vlan()
2855 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != in bnx2x_set_vf_vlan()
2860 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_set_vf_vlan()
2864 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); in bnx2x_set_vf_vlan()
2869 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, in bnx2x_set_vf_vlan()
2881 bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan); in bnx2x_set_vf_vlan()
2883 rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true); in bnx2x_set_vf_vlan()
2897 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) != in bnx2x_set_vf_vlan()
2932 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_set_vf_vlan()
2940 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); in bnx2x_set_vf_vlan()
2952 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_vf_spoofchk() local
2956 vf = BP_VF(bp, idx); in bnx2x_set_vf_spoofchk()
2971 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != in bnx2x_set_vf_spoofchk()
2976 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) in bnx2x_set_vf_spoofchk()
2987 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) != in bnx2x_set_vf_spoofchk()
3005 rc = bnx2x_queue_state_change(bp, &q_params); in bnx2x_set_vf_spoofchk()
3035 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) in bnx2x_sample_bulletin() argument
3047 memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin, in bnx2x_sample_bulletin()
3050 crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content); in bnx2x_sample_bulletin()
3052 if (bp->shadow_bulletin.content.crc == crc) in bnx2x_sample_bulletin()
3056 bp->shadow_bulletin.content.crc, crc); in bnx2x_sample_bulletin()
3064 bulletin = &bp->shadow_bulletin.content; in bnx2x_sample_bulletin()
3067 if (bp->old_bulletin.version == bulletin->version) in bnx2x_sample_bulletin()
3072 !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { in bnx2x_sample_bulletin()
3074 eth_hw_addr_set(bp->dev, bulletin->mac); in bnx2x_sample_bulletin()
3081 bp->vf_link_vars.line_speed = bulletin->link_speed; in bnx2x_sample_bulletin()
3082 bp->vf_link_vars.link_report_flags = 0; in bnx2x_sample_bulletin()
3086 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3090 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3094 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3098 &bp->vf_link_vars.link_report_flags); in bnx2x_sample_bulletin()
3099 __bnx2x_link_report(bp); in bnx2x_sample_bulletin()
3103 memcpy(&bp->old_bulletin, bulletin, in bnx2x_sample_bulletin()
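bnx2x_sample_bulletin() above is the VF-side consumer of the PF's bulletin board: copy the shared page into a shadow, recompute the CRC over the copy, and retry a bounded number of times if it does not match (the PF may have been mid-update); once a consistent snapshot with a new version is captured, MAC/link changes are applied and the snapshot is remembered in old_bulletin. A sketch of the CRC-validated snapshot loop; the crc stub and attempt count are illustrative:

    #include <stdint.h>
    #include <string.h>

    #define BULLETIN_ATTEMPTS 5 /* give a concurrent writer a few chances */

    struct bulletin { uint32_t crc; uint32_t version; /* mac, link, ... */ };

    static uint32_t bulletin_crc(const struct bulletin *b) { (void)b; return 0; }

    /* Returns 0 with a consistent snapshot in *shadow, or -1 if every copy
     * raced with the PF writer (torn read, so the CRC never matched). */
    static int sample_bulletin(const struct bulletin *shared,
                               struct bulletin *shadow)
    {
        for (int i = 0; i < BULLETIN_ATTEMPTS; i++) {
            memcpy(shadow, shared, sizeof(*shadow));
            if (shadow->crc == bulletin_crc(shadow))
                return 0; /* stable copy captured */
        }
        return -1;
    }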
3109 void bnx2x_timer_sriov(struct bnx2x *bp) in bnx2x_timer_sriov() argument
3111 bnx2x_sample_bulletin(bp); in bnx2x_timer_sriov()
3114 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) in bnx2x_timer_sriov()
3115 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, in bnx2x_timer_sriov()
3119 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) in bnx2x_vf_doorbells() argument
3122 return bp->regview + PXP_VF_ADDR_DB_START; in bnx2x_vf_doorbells()
3125 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) in bnx2x_vf_pci_dealloc() argument
3127 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, in bnx2x_vf_pci_dealloc()
3129 BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, in bnx2x_vf_pci_dealloc()
3133 int bnx2x_vf_pci_alloc(struct bnx2x *bp) in bnx2x_vf_pci_alloc() argument
3135 mutex_init(&bp->vf2pf_mutex); in bnx2x_vf_pci_alloc()
3138 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, in bnx2x_vf_pci_alloc()
3140 if (!bp->vf2pf_mbox) in bnx2x_vf_pci_alloc()
3144 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, in bnx2x_vf_pci_alloc()
3146 if (!bp->pf2vf_bulletin) in bnx2x_vf_pci_alloc()
3149 bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true); in bnx2x_vf_pci_alloc()
3154 bnx2x_vf_pci_dealloc(bp); in bnx2x_vf_pci_alloc()
3158 void bnx2x_iov_channel_down(struct bnx2x *bp) in bnx2x_iov_channel_down() argument
3163 if (!IS_SRIOV(bp)) in bnx2x_iov_channel_down()
3166 for_each_vf(bp, vf_idx) { in bnx2x_iov_channel_down()
3170 bulletin = BP_VF_BULLETIN(bp, vf_idx); in bnx2x_iov_channel_down()
3174 bnx2x_post_vf_bulletin(bp, vf_idx); in bnx2x_iov_channel_down()
3180 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); in bnx2x_iov_task() local
3182 if (!netif_running(bp->dev)) in bnx2x_iov_task()
3186 &bp->iov_task_state)) in bnx2x_iov_task()
3187 bnx2x_vf_handle_flr_event(bp); in bnx2x_iov_task()
3190 &bp->iov_task_state)) in bnx2x_iov_task()
3191 bnx2x_vf_mbx(bp); in bnx2x_iov_task()
3194 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) in bnx2x_schedule_iov_task() argument
3197 set_bit(flag, &bp->iov_task_state); in bnx2x_schedule_iov_task()
3200 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); in bnx2x_schedule_iov_task()
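bnx2x_schedule_iov_task() above publishes a flag with set_bit() and then queues the shared iov work item; in the driver the set_bit() is fenced with memory barriers (those lines do not reference bp, so they do not appear in the hits above), guaranteeing the worker that tests the bit sees it. The same publish/consume pattern in miniature, with kernel primitives swapped for C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum iov_flag { IOV_HANDLE_VF_MSG, IOV_HANDLE_FLR };

    static atomic_ulong iov_task_state;

    /* Producer: publish the flag, then kick the worker; queueing the work
     * item plays the role of queue_delayed_work(..., 0). */
    static void schedule_iov_task(enum iov_flag flag)
    {
        atomic_fetch_or(&iov_task_state, 1ul << flag); /* like set_bit + barrier */
        /* queue_work(...); */
    }

    /* Consumer: test-and-clear one flag, as the iov task does per event. */
    static bool test_and_clear_flag(enum iov_flag flag)
    {
        unsigned long bit = 1ul << flag;
        return atomic_fetch_and(&iov_task_state, ~bit) & bit;
    }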