Searched refs:rvu (Results 1 – 24 of 24) sorted by relevance

/linux-6.1.9/drivers/net/ethernet/marvell/octeontx2/af/
rvu_cgx.c
26 *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
31 &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
37 trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
44 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature) in is_mac_feature_supported() argument
49 if (!is_pf_cgxmapped(rvu, pf)) in is_mac_feature_supported()
52 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in is_mac_feature_supported()
53 cgxd = rvu_cgx_pdata(cgx_id, rvu); in is_mac_feature_supported()
59 static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) in cgxlmac_to_pfmap() argument
61 return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; in cgxlmac_to_pfmap()
64 int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) in cgxlmac_to_pf() argument
[all …]
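
The rvu_cgx.c hits above come from the macro that stamps out one otx2_mbox_alloc_msg_<name>() helper per up-direction mailbox message via token pasting (## _fn_name). Below is a minimal, self-contained sketch of that generation idiom; the message names and payload structs (demo_link_req, demo_stats_req) are invented for the demo and are not part of the driver, only the ## / # preprocessor technique mirrors the code shown.

#include <stdio.h>
#include <stdlib.h>

struct demo_link_req  { int lmac; };
struct demo_stats_req { int reset; };

/* One macro invocation generates one allocator, named by token pasting. */
#define DEFINE_ALLOC_MSG(_fn_name, _req_type)                           \
static struct _req_type *demo_alloc_msg_ ## _fn_name(void)              \
{                                                                       \
        struct _req_type *req = calloc(1, sizeof(*req));                \
        if (req)                                                        \
                printf("allocated %zu-byte " #_fn_name " request\n",    \
                       sizeof(*req));                                   \
        return req;                                                     \
}

DEFINE_ALLOC_MSG(link_event, demo_link_req)
DEFINE_ALLOC_MSG(stats, demo_stats_req)

int main(void)
{
        free(demo_alloc_msg_link_event());
        free(demo_alloc_msg_stats());
        return 0;
}

In the driver the macro is applied once per entry of a message-list macro, so adding a mailbox message automatically adds its allocator.
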
rvu.h
55 struct rvu *rvu; member
84 struct rvu *rvu; member
110 struct rvu *rvu; member
318 struct rvu *rvu; member
377 struct rvu *rvu; member
447 struct rvu { struct
523 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) in rvu_write64() argument
525 writeq(val, rvu->afreg_base + ((block << 28) | offset)); in rvu_write64()
528 static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset) in rvu_read64() argument
530 return readq(rvu->afreg_base + ((block << 28) | offset)); in rvu_read64()
[all …]
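
The rvu.h hits show the core AF register accessors: rvu_write64()/rvu_read64() turn a (block, offset) pair into a single BAR offset with ((block << 28) | offset) before calling writeq()/readq() on rvu->afreg_base. The short, self-contained sketch below only demonstrates that address arithmetic; the demo block and register values are made up, the encoding itself comes from the header lines above.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as rvu_write64()/rvu_read64(): block in bits 28+, offset below. */
static uint64_t af_reg_offset(uint64_t block, uint64_t offset)
{
        return (block << 28) | offset;
}

int main(void)
{
        const uint64_t demo_block  = 0x3;       /* hypothetical block address */
        const uint64_t demo_offset = 0x640;     /* hypothetical register offset */

        printf("register lands at afreg_base + 0x%llx\n",
               (unsigned long long)af_reg_offset(demo_block, demo_offset));
        return 0;
}
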
rvu.c
27 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
29 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
31 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
33 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
61 static void rvu_setup_hw_capabilities(struct rvu *rvu) in rvu_setup_hw_capabilities() argument
63 struct rvu_hwinfo *hw = rvu->hw; in rvu_setup_hw_capabilities()
73 hw->rvu = rvu; in rvu_setup_hw_capabilities()
75 if (is_rvu_pre_96xx_C0(rvu)) { in rvu_setup_hw_capabilities()
82 if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu)) in rvu_setup_hw_capabilities()
85 if (!is_rvu_pre_96xx_C0(rvu)) in rvu_setup_hw_capabilities()
[all …]
rvu_cpt.c
29 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
43 struct rvu *rvu = block->rvu; in rvu_cpt_af_flt_intr_handler() local
47 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); in rvu_cpt_af_flt_intr_handler()
48 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); in rvu_cpt_af_flt_intr_handler()
49 if (!is_rvu_otx2(rvu)) { in rvu_cpt_af_flt_intr_handler()
50 reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2)); in rvu_cpt_af_flt_intr_handler()
51 dev_err_ratelimited(rvu->dev, in rvu_cpt_af_flt_intr_handler()
55 dev_err_ratelimited(rvu->dev, in rvu_cpt_af_flt_intr_handler()
60 rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0); in rvu_cpt_af_flt_intr_handler()
61 rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1); in rvu_cpt_af_flt_intr_handler()
[all …]
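
rvu_cpt_af_flt_intr_handler() above reads the latched CPT_AF_FLTX_INT fault registers, logs them, and then writes the same values back. The self-contained sketch below imitates that read-then-write-back acknowledgement with a fake register; treating the write-back as write-one-to-clear is an assumption for the demo (the snippet only shows the read/write sequence, not the register semantics).

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_flt_int = 0x5;     /* pretend fault bits 0 and 2 are latched */

static uint64_t demo_read64(void)       { return fake_flt_int; }
static void demo_write64(uint64_t v)    { fake_flt_int &= ~v; }  /* assumed W1C */

int main(void)
{
        uint64_t reg = demo_read64();

        if (reg)
                fprintf(stderr, "engine fault bits: 0x%llx\n",
                        (unsigned long long)reg);

        demo_write64(reg);              /* ack only the faults just observed */
        printf("after ack: 0x%llx\n", (unsigned long long)fake_flt_int);
        return 0;
}

Writing back the value that was read, rather than all-ones, avoids clearing faults that latch between the read and the acknowledgement.
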
rvu_devlink.c
39 static bool rvu_common_request_irq(struct rvu *rvu, int offset, in rvu_common_request_irq() argument
42 struct rvu_devlink *rvu_dl = rvu->rvu_dl; in rvu_common_request_irq()
45 sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name); in rvu_common_request_irq()
46 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0, in rvu_common_request_irq()
47 &rvu->irq_name[offset * NAME_SIZE], rvu_dl); in rvu_common_request_irq()
49 dev_warn(rvu->dev, "Failed to register %s irq\n", name); in rvu_common_request_irq()
51 rvu->irq_allocated[offset] = true; in rvu_common_request_irq()
53 return rvu->irq_allocated[offset]; in rvu_common_request_irq()
70 struct rvu *rvu; in rvu_nix_af_rvu_intr_handler() local
74 rvu = rvu_dl->rvu; in rvu_nix_af_rvu_intr_handler()
[all …]
rvu_nix.c
19 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
20 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
22 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
24 static int nix_setup_ipolicers(struct rvu *rvu,
26 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
29 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
30 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
82 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) in rvu_get_next_nix_blkaddr() argument
88 return rvu->nix_blkaddr[blkaddr]; in rvu_get_next_nix_blkaddr()
91 if (rvu->nix_blkaddr[i] == blkaddr) in rvu_get_next_nix_blkaddr()
[all …]
rvu_npc_hash.c
121 static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr, in npc_program_mkex_hash_rx() argument
124 struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; in npc_program_mkex_hash_rx()
158 static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr, in npc_program_mkex_hash_tx() argument
161 struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash; in npc_program_mkex_hash_tx()
197 void npc_config_secret_key(struct rvu *rvu, int blkaddr) in npc_config_secret_key() argument
199 struct hw_cap *hwcap = &rvu->hw->cap; in npc_config_secret_key()
200 struct rvu_hwinfo *hw = rvu->hw; in npc_config_secret_key()
204 dev_info(rvu->dev, "HW does not support secret key configuration\n"); in npc_config_secret_key()
209 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf), in npc_config_secret_key()
211 rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf), in npc_config_secret_key()
[all …]
rvu_cn10k.c
20 static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, in lmtst_map_table_ops() argument
26 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); in lmtst_map_table_ops()
30 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); in lmtst_map_table_ops()
43 rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0)); in lmtst_map_table_ops()
44 rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL); in lmtst_map_table_ops()
45 rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00); in lmtst_map_table_ops()
53 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) in rvu_get_lmtst_tbl_index() argument
55 return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + in rvu_get_lmtst_tbl_index()
59 static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, in rvu_get_lmtaddr() argument
66 dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__); in rvu_get_lmtaddr()
[all …]
mcs_rvu_if.c
18 *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
23 &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
35 int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu, in rvu_mbox_handler_mcs_set_lmac_mode() argument
41 if (req->mcs_id >= rvu->mcs_blk_cnt) in rvu_mbox_handler_mcs_set_lmac_mode()
56 struct rvu *rvu = mcs->rvu; in mcs_add_intr_wq_entry() local
61 pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; in mcs_add_intr_wq_entry()
76 spin_lock(&rvu->mcs_intrq_lock); in mcs_add_intr_wq_entry()
77 list_add_tail(&qentry->node, &rvu->mcs_intrq_head); in mcs_add_intr_wq_entry()
78 spin_unlock(&rvu->mcs_intrq_lock); in mcs_add_intr_wq_entry()
79 queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work); in mcs_add_intr_wq_entry()
[all …]
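
mcs_add_intr_wq_entry() above shows the usual defer-from-IRQ-context pattern: allocate a queue entry, append it to a list under a spinlock, and kick a workqueue that drains the list later in process context. A compact sketch of that pattern follows, assuming invented names (intr_event, intr_queue, demo_intr_wq) and using only the stock list/spinlock/workqueue APIs; it is an illustration, not the driver's code.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct intr_event {
        struct list_head node;
        u64 cause;                              /* whatever the hard IRQ latched */
};

static LIST_HEAD(intr_queue);
static DEFINE_SPINLOCK(intr_queue_lock);
static struct workqueue_struct *intr_wq;
static struct work_struct intr_work;

/* Hard-IRQ side: record the event and defer the real handling. */
static void intr_enqueue(u64 cause)
{
        struct intr_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

        if (!ev)
                return;
        ev->cause = cause;

        spin_lock(&intr_queue_lock);
        list_add_tail(&ev->node, &intr_queue);
        spin_unlock(&intr_queue_lock);

        queue_work(intr_wq, &intr_work);
}

/* Process-context side: drain everything queued so far. */
static void intr_work_fn(struct work_struct *work)
{
        struct intr_event *ev;

        for (;;) {
                spin_lock(&intr_queue_lock);
                ev = list_first_entry_or_null(&intr_queue, struct intr_event, node);
                if (ev)
                        list_del(&ev->node);
                spin_unlock(&intr_queue_lock);
                if (!ev)
                        break;
                pr_info("handling deferred event, cause=0x%llx\n",
                        (unsigned long long)ev->cause);
                kfree(ev);
        }
}

static int intr_wq_setup(void)
{
        intr_wq = alloc_workqueue("demo_intr_wq", 0, 0);
        if (!intr_wq)
                return -ENOMEM;
        INIT_WORK(&intr_work, intr_work_fn);
        return 0;
}
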
rvu_npc.c
32 static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
34 static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
47 bool is_npc_interface_valid(struct rvu *rvu, u8 intf) in is_npc_interface_valid() argument
49 struct rvu_hwinfo *hw = rvu->hw; in is_npc_interface_valid()
54 int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena) in rvu_npc_get_tx_nibble_cfg() argument
59 if (is_rvu_96xx_B0(rvu)) in rvu_npc_get_tx_nibble_cfg()
64 static int npc_mcam_verify_pf_func(struct rvu *rvu, in npc_mcam_verify_pf_func() argument
86 void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) in rvu_npc_set_pkind() argument
91 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); in rvu_npc_set_pkind()
97 rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val); in rvu_npc_set_pkind()
[all …]
rvu_switch.c
11 static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc, in rvu_switch_install_rx_rule() argument
18 pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_switch_install_rx_rule()
37 return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); in rvu_switch_install_rx_rule()
40 static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry) in rvu_switch_install_tx_rule() argument
47 pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_switch_install_tx_rule()
67 return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); in rvu_switch_install_tx_rule()
70 static int rvu_switch_install_rules(struct rvu *rvu) in rvu_switch_install_rules() argument
72 struct rvu_switch *rswitch = &rvu->rswitch; in rvu_switch_install_rules()
74 struct rvu_hwinfo *hw = rvu->hw; in rvu_switch_install_rules()
80 if (!is_pf_cgxmapped(rvu, pf)) in rvu_switch_install_rules()
[all …]
rvu_npa.c
15 static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, in npa_aq_enqueue_wait() argument
26 reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS); in npa_aq_enqueue_wait()
36 rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1); in npa_aq_enqueue_wait()
52 int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req, in rvu_npa_aq_enq_inst() argument
55 struct rvu_hwinfo *hw = rvu->hw; in rvu_npa_aq_enq_inst()
65 pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_npa_aq_enq_inst()
69 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); in rvu_npa_aq_enq_inst()
76 dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__); in rvu_npa_aq_enq_inst()
80 npalf = rvu_get_lf(rvu, block, pcifunc, 0); in rvu_npa_aq_enq_inst()
149 rc = npa_aq_enqueue_wait(rvu, block, &inst); in rvu_npa_aq_enq_inst()
[all …]
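
npa_aq_enqueue_wait() above rings the NPA admin-queue doorbell (NPA_AF_AQ_DOOR) and then reads NPA_AF_AQ_STATUS until the instruction completes. The self-contained sketch below imitates that doorbell-then-poll handshake with fake registers; the AQ_BUSY_BIT name, the retry budget, and the completion semantics are assumptions for the demo, only the overall flow mirrors the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AQ_BUSY_BIT     (1ULL << 0)     /* hypothetical "instruction pending" flag */
#define MAX_RETRIES     1000

static uint64_t fake_aq_status;
static int      fake_cycles_left;

static void ring_doorbell(void)
{
        /* In the driver this is rvu_write64(..., NPA_AF_AQ_DOOR, 1). */
        fake_aq_status = AQ_BUSY_BIT;
        fake_cycles_left = 3;           /* pretend the hardware needs 3 polls */
}

static uint64_t read_status(void)
{
        /* In the driver this is rvu_read64(..., NPA_AF_AQ_STATUS). */
        if (fake_cycles_left > 0 && --fake_cycles_left == 0)
                fake_aq_status &= ~AQ_BUSY_BIT;
        return fake_aq_status;
}

static bool aq_enqueue_wait(void)
{
        int tries;

        ring_doorbell();
        for (tries = 0; tries < MAX_RETRIES; tries++) {
                if (!(read_status() & AQ_BUSY_BIT))
                        return true;    /* hardware consumed the instruction */
        }
        return false;                   /* gave up: report a timeout */
}

int main(void)
{
        printf("admin queue enqueue %s\n",
               aq_enqueue_wait() ? "completed" : "timed out");
        return 0;
}
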
rvu_npc_hash.h
27 rvu_write64(rvu, blkaddr, \
31 rvu_write64(rvu, blkaddr, \
35 rvu_write64(rvu, blkaddr, \
49 void npc_update_field_hash(struct rvu *rvu, u8 intf,
57 void npc_config_secret_key(struct rvu *rvu, int blkaddr);
58 void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
204 bool rvu_npc_exact_has_match_table(struct rvu *rvu);
205 u32 rvu_npc_exact_get_max_entries(struct rvu *rvu);
206 int rvu_npc_exact_init(struct rvu *rvu);
207 int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
[all …]
rvu_debugfs.c
201 #define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \ argument
521 static void rvu_dbg_mcs_init(struct rvu *rvu) in rvu_dbg_mcs_init() argument
527 if (!rvu->mcs_blk_cnt) in rvu_dbg_mcs_init()
530 rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root); in rvu_dbg_mcs_init()
532 for (i = 0; i < rvu->mcs_blk_cnt; i++) { in rvu_dbg_mcs_init()
536 rvu->rvu_dbg.mcs = debugfs_create_dir(dname, in rvu_dbg_mcs_init()
537 rvu->rvu_dbg.mcs_root); in rvu_dbg_mcs_init()
539 rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs); in rvu_dbg_mcs_init()
541 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs, in rvu_dbg_mcs_init()
544 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs, in rvu_dbg_mcs_init()
[all …]
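
rvu_dbg_mcs_init() above builds a small debugfs tree: a root directory, one "mcs<N>" directory per block, an "rx_stats" subdirectory, and stat files such as "flowid" and "secy". A minimal sketch of that layout using the stock debugfs helpers follows; the names (demo_mcs, demo_flowid_show) and the printed text are placeholders, while debugfs_create_dir(), debugfs_create_file(), and DEFINE_SHOW_ATTRIBUTE() are the standard kernel APIs the driver also relies on.

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

static struct dentry *demo_root;

static int demo_flowid_show(struct seq_file *s, void *unused)
{
        seq_puts(s, "flowid stats would be printed here\n");
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_flowid);

static void demo_debugfs_init(int nr_blocks)
{
        struct dentry *blk_dir, *rx_dir;
        char dname[16];
        int i;

        demo_root = debugfs_create_dir("demo_mcs", NULL);

        for (i = 0; i < nr_blocks; i++) {
                snprintf(dname, sizeof(dname), "mcs%d", i);
                blk_dir = debugfs_create_dir(dname, demo_root);
                rx_dir = debugfs_create_dir("rx_stats", blk_dir);
                debugfs_create_file("flowid", 0400, rx_dir, NULL,
                                    &demo_flowid_fops);
        }
}
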
rvu_npc_fs.c
115 static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf) in npc_is_field_present() argument
117 struct npc_mcam *mcam = &rvu->hw->mcam; in npc_is_field_present()
174 static bool npc_check_overlap(struct rvu *rvu, int blkaddr, in npc_check_overlap() argument
177 struct npc_mcam *mcam = &rvu->hw->mcam; in npc_check_overlap()
194 cfg = rvu_read64(rvu, blkaddr, in npc_check_overlap()
223 static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type, in npc_check_field() argument
226 if (!npc_is_field_present(rvu, type, intf) || in npc_check_field()
227 npc_check_overlap(rvu, blkaddr, type, 0, intf)) in npc_check_field()
304 static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf) in npc_handle_multi_layer_fields() argument
306 struct npc_mcam *mcam = &rvu->hw->mcam; in npc_handle_multi_layer_fields()
[all …]
rvu_sdp.c
49 int rvu_sdp_init(struct rvu *rvu) in rvu_sdp_init() argument
60 pfvf = &rvu->pf[sdp_pf_num[i]]; in rvu_sdp_init()
62 pfvf->sdp_info = devm_kzalloc(rvu->dev, in rvu_sdp_init()
70 dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]); in rvu_sdp_init()
81 rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu, in rvu_mbox_handler_set_sdp_chan_info() argument
85 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); in rvu_mbox_handler_set_sdp_chan_info()
88 dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n", in rvu_mbox_handler_set_sdp_chan_info()
95 rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_get_sdp_chan_info() argument
98 struct rvu_hwinfo *hw = rvu->hw; in rvu_mbox_handler_get_sdp_chan_info()
105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); in rvu_mbox_handler_get_sdp_chan_info()
[all …]
rvu_devlink.h
72 struct rvu *rvu; member
79 int rvu_register_dl(struct rvu *rvu);
80 void rvu_unregister_dl(struct rvu *rvu);
ptp.c
494 int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, in rvu_mbox_handler_ptp_op() argument
506 if (!rvu->ptp) in rvu_mbox_handler_ptp_op()
511 err = ptp_adjfine(rvu->ptp, req->scaled_ppm); in rvu_mbox_handler_ptp_op()
514 err = ptp_get_clock(rvu->ptp, &rsp->clk); in rvu_mbox_handler_ptp_op()
517 err = ptp_get_tstmp(rvu->ptp, &rsp->clk); in rvu_mbox_handler_ptp_op()
520 err = ptp_set_thresh(rvu->ptp, req->thresh); in rvu_mbox_handler_ptp_op()
523 err = ptp_extts_on(rvu->ptp, req->extts_on); in rvu_mbox_handler_ptp_op()
rvu_npc_fs.h
13 void npc_update_entry(struct rvu *rvu, enum key_fields type,
rvu_reg.h
622 if (rvu->hw->npc_ext_set) \
630 if (rvu->hw->npc_ext_set) \
638 if (rvu->hw->npc_ext_set) \
646 if (rvu->hw->npc_ext_set) \
654 if (rvu->hw->npc_ext_set) \
662 if (rvu->hw->npc_ext_set) \
670 if (rvu->hw->npc_ext_set) \
678 if (rvu->hw->npc_ext_set) \
Makefile
11 rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_trace.h
9 #define TRACE_SYSTEM rvu
npc.h
14 rvu_write64(rvu, blkaddr, \
18 rvu_write64(rvu, blkaddr, \
mcs.h
158 void *rvu; member