/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell CN10K MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#ifndef MCS_H
#define MCS_H

#include <linux/bits.h>
#include "rvu.h"

#define PCI_DEVID_CN10K_MCS 0xA096

#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)

#define MCS_ID_MASK 0x7
#define MCS_MAX_PFS 128

#define MCS_PORT_MODE_MASK 0x3
#define MCS_PORT_FIFO_SKID_MASK 0x3F
#define MCS_MAX_CUSTOM_TAGS 0x8

#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
#define MCS_CTRLPKT_DA_RULE_MAX 8
#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
#define MCS_CTRLPKT_COMBO_RULE_MAX 4
#define MCS_CTRLPKT_MAC_RULE_MAX 1

#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
			       MCS_CTRLPKT_DA_RULE_MAX + \
			       MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
			       MCS_CTRLPKT_COMBO_RULE_MAX + \
			       MCS_CTRLPKT_MAC_RULE_MAX)

#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
#define MCS_CTRLPKT_DA_RULE_OFFSET 8
#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
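
/*
 * The *_RULE_OFFSET values above carve the shared control packet rule space
 * into fixed per-type windows: ETYPE rules occupy indices 0-7, DA rules 8-15,
 * DA range rules 16-19, combo rules 20-23 and the single MAC rule index 24,
 * which together add up to MCS_MAX_CTRLPKT_RULES (25). Illustrative sketch
 * only, assuming mcs_alloc_ctrlpktrule() below searches the rule bitmap
 * starting at the given per-type offset:
 *
 *	rule_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule,
 *					map->ctrlpktrule2pf_map,
 *					MCS_CTRLPKT_DA_RULE_OFFSET, pcifunc);
 */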

/* Reserved resources for default bypass entry */
#define MCS_RSRC_RSVD_CNT 1

/* MCS Interrupt Vector Enumeration */
enum mcs_int_vec_e {
	MCS_INT_VEC_MIL_RX_GBL = 0x0,
	MCS_INT_VEC_MIL_RX_LMACX = 0x1,
	MCS_INT_VEC_MIL_TX_LMACX = 0x5,
	MCS_INT_VEC_HIL_RX_GBL = 0x9,
	MCS_INT_VEC_HIL_RX_LMACX = 0xa,
	MCS_INT_VEC_HIL_TX_GBL = 0xe,
	MCS_INT_VEC_HIL_TX_LMACX = 0xf,
	MCS_INT_VEC_IP = 0x13,
	MCS_INT_VEC_CNT = 0x14,
};

#define MCS_MAX_BBE_INT 8ULL
#define MCS_BBE_INT_MASK 0xFFULL

#define MCS_MAX_PAB_INT 4ULL
#define MCS_PAB_INT_MASK 0xFULL

#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
#define MCS_PAB_TX_INT_ENA BIT_ULL(5)

#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)

#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)

#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
			    MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
			    MCS_CPM_RX_INT_SL_GTE48 | \
			    MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
			    MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
			    MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
			    MCS_CPM_RX_INT_PN_THRESH_REACHED)

struct mcs_pfvf {
	u64 intr_mask; /* Enabled Interrupt mask */
};

struct mcs_intr_event {
	u16 pcifunc;
	u64 intr_mask;
	u64 sa_id;
	u8 mcs_id;
	u8 lmac_id;
};

struct mcs_intrq_entry {
	struct list_head node;
	struct mcs_intr_event intr_event;
};

struct secy_mem_map {
	u8 flow_id;
	u8 secy;
	u8 ctrl_pkt;
	u8 sc;
	u64 sci;
};

struct mcs_rsrc_map {
	u16 *flowid2pf_map;
	u16 *secy2pf_map;
	u16 *sc2pf_map;
	u16 *sa2pf_map;
	u16 *flowid2secy_map; /* bitmap flowid mapped to secy */
	u16 *ctrlpktrule2pf_map;
	struct rsrc_bmap flow_ids;
	struct rsrc_bmap secy;
	struct rsrc_bmap sc;
	struct rsrc_bmap sa;
	struct rsrc_bmap ctrlpktrule;
};

struct hwinfo {
	u8 tcam_entries;
	u8 secy_entries;
	u8 sc_entries;
	u16 sa_entries;
	u8 mcs_x2p_intf;
	u8 lmac_cnt;
	u8 mcs_blks;
	unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
};

struct mcs {
	void __iomem *reg_base;
	struct pci_dev *pdev;
	struct device *dev;
	struct hwinfo *hw;
	struct mcs_rsrc_map tx;
	struct mcs_rsrc_map rx;
	u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
	u8 mcs_id;
	struct mcs_ops *mcs_ops;
	struct list_head mcs_list;
	/* Lock for mcs stats */
	struct mutex stats_lock;
	struct mcs_pfvf *pf;
	struct mcs_pfvf *vf;
	u16 num_vec;
	void *rvu;
	u16 *tx_sa_active;
};

struct mcs_ops {
	void (*mcs_set_hw_capabilities)(struct mcs *mcs);
	void (*mcs_parser_cfg)(struct mcs *mcs);
	void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
	void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
	void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
};

extern struct pci_driver mcs_driver;

static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
{
	writeq(val, mcs->reg_base + offset);
}

static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
{
	return readq(mcs->reg_base + offset);
}
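
/*
 * Usage sketch for the accessors above (illustrative only; MCSX_EXAMPLE_REG
 * is a hypothetical register offset and 'base_chan' a caller-supplied value):
 * a read-modify-write of an LMAC link register using the field masks defined
 * at the top of this file.
 *
 *	u64 val;
 *
 *	val = mcs_reg_read(mcs, MCSX_EXAMPLE_REG);
 *	val &= ~MCSX_LINK_LMAC_BASE_MASK;
 *	val |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base_chan);
 *	mcs_reg_write(mcs, MCSX_EXAMPLE_REG, val);
 *
 * FIELD_PREP() comes from <linux/bitfield.h>.
 */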

/* MCS APIs */
struct mcs *mcs_get_pdata(int mcs_id);
int mcs_get_blkcnt(void);
int mcs_set_lmac_channels(int mcs_id, u16 base);
int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
		       u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
int mcs_install_flowid_bypass_entry(struct mcs *mcs);
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
		      struct mcs_port_cfg_get_rsp *rsp);
void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
			    struct mcs_custom_tag_cfg_get_rsp *rsp);
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
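
/*
 * Usage sketch for the generic resource allocator above (illustrative only;
 * 'mcs' and 'pcifunc' come from the caller, and this assumes mcs_alloc_rsrc()
 * returns a negative errno when the bitmap is exhausted): reserving a TX
 * flowid for a PF/VF and recording the owner in the flowid2pf map.
 *
 *	int flow_id;
 *
 *	flow_id = mcs_alloc_rsrc(&mcs->tx.flow_ids, mcs->tx.flowid2pf_map,
 *				 pcifunc);
 *	if (flow_id < 0)
 *		return flow_id;
 *
 * The id is later released with mcs_free_rsrc() against the same bitmap.
 */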

/* CN10K-B APIs */
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cn10kb_mcs_parser_cfg(struct mcs *mcs);
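
/*
 * Illustrative sketch (not necessarily how mcs.c wires things up): the
 * CN10K-B callbacks above can populate a struct mcs_ops instance that
 * mcs->mcs_ops points at, keeping the common code silicon-agnostic:
 *
 *	static struct mcs_ops cn10kb_mcs_ops = {
 *		.mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
 *		.mcs_parser_cfg          = cn10kb_mcs_parser_cfg,
 *		.mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
 *		.mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
 *		.mcs_flowid_secy_map     = cn10kb_mcs_flowid_secy_map,
 *	};
 */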

/* CNF10K-B APIs */
struct mcs_ops *cnf10kb_get_mac_ops(void);
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);

/* Stats APIs */
void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
int mcs_set_force_clk_en(struct mcs *mcs, bool set);

int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);

#endif /* MCS_H */