1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell MCS driver
3  *
4  * Copyright (C) 2022 Marvell.
5  */
6 
7 #include <linux/bitfield.h>
8 #include <linux/delay.h>
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 
13 #include "mcs.h"
14 #include "mcs_reg.h"
15 
16 #define DRV_NAME	"Marvell MCS Driver"
17 
18 #define PCI_CFG_REG_BAR_NUM	0
19 
20 static const struct pci_device_id mcs_id_table[] = {
21 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
22 	{ 0, }  /* end of table */
23 };
24 
25 static LIST_HEAD(mcs_list);
26 
mcs_get_tx_secy_stats(struct mcs * mcs,struct mcs_secy_stats * stats,int id)27 void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
28 {
29 	u64 reg;
30 
31 	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
32 	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
33 
34 	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
35 	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
36 
37 	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
38 	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
39 
40 	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
41 	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
42 
43 	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
44 	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
45 
46 	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
47 	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
48 
49 	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
50 	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
51 
52 	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
53 	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
54 
55 	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
56 	stats->octet_encrypted_cnt =  mcs_reg_read(mcs, reg);
57 
58 	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
59 	stats->octet_protected_cnt =  mcs_reg_read(mcs, reg);
60 
61 	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
62 	stats->pkt_noactivesa_cnt =  mcs_reg_read(mcs, reg);
63 
64 	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
65 	stats->pkt_toolong_cnt =  mcs_reg_read(mcs, reg);
66 
67 	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
68 	stats->pkt_untagged_cnt =  mcs_reg_read(mcs, reg);
69 }
70 
mcs_get_rx_secy_stats(struct mcs * mcs,struct mcs_secy_stats * stats,int id)71 void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
72 {
73 	u64 reg;
74 
75 	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
76 	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
77 
78 	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
79 	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
80 
81 	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
82 	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
83 
84 	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
85 	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
86 
87 	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
88 	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
89 
90 	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
91 	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
92 
93 	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
94 	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
95 
96 	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
97 	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
98 
99 	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
100 	stats->octet_decrypted_cnt =  mcs_reg_read(mcs, reg);
101 
102 	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
103 	stats->octet_validated_cnt =  mcs_reg_read(mcs, reg);
104 
105 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
106 	stats->pkt_port_disabled_cnt =  mcs_reg_read(mcs, reg);
107 
108 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
109 	stats->pkt_badtag_cnt =  mcs_reg_read(mcs, reg);
110 
111 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
112 	stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
113 
114 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
115 	stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
116 
117 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
118 	stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
119 
120 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
121 	stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
122 
123 	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
124 	stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
125 
126 	if (mcs->hw->mcs_blks > 1) {
127 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
128 		stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
129 	}
130 }
131 
mcs_get_flowid_stats(struct mcs * mcs,struct mcs_flowid_stats * stats,int id,int dir)132 void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
133 			  int id, int dir)
134 {
135 	u64 reg;
136 
137 	if (dir == MCS_RX)
138 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
139 	else
140 		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
141 
142 	stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
143 }
144 
mcs_get_port_stats(struct mcs * mcs,struct mcs_port_stats * stats,int id,int dir)145 void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
146 			int id, int dir)
147 {
148 	u64 reg;
149 
150 	if (dir == MCS_RX) {
151 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
152 		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
153 
154 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
155 		stats->parser_err_cnt = mcs_reg_read(mcs, reg);
156 		if (mcs->hw->mcs_blks > 1) {
157 			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
158 			stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
159 		}
160 	} else {
161 		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
162 		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
163 
164 		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
165 		stats->parser_err_cnt = mcs_reg_read(mcs, reg);
166 
167 		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
168 		stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
169 	}
170 }
171 
mcs_get_sa_stats(struct mcs * mcs,struct mcs_sa_stats * stats,int id,int dir)172 void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
173 {
174 	u64 reg;
175 
176 	if (dir == MCS_RX) {
177 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
178 		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
179 
180 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
181 		stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
182 
183 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
184 		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
185 
186 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
187 		stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
188 
189 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
190 		stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
191 	} else {
192 		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
193 		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
194 
195 		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
196 		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
197 	}
198 }
199 
mcs_get_sc_stats(struct mcs * mcs,struct mcs_sc_stats * stats,int id,int dir)200 void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
201 		      int id, int dir)
202 {
203 	u64 reg;
204 
205 	if (dir == MCS_RX) {
206 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
207 		stats->hit_cnt = mcs_reg_read(mcs, reg);
208 
209 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
210 		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
211 
212 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
213 		stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
214 
215 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
216 		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
217 
218 		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
219 		stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
220 
221 		if (mcs->hw->mcs_blks > 1) {
222 			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
223 			stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
224 
225 			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
226 			stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
227 		}
228 		if (mcs->hw->mcs_blks == 1) {
229 			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
230 			stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
231 
232 			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
233 			stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
234 		}
235 	} else {
236 		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
237 		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
238 
239 		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
240 		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
241 
242 		if (mcs->hw->mcs_blks == 1) {
243 			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
244 			stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
245 
246 			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
247 			stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
248 		}
249 	}
250 }
251 
/* Clear one statistics group for entry @id in direction @dir.
 *
 * Bit 0 of the CSE slave CTRL register is set around the counter reads;
 * the subsequent mcs_get_*_stats() read then leaves the counters cleared
 * (NOTE(review): this appears to arm a clear-on-read mode — confirm
 * against the CN10K MCS register manual).  The read-back values are
 * discarded.
 *
 * @type: one of MCS_{FLOWID,SECY,SC,SA,PORT}_STATS
 */
void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
{
	struct mcs_flowid_stats flowid_st;
	struct mcs_port_stats port_st;
	struct mcs_secy_stats secy_st;
	struct mcs_sc_stats sc_st;
	struct mcs_sa_stats sa_st;
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CSE_RX_SLAVE_CTRL;
	else
		reg = MCSX_CSE_TX_SLAVE_CTRL;

	mcs_reg_write(mcs, reg, BIT_ULL(0));

	switch (type) {
	case MCS_FLOWID_STATS:
		mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
		break;
	case MCS_SECY_STATS:
		if (dir == MCS_RX)
			mcs_get_rx_secy_stats(mcs, &secy_st, id);
		else
			mcs_get_tx_secy_stats(mcs, &secy_st, id);
		break;
	case MCS_SC_STATS:
		mcs_get_sc_stats(mcs, &sc_st, id, dir);
		break;
	case MCS_SA_STATS:
		mcs_get_sa_stats(mcs, &sa_st, id, dir);
		break;
	case MCS_PORT_STATS:
		mcs_get_port_stats(mcs, &port_st, id, dir);
		break;
	default:
		/* Unknown stats type: nothing to read/clear */
		break;
	}

	/* Restore normal (non-destructive) counter reads */
	mcs_reg_write(mcs, reg, 0x0);
}
291 
/* Clear every statistics group owned by @pcifunc in direction @dir.
 * Walks each resource map (flowid, secy, sc, sa) and clears the stats
 * of entries mapped to this PF/VF.  Always returns 0.
 */
int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* Clear FLOWID stats */
	for (id = 0; id < map->flow_ids.max; id++) {
		if (map->flowid2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
	}

	/* Clear SECY stats */
	for (id = 0; id < map->secy.max; id++) {
		if (map->secy2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
	}

	/* Clear SC stats.
	 * Iterate the SC map's own bound: sc2pf_map is sized by sc.max,
	 * so bounding this loop by secy.max (as before) indexes the wrong
	 * resource's limit.
	 */
	for (id = 0; id < map->sc.max; id++) {
		if (map->sc2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
	}

	/* Clear SA stats */
	for (id = 0; id < map->sa.max; id++) {
		if (map->sa2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
	}
	return 0;
}
331 
/* Program the next packet number for SA @pn_id in direction @dir. */
void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
{
	u64 reg = (dir == MCS_RX) ? MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id) :
		  MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);

	mcs_reg_write(mcs, reg, next_pn);
}
342 
/* Program the TX SC -> SA mapping for CN10KB.
 *
 * SA_MAP_MEM_0 word layout (per SC), derived from the shifts below:
 *   [7:0]   sa_index0
 *   [16:9]  sa_index1
 *   [18]    rekey_ena
 *   [19]    sa_index0_vld
 *   [20]    sa_index1_vld
 *   [21]    tx_sa_active
 *   [63:22] low 42 bits of sectag_sci
 * The remaining SCI bits (bit 42 and up) go into SA_MAP_MEM_1.
 */
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
	u64 reg, val;

	val = (map->sa_index0 & 0xFF) |
	      (map->sa_index1 & 0xFF) << 9 |
	      (map->rekey_ena & 0x1) << 18 |
	      (map->sa_index0_vld & 0x1) << 19 |
	      (map->sa_index1_vld & 0x1) << 20 |
	      (map->tx_sa_active & 0x1) << 21 |
	      map->sectag_sci << 22;
	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
	mcs_reg_write(mcs, reg, val);

	/* Upper part of the SCI that did not fit into word 0 */
	val = map->sectag_sci >> 42;
	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
	mcs_reg_write(mcs, reg, val);
}
361 
/* Program the RX SC/AN -> SA mapping for CN10KB.
 *
 * Each SC owns four consecutive map entries, one per association number,
 * so the entry index is (4 * sc_id) + an.  Word layout: sa_index in the
 * low bits (masked to 8 bits here), sa_in_use flag at bit 9.
 */
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
	u64 val, reg;

	val = (map->sa_index & 0xFF) | map->sa_in_use << 9;

	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
	mcs_reg_write(mcs, reg, val);
}
371 
/* Program an SA policy: RX policies span 8 consecutive 64-bit words,
 * TX policies span 9.  @plcy must point at that many u64 entries.
 */
void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
{
	int i;

	if (dir == MCS_RX) {
		for (i = 0; i < 8; i++)
			mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(i, sa_id),
				      plcy[i]);
	} else {
		for (i = 0; i < 9; i++)
			mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(i, sa_id),
				      plcy[i]);
	}
}
389 
mcs_ena_dis_sc_cam_entry(struct mcs * mcs,int sc_id,int ena)390 void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
391 {
392 	u64 reg, val;
393 
394 	reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
395 	if (sc_id > 63)
396 		reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
397 
398 	if (ena)
399 		val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
400 	else
401 		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
402 
403 	mcs_reg_write(mcs, reg, val);
404 }
405 
/* Program RX SC CAM entry @sc_id: word 0 holds the SCI, word 1 the
 * owning SecY, then the entry is enabled so lookups can match it.
 */
void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
{
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
	/* Enable SC CAM */
	mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
}
413 
/* Program the SecY policy word for @secy_id in direction @dir. */
void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
{
	u64 reg = (dir == MCS_RX) ? MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id) :
		  MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);

	mcs_reg_write(mcs, reg, plcy);

	/* Single-block hardware has a second RX policy word; zero it out */
	if (dir == MCS_RX && mcs->hw->mcs_blks == 1)
		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
}
428 
/* Map a flowid TCAM entry to its SecY (and, on TX, its SC) on CN10KB.
 *
 * Common word layout (from the shifts below): [6:0] secy, [8] ctrl_pkt.
 * The TX map additionally carries [15:9] sc.  RX and TX use different
 * map memories.
 */
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
	u64 reg, val;

	val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
	} else {
		val |= (map->sc & 0x7F) << 9;
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
	}

	mcs_reg_write(mcs, reg, val);
}
443 
mcs_ena_dis_flowid_entry(struct mcs * mcs,int flow_id,int dir,int ena)444 void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
445 {
446 	u64 reg, val;
447 
448 	if (dir == MCS_RX) {
449 		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
450 		if (flow_id > 63)
451 			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
452 	} else {
453 		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
454 		if (flow_id > 63)
455 			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
456 	}
457 
458 	/* Enable/Disable the tcam entry */
459 	if (ena)
460 		val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
461 	else
462 		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
463 
464 	mcs_reg_write(mcs, reg, val);
465 }
466 
/* Program a flowid TCAM entry: four data words and four mask words,
 * written data-then-mask per word, for entry @flow_id in direction @dir.
 */
void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
{
	int i;

	if (dir == MCS_RX) {
		for (i = 0; i < 4; i++) {
			mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(i, flow_id),
				      data[i]);
			mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(i, flow_id),
				      mask[i]);
		}
	} else {
		for (i = 0; i < 4; i++) {
			mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(i, flow_id),
				      data[i]);
			mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(i, flow_id),
				      mask[i]);
		}
	}
}
488 
/* Install the reserved bypass rule: a flowid TCAM entry whose mask
 * matches every packet, mapped to a reserved SecY whose policy passes
 * traffic through.  Uses the last (reserved) TCAM and SecY entries.
 * Always returns 0.
 */
int mcs_install_flowid_bypass_entry(struct mcs *mcs)
{
	int flow_id, secy_id, reg_id;
	struct secy_mem_map map;
	u64 reg, plcy = 0;

	/* Flow entry: reserved entry at the top of the TCAM */
	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	/* All-ones mask in every word -> entry matches any packet */
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
	}
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
	}
	/* secy: reserved entry at the top of the SecY table */
	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;

	/* Set validate frames to NULL and enable control port.
	 * Multi-block hardware uses a different policy-bit layout.
	 */
	plcy = 0x7ull;
	if (mcs->hw->mcs_blks > 1)
		plcy = BIT_ULL(0) | 0x3ull << 4;
	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);

	/* Enable control port and set mtu to max */
	plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
	if (mcs->hw->mcs_blks > 1)
		plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);

	/* Map flowid to secy (TX additionally maps the SC) */
	map.secy = secy_id;
	map.ctrl_pkt = 0;
	map.flow_id = flow_id;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
	map.sc = secy_id;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);

	/* Enable Flowid entry */
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
	return 0;
}
533 
mcs_clear_secy_plcy(struct mcs * mcs,int secy_id,int dir)534 void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
535 {
536 	struct mcs_rsrc_map *map;
537 	int flow_id;
538 
539 	if (dir == MCS_RX)
540 		map = &mcs->rx;
541 	else
542 		map = &mcs->tx;
543 
544 	/* Clear secy memory to zero */
545 	mcs_secy_plcy_write(mcs, 0, secy_id, dir);
546 
547 	/* Disable the tcam entry using this secy */
548 	for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
549 		if (map->flowid2secy_map[flow_id] != secy_id)
550 			continue;
551 		mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
552 	}
553 }
554 
/* Allocate a control-packet rule slot at or after @offset for @pcifunc.
 * Returns the allocated index, -EINVAL if the bitmap is absent, or
 * -ENOSPC when no slot is free.
 */
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
{
	int idx;

	if (!rsrc->bmap)
		return -EINVAL;

	/* Look for one free slot starting at @offset */
	idx = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
	if (idx >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, idx, 1);
	pf_map[idx] = pcifunc;

	return idx;
}
571 
mcs_free_ctrlpktrule(struct mcs * mcs,struct mcs_free_ctrl_pkt_rule_req * req)572 int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
573 {
574 	u16 pcifunc = req->hdr.pcifunc;
575 	struct mcs_rsrc_map *map;
576 	u64 dis, reg;
577 	int id, rc;
578 
579 	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
580 	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
581 
582 	if (req->all) {
583 		for (id = 0; id < map->ctrlpktrule.max; id++) {
584 			if (map->ctrlpktrule2pf_map[id] != pcifunc)
585 				continue;
586 			mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
587 			dis = mcs_reg_read(mcs, reg);
588 			dis &= ~BIT_ULL(id);
589 			mcs_reg_write(mcs, reg, dis);
590 		}
591 		return 0;
592 	}
593 
594 	rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
595 	dis = mcs_reg_read(mcs, reg);
596 	dis &= ~BIT_ULL(req->rule_idx);
597 	mcs_reg_write(mcs, reg, dis);
598 
599 	return rc;
600 }
601 
/* Program one control-packet classification rule and set its enable bit.
 *
 * @req->rule_type selects the rule class (ethertype, DA, DA range,
 * combo, MAC); @req->rule_idx is the global rule index from which the
 * class offset is subtracted to get the per-class register index.
 * Returns 0 on success, -EINVAL if the rule data fails validation.
 *
 * NOTE(review): an unrecognized rule_type falls through the switch and
 * still sets the enable bit for rule_idx — confirm callers validate
 * rule_type beforehand.
 */
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
{
	u64 reg, enb;
	u64 idx;

	switch (req->rule_type) {
	case MCS_CTRL_PKT_RULE_TYPE_ETH:
		/* Only the PAE (802.1X) ethertype is accepted */
		req->data0 &= GENMASK(15, 0);
		if (req->data0 != ETH_P_PAE)
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
		      MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);

		mcs_reg_write(mcs, reg, req->data0);
		break;
	case MCS_CTRL_PKT_RULE_TYPE_DA:
		/* Bit 40 must be set in the request (treated as a valid flag) */
		if (!(req->data0 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
		      MCSX_PEX_TX_SLAVE_RULE_DAX(idx);

		/* Only the 48-bit MAC address portion is programmed */
		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
		break;
	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
		/* Both range bounds must carry the bit-40 valid flag */
		if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
		if (req->dir == MCS_RX) {
			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
		} else {
			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
		}
		break;
	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
		/* Combo rule: DA range (data0/data1) plus PAE ethertype (data2) */
		req->data2 &= GENMASK(15, 0);
		if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
		    !(req->data1 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
		if (req->dir == MCS_RX) {
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
			mcs_reg_write(mcs, reg, req->data2);
		} else {
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
			mcs_reg_write(mcs, reg, req->data2);
		}
		break;
	case MCS_CTRL_PKT_RULE_TYPE_MAC:
		if (!(req->data0 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
		      MCSX_PEX_TX_SLAVE_RULE_MAC;

		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
		break;
	}

	/* Finally turn the rule on in the enable register */
	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;

	enb = mcs_reg_read(mcs, reg);
	enb |= BIT_ULL(req->rule_idx);
	mcs_reg_write(mcs, reg, enb);

	return 0;
}
689 
/* Release resource @rsrc_id if it is owned by @pcifunc.
 * Returns 0 on success, -EINVAL when the caller does not own the entry.
 */
int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
{
	/* Only the owning PF/VF may release the resource */
	if (pf_map[rsrc_id] != pcifunc)
		return -EINVAL;

	pf_map[rsrc_id] = 0;
	rvu_free_rsrc(rsrc, rsrc_id);

	return 0;
}
700 
701 /* Free all the cam resources mapped to pf */
mcs_free_all_rsrc(struct mcs * mcs,int dir,u16 pcifunc)702 int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
703 {
704 	struct mcs_rsrc_map *map;
705 	int id;
706 
707 	if (dir == MCS_RX)
708 		map = &mcs->rx;
709 	else
710 		map = &mcs->tx;
711 
712 	/* free tcam entries */
713 	for (id = 0; id < map->flow_ids.max; id++) {
714 		if (map->flowid2pf_map[id] != pcifunc)
715 			continue;
716 		mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
717 			      id, pcifunc);
718 		mcs_ena_dis_flowid_entry(mcs, id, dir, false);
719 	}
720 
721 	/* free secy entries */
722 	for (id = 0; id < map->secy.max; id++) {
723 		if (map->secy2pf_map[id] != pcifunc)
724 			continue;
725 		mcs_free_rsrc(&map->secy, map->secy2pf_map,
726 			      id, pcifunc);
727 		mcs_clear_secy_plcy(mcs, id, dir);
728 	}
729 
730 	/* free sc entries */
731 	for (id = 0; id < map->secy.max; id++) {
732 		if (map->sc2pf_map[id] != pcifunc)
733 			continue;
734 		mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
735 
736 		/* Disable SC CAM only on RX side */
737 		if (dir == MCS_RX)
738 			mcs_ena_dis_sc_cam_entry(mcs, id, false);
739 	}
740 
741 	/* free sa entries */
742 	for (id = 0; id < map->sa.max; id++) {
743 		if (map->sa2pf_map[id] != pcifunc)
744 			continue;
745 		mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
746 	}
747 	return 0;
748 }
749 
/* Allocate one resource from @rsrc and record @pcifunc as its owner.
 * Returns the allocated index or -ENOMEM when the pool is exhausted.
 */
int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
{
	int rsrc_id = rvu_alloc_rsrc(rsrc);

	if (rsrc_id < 0)
		return -ENOMEM;

	/* Record ownership so only this PF/VF can free the entry later */
	pf_map[rsrc_id] = pcifunc;

	return rsrc_id;
}
760 
/* Allocate one flowid, one SecY, one SC and two SAs for @pcifunc in
 * direction @dir, returning the allocated indices through the out
 * parameters.  Returns 0 on success, -ENOMEM if any allocation fails.
 *
 * NOTE(review): on a mid-sequence failure, entries allocated earlier in
 * this call are not rolled back here — presumably the caller releases
 * them via mcs_free_all_rsrc(); confirm.
 */
int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
		       u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*flow_id = id;

	id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*secy_id = id;

	id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sc_id = id;

	id =  mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sa1_id = id;

	id =  mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sa2_id = id;

	return 0;
}
799 
/* Handle the TX "packet XPN == 0" (packet number wrapped) interrupt on
 * CN10KB.  For every in-use TX SC, read its SA map word and report the
 * SA that wrapped; which of the two mapped SAs was in use is tracked by
 * the driver in mcs->tx_sa_active[].
 */
static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));

		if (mcs->tx_sa_active[sc])
			/* SA_index1 was used and got expired */
			event.sa_id = (val >> 9) & 0xFF;
		else
			/* SA_index0 was used and got expired */
			event.sa_id = val & 0xFF;

		/* Notify the PF/VF owning the expired SA */
		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
826 
/* Handle the TX "PN threshold reached" interrupt on CN10KB.
 * Walks every in-use TX SC and reports the SA that expired, determined
 * by comparing the hardware tx_sa_active bit against the driver's
 * cached copy in mcs->tx_sa_active[].
 */
static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val, status;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;

	/* TX SA interrupt is raised only if autorekey is enabled.
	 * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if
	 * one of two SAs mapped to SC gets expired. If tx_sa_active=0 implies
	 * SA in SA_index1 got expired else SA in SA_index0 got expired.
	 */
	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
		/* Auto rekey is enable */
		if (!((val >> 18) & 0x1))
			continue;

		/* Hardware's current tx_sa_active bit (bit 21 of the map word) */
		status = (val >> 21) & 0x1;

		/* Check if tx_sa_active status had changed */
		if (status == mcs->tx_sa_active[sc])
			continue;
		/* SA_index0 is expired */
		if (status)
			event.sa_id = val & 0xFF;
		else
			event.sa_id = (val >> 9) & 0xFF;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
865 
mcs_rx_pn_thresh_reached_handler(struct mcs * mcs)866 static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
867 {
868 	struct mcs_intr_event event = { 0 };
869 	int sa, reg;
870 	u64 intr;
871 
872 	/* Check expired SAs */
873 	for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
874 		/* Bit high in *PN_THRESH_REACHEDX implies
875 		 * corresponding SAs are expired.
876 		 */
877 		intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
878 		for (sa = 0; sa < 64; sa++) {
879 			if (!(intr & BIT_ULL(sa)))
880 				continue;
881 
882 			event.mcs_id = mcs->mcs_id;
883 			event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
884 			event.sa_id = sa + (reg * 64);
885 			event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
886 			mcs_add_intr_wq_entry(mcs, &event);
887 		}
888 	}
889 }
890 
/* Translate miscellaneous CPM RX interrupt bits into a single event
 * mask and queue one notification for the PF.
 */
static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
{
	struct mcs_intr_event event = { 0 };
	u64 mask = 0;

	if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
		mask |= MCS_CPM_RX_SECTAG_V_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
		mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SL_GTE48)
		mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
	if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
		mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
		mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
		mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];
	event.intr_mask = mask;

	mcs_add_intr_wq_entry(mcs, &event);
}
913 
/* Report the CPM TX "SA not valid" interrupt to the PF; other TX misc
 * bits are ignored.
 */
static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
{
	struct mcs_intr_event event = {
		.mcs_id = mcs->mcs_id,
		.pcifunc = mcs->pf_map[0],
		.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT,
	};

	if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
		return;

	mcs_add_intr_wq_entry(mcs, &event);
}
928 
/* Report BBE FIFO-overflow interrupts: one event per set bit, classified
 * as data-fifo or policy-fifo overflow, tagged with the affected LMAC.
 */
static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_BBE_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		/* Lower nibble denotes data fifo overflow interrupts and
		 * upper nibble indicates policy fifo overflow interrupts.
		 * Classify by the bit being serviced (BIT(i)), not by the
		 * whole pending mask: the previous `intr & 0xF` test made
		 * every pending bit report as a data-fifo overflow whenever
		 * any lower-nibble bit was also set.
		 */
		if (BIT_ULL(i) & 0xFULL)
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
		else
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;

		/* Notify the lmac_id info which ran into BBE fatal error */
		event.lmac_id = i & 0x3ULL;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
961 
/* Dispatch PAB channel-overflow interrupts (RX or TX side per @dir),
 * one event per pending lmac bit in @intr.
 */
static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int lmac;

	if (!(intr & MCS_PAB_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];
	/* Same cause for every bit, so compute the mask once */
	event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
			  MCS_PAB_TX_CHAN_OVERFLOW_INT;

	for (lmac = 0; lmac < MCS_MAX_PAB_INT; lmac++) {
		if (!(intr & BIT_ULL(lmac)))
			continue;

		/* Notify the lmac_id info which ran into PAB fatal error */
		event.lmac_id = lmac;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
985 
/* Top-level MCS interrupt handler.
 *
 * Masks/acks the summary IP interrupt, reads the per-block summary,
 * then demuxes into the CPM/BBE/PAB (RX and TX) handlers.  Each
 * block's cause register is read once and written back to clear it.
 * The IP interrupt is re-enabled on exit.
 */
static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
{
	struct mcs *mcs = (struct mcs *)mcs_irq;
	u64 intr, cpm_intr, bbe_intr, pab_intr;

	/* Disable and clear the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));

	/* Check which block has interrupt*/
	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);

	/* CPM RX */
	if (intr & MCS_CPM_RX_INT_ENA) {
		/* Check for PN thresh interrupt bit */
		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);

		if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
			mcs_rx_pn_thresh_reached_handler(mcs);

		if (cpm_intr & MCS_CPM_RX_INT_ALL)
			mcs_rx_misc_intr_handler(mcs, cpm_intr);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
	}

	/* CPM TX */
	if (intr & MCS_CPM_TX_INT_ENA) {
		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);

		/* PN threshold handling differs per silicon variant
		 * (mcs_blks > 1 means CNF10KB, else CN10KB).
		 */
		if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
			if (mcs->hw->mcs_blks > 1)
				cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
			else
				cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
		}

		if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
			mcs_tx_misc_intr_handler(mcs, cpm_intr);

		if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
			if (mcs->hw->mcs_blks > 1)
				cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
			else
				cn10kb_mcs_tx_pn_wrapped_handler(mcs);
		}
		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
	}

	/* BBE RX */
	if (intr & MCS_BBE_RX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);

		/* Clear the interrupt (INTR_RW is zeroed first, then the
		 * latched cause bits are written back)
		 */
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
	}

	/* BBE TX */
	if (intr & MCS_BBE_TX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
	}

	/* PAB RX */
	if (intr & MCS_PAB_RX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
		mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
	}

	/* PAB TX */
	if (intr & MCS_PAB_TX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
		mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
	}

	/* Enable the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

	return IRQ_HANDLED;
}
1082 
/* devm-managed, zero-initialized array of @n u16 entries.
 * Freed automatically with the device; returns NULL on failure.
 */
static void *alloc_mem(struct mcs *mcs, int n)
{
	return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
}
1087 
/* Allocate one direction's (TX or RX) resource-tracking state in @res:
 * owner-map tables for each hardware resource, plus allocation bitmaps
 * for flow-id / SecY / SC / SA / ctrl-pkt-rule entries.
 *
 * Table memory comes from alloc_mem() (devm-managed), so nothing is
 * explicitly freed here on failure.
 * NOTE(review): bitmaps from rvu_alloc_bitmap() acquired before a later
 * failure are not rolled back here — presumed released by the caller's
 * teardown path; verify.
 *
 * Returns 0 on success, -ENOMEM or the rvu_alloc_bitmap() error code.
 */
static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
{
	struct hwinfo *hw = mcs->hw;
	int err;

	/* Per-resource owner (pcifunc) maps */
	res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
	if (!res->flowid2pf_map)
		return -ENOMEM;

	res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
	if (!res->secy2pf_map)
		return -ENOMEM;

	res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
	if (!res->sc2pf_map)
		return -ENOMEM;

	res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
	if (!res->sa2pf_map)
		return -ENOMEM;

	/* Maps a flow-id entry to the SecY it belongs to */
	res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
	if (!res->flowid2secy_map)
		return -ENOMEM;

	res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
	if (!res->ctrlpktrule2pf_map)
		return -ENOMEM;

	/* Flow-id and SecY pools keep MCS_RSRC_RSVD_CNT entries reserved */
	res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	err = rvu_alloc_bitmap(&res->flow_ids);
	if (err)
		return err;

	res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
	err = rvu_alloc_bitmap(&res->secy);
	if (err)
		return err;

	res->sc.max = hw->sc_entries;
	err = rvu_alloc_bitmap(&res->sc);
	if (err)
		return err;

	res->sa.max = hw->sa_entries;
	err = rvu_alloc_bitmap(&res->sa);
	if (err)
		return err;

	res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
	err = rvu_alloc_bitmap(&res->ctrlpktrule);
	if (err)
		return err;

	return 0;
}
1144 
mcs_register_interrupts(struct mcs * mcs)1145 static int mcs_register_interrupts(struct mcs *mcs)
1146 {
1147 	int ret = 0;
1148 
1149 	mcs->num_vec = pci_msix_vec_count(mcs->pdev);
1150 
1151 	ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
1152 				    mcs->num_vec, PCI_IRQ_MSIX);
1153 	if (ret < 0) {
1154 		dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
1155 			mcs->num_vec, ret);
1156 		return ret;
1157 	}
1158 
1159 	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
1160 			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
1161 	if (ret) {
1162 		dev_err(mcs->dev, "MCS IP irq registration failed\n");
1163 		goto exit;
1164 	}
1165 
1166 	/* MCS enable IP interrupts */
1167 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
1168 
1169 	/* Enable CPM Rx/Tx interrupts */
1170 	mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
1171 		      MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
1172 		      MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
1173 		      MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
1174 
1175 	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
1176 	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
1177 
1178 	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
1179 	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
1180 
1181 	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
1182 	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
1183 
1184 	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
1185 	if (!mcs->tx_sa_active) {
1186 		ret = -ENOMEM;
1187 		goto free_irq;
1188 	}
1189 
1190 	return ret;
1191 
1192 free_irq:
1193 	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
1194 exit:
1195 	pci_free_irq_vectors(mcs->pdev);
1196 	mcs->num_vec = 0;
1197 	return ret;
1198 }
1199 
mcs_get_blkcnt(void)1200 int mcs_get_blkcnt(void)
1201 {
1202 	struct mcs *mcs;
1203 	int idmax = -ENODEV;
1204 
1205 	/* Check MCS block is present in hardware */
1206 	if (!pci_dev_present(mcs_id_table))
1207 		return 0;
1208 
1209 	list_for_each_entry(mcs, &mcs_list, mcs_list)
1210 		if (mcs->mcs_id > idmax)
1211 			idmax = mcs->mcs_id;
1212 
1213 	if (idmax < 0)
1214 		return 0;
1215 
1216 	return idmax + 1;
1217 }
1218 
mcs_get_pdata(int mcs_id)1219 struct mcs *mcs_get_pdata(int mcs_id)
1220 {
1221 	struct mcs *mcs_dev;
1222 
1223 	list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
1224 		if (mcs_dev->mcs_id == mcs_id)
1225 			return mcs_dev;
1226 	}
1227 	return NULL;
1228 }
1229 
mcs_set_port_cfg(struct mcs * mcs,struct mcs_port_cfg_set_req * req)1230 void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
1231 {
1232 	u64 val = 0;
1233 
1234 	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
1235 		      req->port_mode & MCS_PORT_MODE_MASK);
1236 
1237 	req->cstm_tag_rel_mode_sel &= 0x3;
1238 
1239 	if (mcs->hw->mcs_blks > 1) {
1240 		req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
1241 		val = (u32)req->fifo_skid << 0x10;
1242 		val |= req->fifo_skid;
1243 		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
1244 		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
1245 			      req->cstm_tag_rel_mode_sel);
1246 		val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
1247 
1248 		if (req->custom_hdr_enb)
1249 			val |= BIT_ULL(req->port_id);
1250 		else
1251 			val &= ~BIT_ULL(req->port_id);
1252 
1253 		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
1254 	} else {
1255 		val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
1256 		val |= (req->cstm_tag_rel_mode_sel << 2);
1257 		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
1258 	}
1259 }
1260 
mcs_get_port_cfg(struct mcs * mcs,struct mcs_port_cfg_get_req * req,struct mcs_port_cfg_get_rsp * rsp)1261 void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
1262 		      struct mcs_port_cfg_get_rsp *rsp)
1263 {
1264 	u64 reg = 0;
1265 
1266 	rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
1267 			 MCS_PORT_MODE_MASK;
1268 
1269 	if (mcs->hw->mcs_blks > 1) {
1270 		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
1271 		rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
1272 		reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
1273 		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
1274 		if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
1275 			rsp->custom_hdr_enb = 1;
1276 	} else {
1277 		reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
1278 		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
1279 	}
1280 
1281 	rsp->port_id = req->port_id;
1282 	rsp->mcs_id = req->mcs_id;
1283 }
1284 
/* Read back the custom-tag (ethertype) parser configuration for the
 * direction in @req, filling one rsp entry per supported tag slot.
 * Register layout differs per variant: CNF10KB (mcs_blks > 1) uses
 * CUSTOM_TAGX + a shared ETYPE_ENABLE register, CN10KB packs the
 * enable bit into each VLAN_CFGX register.
 */
void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
			    struct mcs_custom_tag_cfg_get_rsp *rsp)
{
	u64 reg = 0, val = 0;
	u8 idx;

	for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
		if (mcs->hw->mcs_blks > 1)
			reg  = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
				MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
		else
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
				MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);

		val = mcs_reg_read(mcs, reg);
		if (mcs->hw->mcs_blks > 1) {
			/* etype in bits [15:0], index in bits [23:22] (0x16 = 22) */
			rsp->cstm_etype[idx] = val & GENMASK(15, 0);
			rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
				MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
			rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
		} else {
			/* enable in bit 0, etype in [16:1], index in [18:17] (0x11 = 17) */
			rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
			rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
			rsp->cstm_etype_en |= (val & 0x1) << idx;
		}
	}

	rsp->mcs_id = req->mcs_id;
	rsp->dir = req->dir;
}
1316 
/* Assert (reset=1) or deassert (reset=0) the per-port reset bit. */
void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
{
	/* Only bit 0 of @reset is meaningful */
	mcs_reg_write(mcs, MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id), reset & 0x1);
}
1323 
1324 /* Set lmac to bypass/operational mode */
mcs_set_lmac_mode(struct mcs * mcs,int lmac_id,u8 mode)1325 void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
1326 {
1327 	u64 reg;
1328 
1329 	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
1330 	mcs_reg_write(mcs, reg, (u64)mode);
1331 }
1332 
mcs_pn_threshold_set(struct mcs * mcs,struct mcs_set_pn_threshold * pn)1333 void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
1334 {
1335 	u64 reg;
1336 
1337 	if (pn->dir == MCS_RX)
1338 		reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
1339 	else
1340 		reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
1341 
1342 	mcs_reg_write(mcs, reg, pn->threshold);
1343 }
1344 
cn10kb_mcs_parser_cfg(struct mcs * mcs)1345 void cn10kb_mcs_parser_cfg(struct mcs *mcs)
1346 {
1347 	u64 reg, val;
1348 
1349 	/* VLAN CTag */
1350 	val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
1351 	/* RX */
1352 	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
1353 	mcs_reg_write(mcs, reg, val);
1354 
1355 	/* TX */
1356 	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
1357 	mcs_reg_write(mcs, reg, val);
1358 
1359 	/* VLAN STag */
1360 	val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
1361 	/* RX */
1362 	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
1363 	mcs_reg_write(mcs, reg, val);
1364 
1365 	/* TX */
1366 	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
1367 	mcs_reg_write(mcs, reg, val);
1368 }
1369 
mcs_lmac_init(struct mcs * mcs,int lmac_id)1370 static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
1371 {
1372 	u64 reg;
1373 
1374 	/* Port mode 25GB */
1375 	reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
1376 	mcs_reg_write(mcs, reg, 0);
1377 
1378 	if (mcs->hw->mcs_blks > 1) {
1379 		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
1380 		mcs_reg_write(mcs, reg, 0xe000e);
1381 		return;
1382 	}
1383 
1384 	reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
1385 	mcs_reg_write(mcs, reg, 0);
1386 }
1387 
/* Assign each lmac of the given MCS block a window of 16 channels,
 * starting at @base. Returns -ENODEV if the block is not registered.
 */
int mcs_set_lmac_channels(int mcs_id, u16 base)
{
	struct mcs *mcs = mcs_get_pdata(mcs_id);
	int lmac;
	u64 cfg;

	if (!mcs)
		return -ENODEV;

	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++, base += 16) {
		cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
		cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
		/* range field encodes log2 of the window size */
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
		mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
	}

	return 0;
}
1407 
/* Run X2P interface calibration: set the calibrate bit, poll (up to
 * ~20 ms) for the global done bit, then verify every X2P interface
 * responded. The calibrate bit is cleared before returning.
 * Returns 0 on success, -EBUSY on timeout or a non-responding interface.
 *
 * NOTE(review): the timeout check precedes the status re-read, so a
 * long scheduling delay could report -EBUSY even though calibration
 * completed in the meantime — consider a final recheck after timeout.
 */
static int mcs_x2p_calibration(struct mcs *mcs)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	int i, err = 0;
	u64 val;

	/* set X2P calibration */
	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
	val |= BIT_ULL(5);
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);

	/* Wait for calibration to complete */
	while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
		if (time_before(jiffies, timeout)) {
			usleep_range(80, 100);
			continue;
		} else {
			err = -EBUSY;
			dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
			return err;
		}
	}

	/* Bits 1..mcs_x2p_intf report per-interface completion */
	val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
	for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
		if (val & BIT_ULL(1 + i))
			continue;
		err = -EBUSY;
		dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
	}
	/* Clear X2P calibrate */
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));

	return err;
}
1443 
/* Enter (@bypass != 0) or leave external bypass via bit 6 of
 * MCSX_MIL_GLOBAL.
 */
static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
{
	u64 val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);

	val = bypass ? (val | BIT_ULL(6)) : (val & ~BIT_ULL(6));

	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
}
1456 
mcs_global_cfg(struct mcs * mcs)1457 static void mcs_global_cfg(struct mcs *mcs)
1458 {
1459 	/* Disable external bypass */
1460 	mcs_set_external_bypass(mcs, false);
1461 
1462 	/* Reset TX/RX stats memory */
1463 	mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
1464 	mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
1465 
1466 	/* Set MCS to perform standard IEEE802.1AE macsec processing */
1467 	if (mcs->hw->mcs_blks == 1) {
1468 		mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
1469 		return;
1470 	}
1471 
1472 	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
1473 	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
1474 }
1475 
cn10kb_mcs_set_hw_capabilities(struct mcs * mcs)1476 void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
1477 {
1478 	struct hwinfo *hw = mcs->hw;
1479 
1480 	hw->tcam_entries = 128;		/* TCAM entries */
1481 	hw->secy_entries  = 128;	/* SecY entries */
1482 	hw->sc_entries = 128;		/* SC CAM entries */
1483 	hw->sa_entries = 256;		/* SA entries */
1484 	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
1485 	hw->mcs_x2p_intf = 5;		/* x2p clabration intf */
1486 	hw->mcs_blks = 1;		/* MCS blocks */
1487 }
1488 
/* Variant callbacks for CN10K-B (single-block MCS); the CNF10KB set is
 * obtained from cnf10kb_get_mac_ops() in mcs_probe().
 */
static struct mcs_ops cn10kb_mcs_ops = {
	.mcs_set_hw_capabilities	= cn10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg			= cn10kb_mcs_parser_cfg,
	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
};
1496 
mcs_probe(struct pci_dev * pdev,const struct pci_device_id * id)1497 static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1498 {
1499 	struct device *dev = &pdev->dev;
1500 	int lmac, err = 0;
1501 	struct mcs *mcs;
1502 
1503 	mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
1504 	if (!mcs)
1505 		return -ENOMEM;
1506 
1507 	mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
1508 	if (!mcs->hw)
1509 		return -ENOMEM;
1510 
1511 	err = pci_enable_device(pdev);
1512 	if (err) {
1513 		dev_err(dev, "Failed to enable PCI device\n");
1514 		pci_set_drvdata(pdev, NULL);
1515 		return err;
1516 	}
1517 
1518 	err = pci_request_regions(pdev, DRV_NAME);
1519 	if (err) {
1520 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
1521 		goto exit;
1522 	}
1523 
1524 	mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1525 	if (!mcs->reg_base) {
1526 		dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
1527 		err = -ENOMEM;
1528 		goto exit;
1529 	}
1530 
1531 	pci_set_drvdata(pdev, mcs);
1532 	mcs->pdev = pdev;
1533 	mcs->dev = &pdev->dev;
1534 
1535 	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
1536 		mcs->mcs_ops = &cn10kb_mcs_ops;
1537 	else
1538 		mcs->mcs_ops = cnf10kb_get_mac_ops();
1539 
1540 	/* Set hardware capabilities */
1541 	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
1542 
1543 	mcs_global_cfg(mcs);
1544 
1545 	/* Perform X2P clibration */
1546 	err = mcs_x2p_calibration(mcs);
1547 	if (err)
1548 		goto err_x2p;
1549 
1550 	mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
1551 			& MCS_ID_MASK;
1552 
1553 	/* Set mcs tx side resources */
1554 	err = mcs_alloc_struct_mem(mcs, &mcs->tx);
1555 	if (err)
1556 		goto err_x2p;
1557 
1558 	/* Set mcs rx side resources */
1559 	err = mcs_alloc_struct_mem(mcs, &mcs->rx);
1560 	if (err)
1561 		goto err_x2p;
1562 
1563 	/* per port config */
1564 	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
1565 		mcs_lmac_init(mcs, lmac);
1566 
1567 	/* Parser configuration */
1568 	mcs->mcs_ops->mcs_parser_cfg(mcs);
1569 
1570 	err = mcs_register_interrupts(mcs);
1571 	if (err)
1572 		goto exit;
1573 
1574 	list_add(&mcs->mcs_list, &mcs_list);
1575 	mutex_init(&mcs->stats_lock);
1576 
1577 	return 0;
1578 
1579 err_x2p:
1580 	/* Enable external bypass */
1581 	mcs_set_external_bypass(mcs, true);
1582 exit:
1583 	pci_release_regions(pdev);
1584 	pci_disable_device(pdev);
1585 	pci_set_drvdata(pdev, NULL);
1586 	return err;
1587 }
1588 
mcs_remove(struct pci_dev * pdev)1589 static void mcs_remove(struct pci_dev *pdev)
1590 {
1591 	struct mcs *mcs = pci_get_drvdata(pdev);
1592 
1593 	/* Set MCS to external bypass */
1594 	mcs_set_external_bypass(mcs, true);
1595 	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
1596 	pci_free_irq_vectors(pdev);
1597 	pci_release_regions(pdev);
1598 	pci_disable_device(pdev);
1599 	pci_set_drvdata(pdev, NULL);
1600 }
1601 
/* Registered by the AF driver (non-static); binds the CN10K MCS PF. */
struct pci_driver mcs_driver = {
	.name = DRV_NAME,
	.id_table = mcs_id_table,
	.probe = mcs_probe,
	.remove = mcs_remove,
};
1608