1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2022 Marvell.
5  *
6  */
7 
8 #include <linux/bitfield.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/firmware.h>
12 #include <linux/stddef.h>
13 #include <linux/debugfs.h>
14 
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "npc.h"
19 #include "cgx.h"
20 #include "rvu_npc_fs.h"
21 #include "rvu_npc_hash.h"
22 
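/**
 *	rvu_npc_wide_extract - Extract a bit field from a multi-word value.
 *	@input: Array of 64-bit words holding the wide value.
 *	@start_bit: Bit position of the field's LSB within the array.
 *	@width_bits: Width of the field in bits (at most 64).
 *	Return: Extracted field, right aligned in a u64.
 */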
23 static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
24 				size_t width_bits)
25 {
26 	const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
27 	const size_t msb = start_bit + width_bits - 1;
28 	const size_t lword = start_bit >> 6;
29 	const size_t uword = msb >> 6;
30 	size_t lbits;
31 	u64 hi, lo;
32 
33 	if (lword == uword)
34 		return (input[lword] >> (start_bit & 63)) & mask;
35 
36 	lbits = 64 - (start_bit & 63);
37 	hi = input[uword];
38 	lo = (input[lword] >> (start_bit & 63));
39 	return ((hi << lbits) | lo) & mask;
40 }
41 
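/**
 *	rvu_npc_lshift_key - Shift a multi-word key left by one bit.
 *	@key: Array of 64-bit words holding the key, least significant word first.
 *	@key_bit_len: Length of the key in bits.
 *
 *	The MSB of each word is carried into the LSB of the next word.
 */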
42 static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
43 {
44 	u64 prev_orig_word = 0;
45 	u64 cur_orig_word = 0;
46 	size_t extra = key_bit_len % 64;
47 	size_t max_idx = key_bit_len / 64;
48 	size_t i;
49 
50 	if (extra)
51 		max_idx++;
52 
53 	for (i = 0; i < max_idx; i++) {
54 		cur_orig_word = key[i];
55 		key[i] = key[i] << 1;
56 		key[i] |= ((prev_orig_word >> 63) & 0x1);
57 		prev_orig_word = cur_orig_word;
58 	}
59 }
60 
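/**
 *	rvu_npc_toeplitz_hash - Compute a Toeplitz hash over the input data.
 *	@data: Input data, least significant word first.
 *	@key: Hash key; it is shifted in place, so the caller's copy is consumed.
 *	@data_bit_len: Number of data bits to hash.
 *	@key_bit_len: Length of the key in bits.
 *
 *	Walks the data from MSB to LSB; for every set data bit the top 32 bits
 *	of the key are XORed into the result, and the key is shifted left by
 *	one bit per data bit processed.
 *	Return: 32-bit hash value.
 */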
61 static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
62 				 size_t key_bit_len)
63 {
64 	u32 hash_out = 0;
65 	u64 temp_data = 0;
66 	int i;
67 
68 	for (i = data_bit_len - 1; i >= 0; i--) {
69 		temp_data = (data[i / 64]);
70 		temp_data = temp_data >> (i % 64);
71 		temp_data &= 0x1;
72 		if (temp_data)
73 			hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
74 
75 		rvu_npc_lshift_key(key, key_bit_len);
76 	}
77 
78 	return hash_out;
79 }
80 
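/**
 *	npc_field_hash_calc - Compute the 32-bit field hash for extracted layer data.
 *	@ldata: Extracted layer data (two 64-bit words).
 *	@mkex_hash: MKEX hash profile holding per-interface hash masks and controls.
 *	@secret_key: Secret key words read from the NPC_AF_INTFX_SECRET_KEY* registers.
 *	@intf: NPC interface (RX or TX).
 *	@hash_idx: Hash extractor index.
 *
 *	Packs the secret key words into a 159-bit Toeplitz key, masks the layer
 *	data with the configured hash mask, hashes it and then applies the hash
 *	control word to the result.
 *	Return: 32-bit field hash.
 */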
81 u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
82 			u64 *secret_key, u8 intf, u8 hash_idx)
83 {
84 	u64 hash_key[3];
85 	u64 data_padded[2];
86 	u32 field_hash;
87 
88 	hash_key[0] = secret_key[1] << 31;
89 	hash_key[0] |= secret_key[2];
90 	hash_key[1] = secret_key[1] >> 33;
91 	hash_key[1] |= secret_key[0] << 31;
92 	hash_key[2] = secret_key[0] >> 33;
93 
94 	data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
95 	data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
96 	field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
97 
98 	field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
99 	field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
100 	return field_hash;
101 }
102 
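/**
 *	npc_update_use_hash - Build a KEX_LD_CFG value with use_hash set.
 *	@lt: Layer type.
 *	@ld: Layer data extractor index.
 *	Return: KEX_LD_CFG value for the layer type, 0 if hashing is not used.
 */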
103 static u64 npc_update_use_hash(int lt, int ld)
104 {
105 	u64 cfg = 0;
106 
107 	switch (lt) {
108 	case NPC_LT_LC_IP6:
109 		/* Update use_hash(bit-20) and bytesm1 (bit-16:19)
110 		 * in KEX_LD_CFG
111 		 */
112 		cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
113 					  ld ? 0x8 : 0x18,
114 					  0x1, 0x0, 0x10);
115 		break;
116 	}
117 
118 	return cfg;
119 }
120 
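/**
 *	npc_program_mkex_hash_rx - Program field hash extraction for an RX interface.
 *	@rvu: Resource virtualization unit.
 *	@blkaddr: NPC block address.
 *	@intf: NPC interface; TX interfaces are skipped.
 */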
121 static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
122 				     u8 intf)
123 {
124 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
125 	int lid, lt, ld, hash_cnt = 0;
126 
127 	if (is_npc_intf_tx(intf))
128 		return;
129 
130 	/* Program HASH_CFG */
131 	for (lid = 0; lid < NPC_MAX_LID; lid++) {
132 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
133 			for (ld = 0; ld < NPC_MAX_LD; ld++) {
134 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
135 					u64 cfg = npc_update_use_hash(lt, ld);
136 
137 					hash_cnt++;
138 					if (hash_cnt == NPC_MAX_HASH)
139 						return;
140 
141 					/* Set updated KEX configuration */
142 					SET_KEX_LD(intf, lid, lt, ld, cfg);
143 					/* Set HASH configuration */
144 					SET_KEX_LD_HASH(intf, ld,
145 							mkex_hash->hash[intf][ld]);
146 					SET_KEX_LD_HASH_MASK(intf, ld, 0,
147 							     mkex_hash->hash_mask[intf][ld][0]);
148 					SET_KEX_LD_HASH_MASK(intf, ld, 1,
149 							     mkex_hash->hash_mask[intf][ld][1]);
150 					SET_KEX_LD_HASH_CTRL(intf, ld,
151 							     mkex_hash->hash_ctrl[intf][ld]);
152 				}
153 			}
154 		}
155 	}
156 }
157 
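/**
 *	npc_program_mkex_hash_tx - Program field hash extraction for a TX interface.
 *	@rvu: Resource virtualization unit.
 *	@blkaddr: NPC block address.
 *	@intf: NPC interface; RX interfaces are skipped.
 */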
158 static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
159 				     u8 intf)
160 {
161 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
162 	int lid, lt, ld, hash_cnt = 0;
163 
164 	if (is_npc_intf_rx(intf))
165 		return;
166 
167 	/* Program HASH_CFG */
168 	for (lid = 0; lid < NPC_MAX_LID; lid++) {
169 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
170 			for (ld = 0; ld < NPC_MAX_LD; ld++)
171 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
172 					u64 cfg = npc_update_use_hash(lt, ld);
173 
174 					hash_cnt++;
175 					if (hash_cnt == NPC_MAX_HASH)
176 						return;
177 
178 					/* Set updated KEX configuration */
179 					SET_KEX_LD(intf, lid, lt, ld, cfg);
180 					/* Set HASH configuration */
181 					SET_KEX_LD_HASH(intf, ld,
182 							mkex_hash->hash[intf][ld]);
183 					SET_KEX_LD_HASH_MASK(intf, ld, 0,
184 							     mkex_hash->hash_mask[intf][ld][0]);
185 					SET_KEX_LD_HASH_MASK(intf, ld, 1,
186 							     mkex_hash->hash_mask[intf][ld][1]);
187 					SET_KEX_LD_HASH_CTRL(intf, ld,
188 							     mkex_hash->hash_ctrl[intf][ld]);
189 					hash_cnt++;
190 					if (hash_cnt == NPC_MAX_HASH)
191 						return;
192 				}
193 		}
194 	}
195 }
196 
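/**
 *	npc_config_secret_key - Program the field hash secret key for every interface.
 *	@rvu: Resource virtualization unit.
 *	@blkaddr: NPC block address.
 */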
197 void npc_config_secret_key(struct rvu *rvu, int blkaddr)
198 {
199 	struct hw_cap *hwcap = &rvu->hw->cap;
200 	struct rvu_hwinfo *hw = rvu->hw;
201 	u8 intf;
202 
203 	if (!hwcap->npc_hash_extract) {
204 		dev_info(rvu->dev, "HW does not support secret key configuration\n");
205 		return;
206 	}
207 
208 	for (intf = 0; intf < hw->npc_intfs; intf++) {
209 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
210 			    RVU_NPC_HASH_SECRET_KEY0);
211 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
212 			    RVU_NPC_HASH_SECRET_KEY1);
213 		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
214 			    RVU_NPC_HASH_SECRET_KEY2);
215 	}
216 }
217 
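/**
 *	npc_program_mkex_hash - Program MKEX field hash configuration for all interfaces.
 *	@rvu: Resource virtualization unit.
 *	@blkaddr: NPC block address.
 */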
218 void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
219 {
220 	struct hw_cap *hwcap = &rvu->hw->cap;
221 	struct rvu_hwinfo *hw = rvu->hw;
222 	u8 intf;
223 
224 	if (!hwcap->npc_hash_extract) {
225 		dev_dbg(rvu->dev, "Field hash extract feature is not supported\n");
226 		return;
227 	}
228 
229 	for (intf = 0; intf < hw->npc_intfs; intf++) {
230 		npc_program_mkex_hash_rx(rvu, blkaddr, intf);
231 		npc_program_mkex_hash_tx(rvu, blkaddr, intf);
232 	}
233 }
234 
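/**
 *	npc_update_field_hash - Replace hashed fields in an MCAM entry.
 *	@rvu: Resource virtualization unit.
 *	@intf: NPC interface.
 *	@entry: MCAM entry being built.
 *	@blkaddr: NPC block address.
 *	@features: Bitmap of flow features present in the rule.
 *	@pkt: Packet field values from the flow request.
 *	@mask: Packet field masks from the flow request.
 *	@opkt: Copy of the original (unhashed) packet fields for the caller.
 *	@omask: Copy of the original field masks for the caller.
 *
 *	When field hash extraction is enabled for a layer type, the MCAM key
 *	carries a 32-bit hash instead of the raw field, so the same hash is
 *	computed here and written into the entry.
 */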
235 void npc_update_field_hash(struct rvu *rvu, u8 intf,
236 			   struct mcam_entry *entry,
237 			   int blkaddr,
238 			   u64 features,
239 			   struct flow_msg *pkt,
240 			   struct flow_msg *mask,
241 			   struct flow_msg *opkt,
242 			   struct flow_msg *omask)
243 {
244 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
245 	struct npc_get_secret_key_req req;
246 	struct npc_get_secret_key_rsp rsp;
247 	u64 ldata[2], cfg;
248 	u32 field_hash;
249 	u8 hash_idx;
250 
251 	if (!rvu->hw->cap.npc_hash_extract) {
252 		dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
253 		return;
254 	}
255 
256 	req.intf = intf;
257 	rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
258 
259 	for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
260 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
261 		if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
262 			u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
263 			u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
264 			u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
265 
266 			if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
267 				switch (ltype & ltype_mask) {
268 				/* If hash extraction is enabled for IPv6, the
269 				 * 128-bit IPv6 source and destination addresses
270 				 * are each hashed down to a 32-bit value.
271 				 */
272 				case NPC_LT_LC_IP6:
273 					if (features & BIT_ULL(NPC_SIP_IPV6)) {
274 						u32 src_ip[IPV6_WORDS];
275 
276 						be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
277 						ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
278 						ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
279 						field_hash = npc_field_hash_calc(ldata,
280 										 mkex_hash,
281 										 rsp.secret_key,
282 										 intf,
283 										 hash_idx);
284 						npc_update_entry(rvu, NPC_SIP_IPV6, entry,
285 								 field_hash, 0, 32, 0, intf);
286 						memcpy(&opkt->ip6src, &pkt->ip6src,
287 						       sizeof(pkt->ip6src));
288 						memcpy(&omask->ip6src, &mask->ip6src,
289 						       sizeof(mask->ip6src));
290 						break;
291 					}
292 
293 					if (features & BIT_ULL(NPC_DIP_IPV6)) {
294 						u32 dst_ip[IPV6_WORDS];
295 
296 						be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
297 						ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
298 						ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
299 						field_hash = npc_field_hash_calc(ldata,
300 										 mkex_hash,
301 										 rsp.secret_key,
302 										 intf,
303 										 hash_idx);
304 						npc_update_entry(rvu, NPC_DIP_IPV6, entry,
305 								 field_hash, 0, 32, 0, intf);
306 						memcpy(&opkt->ip6dst, &pkt->ip6dst,
307 						       sizeof(pkt->ip6dst));
308 						memcpy(&omask->ip6dst, &mask->ip6dst,
309 						       sizeof(mask->ip6dst));
310 					}
311 					break;
312 				}
313 			}
314 		}
315 	}
316 }
317 
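/**
 *	rvu_mbox_handler_npc_get_secret_key - Mbox handler to read the secret key.
 *	@rvu: Resource virtualization unit.
 *	@req: Request carrying the NPC interface.
 *	@rsp: Response filled with the three secret key words.
 *	Return: 0 upon success, -EINVAL if the NPC block is not present.
 */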
318 int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
319 					struct npc_get_secret_key_req *req,
320 					struct npc_get_secret_key_rsp *rsp)
321 {
322 	u64 *secret_key = rsp->secret_key;
323 	u8 intf = req->intf;
324 	int blkaddr;
325 
326 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
327 	if (blkaddr < 0) {
328 		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
329 		return -EINVAL;
330 	}
331 
332 	secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
333 	secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
334 	secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
335 
336 	return 0;
337 }
338 
339 /**
340  *	rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
341  *	@mac_addr: MAC address.
342  *	Return: mdata for exact match table.
343  */
344 static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
345 {
346 	u64 mac = 0;
347 	int index;
348 
349 	for (index = ETH_ALEN - 1; index >= 0; index--)
350 		mac |= ((u64)*mac_addr++) << (8 * index);
351 
352 	return mac;
353 }
354 
355 /**
356  *	rvu_exact_prepare_mdata - Make mdata for mcam entry
357  *	@mac: MAC address
358  *	@chan: Channel number.
359  *	@ctype: Channel Type.
360  *	@mask: LDATA mask.
361  *	Return: Meta data
362  */
363 static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
364 {
365 	u64 ldata = rvu_npc_exact_mac2u64(mac);
366 
367 	/* Please note that mask is 48bit which excludes chan and ctype.
368 	 * Increase mask bits if we need to include them as well.
369 	 */
370 	ldata |= ((u64)chan << 48);
371 	ldata |= ((u64)ctype  << 60);
372 	ldata &= mask;
373 	ldata = ldata << 2;
374 
375 	return ldata;
376 }
377 
378 /**
379  *      rvu_exact_calculate_hash - calculate hash index to mem table.
380  *	@rvu: resource virtualization unit.
381  *	@chan: Channel number
382  *	@ctype: Channel type.
383  *	@mac: MAC address
384  *	@mask: HASH mask.
385  *	@table_depth: Depth of table.
386  *	Return: Hash value
387  */
388 static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
389 				    u64 mask, u32 table_depth)
390 {
391 	struct npc_exact_table *table = rvu->hw->table;
392 	u64 hash_key[2];
393 	u64 key_in[2];
394 	u64 ldata;
395 	u32 hash;
396 
397 	key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
398 	key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
399 
400 	hash_key[0] = key_in[0] << 31;
401 	hash_key[0] |= key_in[1];
402 	hash_key[1] = key_in[0] >> 33;
403 
404 	ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
405 
406 	dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key1=0x%llx hash_key0=0x%llx\n", __func__,
407 		ldata, hash_key[1], hash_key[0]);
408 	hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
409 
410 	hash &= table->mem_table.hash_mask;
411 	hash += table->mem_table.hash_offset;
412 	dev_dbg(rvu->dev, "%s: hash=%x\n", __func__,  hash);
413 
414 	return hash;
415 }
416 
417 /**
418  *      rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
419  *      @rvu: resource virtualization unit.
420  *	@way: Indicate way to table.
421  *	@index: Hash index to 4 way table.
422  *	@hash: Hash value.
423  *
424  *	Searches the 4 way table using the hash index.
425  *	Return: 0 upon success, -ENOSPC if all ways are in use.
426  */
427 static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
428 					       u32 *index, unsigned int hash)
429 {
430 	struct npc_exact_table *table;
431 	int depth, i;
432 
433 	table = rvu->hw->table;
434 	depth = table->mem_table.depth;
435 
436 	/* Check all the 4 ways for a free slot. */
437 	mutex_lock(&table->lock);
438 	for (i = 0; i <  table->mem_table.ways; i++) {
439 		if (test_bit(hash + i * depth, table->mem_table.bmap))
440 			continue;
441 
442 		set_bit(hash + i * depth, table->mem_table.bmap);
443 		mutex_unlock(&table->lock);
444 
445 		dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
446 			__func__, i, hash);
447 
448 		*way = i;
449 		*index = hash;
450 		return 0;
451 	}
452 	mutex_unlock(&table->lock);
453 
454 	dev_dbg(rvu->dev, "%s: No space in 4 way exact match table, weight=%u\n", __func__,
455 		bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
456 	return -ENOSPC;
457 }
458 
459 /**
460  *	rvu_npc_exact_free_id - Free seq id from bitmap.
461  *	@rvu: Resource virtualization unit.
462  *	@seq_id: Sequence identifier to be freed.
463  */
464 static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
465 {
466 	struct npc_exact_table *table;
467 
468 	table = rvu->hw->table;
469 	mutex_lock(&table->lock);
470 	clear_bit(seq_id, table->id_bmap);
471 	mutex_unlock(&table->lock);
472 	dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
473 }
474 
475 /**
476  *	rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
477  *	@rvu: Resource virtualization unit.
478  *	@seq_id: Sequence identifier.
479  *	Return: True or false.
480  */
481 static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
482 {
483 	struct npc_exact_table *table;
484 	u32 idx;
485 
486 	table = rvu->hw->table;
487 
488 	mutex_lock(&table->lock);
489 	idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
490 	if (idx == table->tot_ids) {
491 		mutex_unlock(&table->lock);
492 		dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
493 			__func__, bitmap_weight(table->id_bmap, table->tot_ids));
494 
495 		return false;
496 	}
497 
498 	/* Mark bit map to indicate that slot is used.*/
499 	set_bit(idx, table->id_bmap);
500 	mutex_unlock(&table->lock);
501 
502 	*seq_id = idx;
503 	dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
504 
505 	return true;
506 }
507 
508 /**
509  *      rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
510  *      @rvu: resource virtualization unit.
511  *	@index: Index to exact CAM table.
512  *	Return: 0 upon success; else error number.
513  */
514 static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
515 {
516 	struct npc_exact_table *table;
517 	u32 idx;
518 
519 	table = rvu->hw->table;
520 
521 	mutex_lock(&table->lock);
522 	idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
523 	if (idx == table->cam_table.depth) {
524 		mutex_unlock(&table->lock);
525 		dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
526 			 bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
527 		return -ENOSPC;
528 	}
529 
530 	/* Mark bit map to indicate that slot is used.*/
531 	set_bit(idx, table->cam_table.bmap);
532 	mutex_unlock(&table->lock);
533 
534 	*index = idx;
535 	dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
536 		__func__, idx);
537 	return 0;
538 }
539 
540 /**
541  *	rvu_exact_prepare_table_entry - Data for exact match table entry.
542  *	@rvu: Resource virtualization unit.
543  *	@enable: Enable/Disable entry
544  *	@ctype: Software defined channel type. Currently set as 0.
545  *	@chan: Channel number.
546  *	@mac_addr: Destination mac address.
547  *	Return: mdata for exact match table.
548  */
549 static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
550 					 u8 ctype, u16 chan, u8 *mac_addr)
551 
552 {
553 	u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
554 
555 	/* Enable or disable */
556 	u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
557 
558 	/* Set Ctype */
559 	mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
560 
561 	/* Set chan */
562 	mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
563 
564 	/* MAC address */
565 	mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
566 
567 	return mdata;
568 }
569 
570 /**
571  *	rvu_exact_config_secret_key - Configure secret key.
572  *	@rvu: Resource virtualization unit.
573  */
574 static void rvu_exact_config_secret_key(struct rvu *rvu)
575 {
576 	int blkaddr;
577 
578 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
579 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
580 		    RVU_NPC_HASH_SECRET_KEY0);
581 
582 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
583 		    RVU_NPC_HASH_SECRET_KEY1);
584 
585 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
586 		    RVU_NPC_HASH_SECRET_KEY2);
587 }
588 
589 /**
590  *	rvu_exact_config_search_key - Configure search key
591  *	@rvu: Resource virtualization unit.
592  */
593 static void rvu_exact_config_search_key(struct rvu *rvu)
594 {
595 	int blkaddr;
596 	u64 reg_val;
597 
598 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
599 
600 	/* HDR offset */
601 	reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
602 
603 	/* BYTESM1, number of bytes - 1 */
604 	reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
605 
606 	/* Enable LID and set LID to  NPC_LID_LA */
607 	reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
608 	reg_val |= FIELD_PREP(GENMASK_ULL(10, 8),  NPC_LID_LA);
609 
610 	/* Clear layer type based extraction */
611 
612 	/* Disable LT_EN */
613 	reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
614 
615 	/* Set LTYPE_MATCH to 0 */
616 	reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
617 
618 	/* Set LTYPE_MASK to 0 */
619 	reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
620 
621 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
622 }
623 
624 /**
625  *	rvu_exact_config_result_ctrl - Set exact table hash control
626  *	@rvu: Resource virtualization unit.
627  *	@depth: Depth of Exact match table.
628  *
629  *	Sets mask and offset for hash for mem table.
630  */
631 static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
632 {
633 	int blkaddr;
634 	u64 reg = 0;
635 
636 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
637 
638 	/* Set mask. Note that depth is a power of 2 */
639 	rvu->hw->table->mem_table.hash_mask = (depth - 1);
640 	reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
641 
642 	/* Set offset as 0 */
643 	rvu->hw->table->mem_table.hash_offset = 0;
644 	reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
645 
646 	/* Set reg for RX */
647 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
648 	/* Store hash mask and offset for s/w algorithm */
649 }
650 
651 /**
652  *	rvu_exact_config_table_mask - Set exact table mask.
653  *	@rvu: Resource virtualization unit.
654  */
655 static void rvu_exact_config_table_mask(struct rvu *rvu)
656 {
657 	int blkaddr;
658 	u64 mask = 0;
659 
660 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
661 
662 	/* Don't use Ctype */
663 	mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
664 
665 	/* Set chan */
666 	mask |= GENMASK_ULL(59, 48);
667 
668 	/* Full ldata */
669 	mask |= GENMASK_ULL(47, 0);
670 
671 	/* Store mask for s/w hash calculation */
672 	rvu->hw->table->mem_table.mask = mask;
673 
674 	/* Set mask for RX.*/
675 	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
676 }
677 
678 /**
679  *      rvu_npc_exact_get_max_entries - Get total number of entries in table.
680  *      @rvu: resource virtualization unit.
681  *	Return: Maximum table entries possible.
682  */
683 u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
684 {
685 	struct npc_exact_table *table;
686 
687 	table = rvu->hw->table;
688 	return table->tot_ids;
689 }
690 
691 /**
692  *      rvu_npc_exact_has_match_table - Checks support for exact match.
693  *      @rvu: resource virtualization unit.
694  *	Return: True if exact match table is supported/enabled.
695  */
696 bool rvu_npc_exact_has_match_table(struct rvu *rvu)
697 {
698 	return  rvu->hw->cap.npc_exact_match_enabled;
699 }
700 
701 /**
702  *      __rvu_npc_exact_find_entry_by_seq_id - find entry by id
703  *      @rvu: resource virtualization unit.
704  *	@seq_id: Sequence identifier.
705  *
706  *	Caller should acquire the lock.
707  *	Return: Pointer to table entry.
708  */
709 static struct npc_exact_table_entry *
710 __rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
711 {
712 	struct npc_exact_table *table = rvu->hw->table;
713 	struct npc_exact_table_entry *entry = NULL;
714 	struct list_head *lhead;
715 
716 	lhead = &table->lhead_gbl;
717 
718 	/* traverse to find the matching entry */
719 	list_for_each_entry(entry, lhead, glist) {
720 		if (entry->seq_id != seq_id)
721 			continue;
722 
723 		return entry;
724 	}
725 
726 	return NULL;
727 }
728 
729 /**
730  *      rvu_npc_exact_add_to_list - Add entry to list
731  *      @rvu: resource virtualization unit.
732  *	@opc_type: OPCODE to select MEM/CAM table.
733  *	@ways: MEM table ways.
734  *	@index: Index in MEM/CAM table.
735  *	@cgx_id: CGX identifier.
736  *	@lmac_id: LMAC identifier.
737  *	@mac_addr: MAC address.
738  *	@chan: Channel number.
739  *	@ctype: Channel Type.
740  *	@seq_id: Sequence identifier
741  *	@cmd: True if function is called by ethtool cmd
742  *	@mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
743  *	@pcifunc: pci function
744  *	Return: 0 upon success.
745  */
746 static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
747 				     u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
748 				     u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
749 {
750 	struct npc_exact_table_entry *entry, *tmp, *iter;
751 	struct npc_exact_table *table = rvu->hw->table;
752 	struct list_head *lhead, *pprev;
753 
754 	WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
755 
756 	if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
757 		dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
758 		return -EFAULT;
759 	}
760 
761 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
762 	if (!entry) {
763 		rvu_npc_exact_free_id(rvu, *seq_id);
764 		dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
765 		return -ENOMEM;
766 	}
767 
768 	mutex_lock(&table->lock);
769 	switch (opc_type) {
770 	case NPC_EXACT_OPC_CAM:
771 		lhead = &table->lhead_cam_tbl_entry;
772 		table->cam_tbl_entry_cnt++;
773 		break;
774 
775 	case NPC_EXACT_OPC_MEM:
776 		lhead = &table->lhead_mem_tbl_entry[ways];
777 		table->mem_tbl_entry_cnt++;
778 		break;
779 
780 	default:
781 		mutex_unlock(&table->lock);
782 		kfree(entry);
783 		rvu_npc_exact_free_id(rvu, *seq_id);
784 
785 		dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
786 		return  -EINVAL;
787 	}
788 
789 	/* Add to global list */
790 	INIT_LIST_HEAD(&entry->glist);
791 	list_add_tail(&entry->glist, &table->lhead_gbl);
792 	INIT_LIST_HEAD(&entry->list);
793 	entry->index = index;
794 	entry->ways = ways;
795 	entry->opc_type = opc_type;
796 
797 	entry->pcifunc = pcifunc;
798 
799 	ether_addr_copy(entry->mac, mac_addr);
800 	entry->chan = chan;
801 	entry->ctype = ctype;
802 	entry->cgx_id = cgx_id;
803 	entry->lmac_id = lmac_id;
804 
805 	entry->seq_id = *seq_id;
806 
807 	entry->mcam_idx = mcam_idx;
808 	entry->cmd = cmd;
809 
810 	pprev = lhead;
811 
812 	/* Insert entry in ascending order of index */
813 	list_for_each_entry_safe(iter, tmp, lhead, list) {
814 		if (index < iter->index)
815 			break;
816 
817 		pprev = &iter->list;
818 	}
819 
820 	/* Add to each table list */
821 	list_add(&entry->list, pprev);
822 	mutex_unlock(&table->lock);
823 	return 0;
824 }
825 
826 /**
827  *	rvu_npc_exact_mem_table_write - Wrapper for register write
828  *	@rvu: resource virtualization unit.
829  *	@blkaddr: Block address
830  *	@ways: ways for MEM table.
831  *	@index: Index in MEM
832  *	@mdata: Meta data to be written to register.
833  */
834 static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
835 					  u32 index, u64 mdata)
836 {
837 	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
838 }
839 
840 /**
841  *	rvu_npc_exact_cam_table_write - Wrapper for register write
842  *	@rvu: resource virtualization unit.
843  *	@blkaddr: Block address
844  *	@index: Index in MEM
845  *	@mdata: Meta data to be written to register.
846  */
847 static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
848 					  u32 index, u64 mdata)
849 {
850 	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
851 }
852 
853 /**
854  *      rvu_npc_exact_dealloc_table_entry - dealloc table entry
855  *      @rvu: resource virtualization unit.
856  *	@opc_type: OPCODE for selection of table(MEM or CAM)
857  *	@ways: ways if opc_type is MEM table.
858  *	@index: Index of MEM or CAM table.
859  *	Return: 0 upon success.
860  */
861 static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
862 					     u8 ways, u32 index)
863 {
864 	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
865 	struct npc_exact_table *table;
866 	u8 null_dmac[6] = { 0 };
867 	int depth;
868 
869 	/* Prepare entry with all fields set to zero */
870 	u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
871 
872 	table = rvu->hw->table;
873 	depth = table->mem_table.depth;
874 
875 	mutex_lock(&table->lock);
876 
877 	switch (opc_type) {
878 	case NPC_EXACT_OPC_CAM:
879 
880 		/* Check whether entry is used already */
881 		if (!test_bit(index, table->cam_table.bmap)) {
882 			mutex_unlock(&table->lock);
883 			dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
884 				__func__, ways, index);
885 			return -EINVAL;
886 		}
887 
888 		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
889 		clear_bit(index, table->cam_table.bmap);
890 		break;
891 
892 	case NPC_EXACT_OPC_MEM:
893 
894 		/* Check whether entry is used already */
895 		if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
896 			mutex_unlock(&table->lock);
897 			dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
898 				__func__, index);
899 			return -EINVAL;
900 		}
901 
902 		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
903 		clear_bit(index + ways * depth, table->mem_table.bmap);
904 		break;
905 
906 	default:
907 		mutex_unlock(&table->lock);
908 		dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
909 		return -ENOSPC;
910 	}
911 
912 	mutex_unlock(&table->lock);
913 
914 	dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d)\n",
915 		__func__, index,  ways, opc_type);
916 
917 	return 0;
918 }
919 
920 /**
921  *	rvu_npc_exact_alloc_table_entry - Allocate an entry
922  *      @rvu: resource virtualization unit.
923  *	@mac: MAC address.
924  *	@chan: Channel number.
925  *	@ctype: Channel Type.
926  *	@index: Index of MEM table or CAM table.
927  *	@ways: Ways. Only valid for MEM table.
928  *	@opc_type: OPCODE to select table (MEM or CAM)
929  *
930  *	Try allocating a slot in the MEM table. If all 4 way
931  *	slots are full for a hash index, check availability in
932  *	the 32-entry CAM table for allocation.
933  *	Return: 0 upon success.
934  */
935 static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
936 					   u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
937 {
938 	struct npc_exact_table *table;
939 	unsigned int hash;
940 	int err;
941 
942 	table = rvu->hw->table;
943 
944 	/* Check the 4-way mem table for a free slot */
945 	hash =  rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
946 					 table->mem_table.depth);
947 	err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
948 	if (!err) {
949 		*opc_type = NPC_EXACT_OPC_MEM;
950 		dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
951 			__func__, *ways, *index);
952 		return 0;
953 	}
954 
955 	dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
956 
957 	/* ways is 0 for cam table */
958 	*ways = 0;
959 	err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
960 	if (!err) {
961 		*opc_type = NPC_EXACT_OPC_CAM;
962 		dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
963 			__func__, *index);
964 		return 0;
965 	}
966 
967 	dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
968 	return -ENOSPC;
969 }
970 
971 /**
972  *	rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
973  *      @rvu: resource virtualization unit.
974  *	@drop_mcam_idx: Drop rule index in NPC mcam.
975  *	@chan_val: Channel value.
976  *	@chan_mask: Channel Mask.
977  *	@pcifunc: pcifunc of interface.
978  *	Return: True upon success.
979  */
980 static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
981 						       u64 chan_val, u64 chan_mask, u16 pcifunc)
982 {
983 	struct npc_exact_table *table;
984 	int i;
985 
986 	table = rvu->hw->table;
987 
988 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
989 		if (!table->drop_rule_map[i].valid)
990 			break;
991 
992 		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
993 			continue;
994 
995 		if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
996 			continue;
997 
998 		return false;
999 	}
1000 
1001 	if (i == NPC_MCAM_DROP_RULE_MAX)
1002 		return false;
1003 
1004 	table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
1005 	table->drop_rule_map[i].chan_val = (u16)chan_val;
1006 	table->drop_rule_map[i].chan_mask = (u16)chan_mask;
1007 	table->drop_rule_map[i].pcifunc = pcifunc;
1008 	table->drop_rule_map[i].valid = true;
1009 	return true;
1010 }
1011 
1012 /**
1013  *	rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
1014  *      @rvu: resource virtualization unit.
1015  *	@intf_type: Interface type (SDP, LBK or CGX)
1016  *	@cgx_id: CGX identifier.
1017  *	@lmac_id: LMAC identifier.
1018  *	@val: Channel number.
1019  *	@mask: Channel mask.
1020  *	Return: True upon success.
1021  */
1022 static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
1023 						       u8 cgx_id, u8 lmac_id,
1024 						       u64 *val, u64 *mask)
1025 {
1026 	u16 chan_val, chan_mask;
1027 
1028 	/* No support for SDP and LBK */
1029 	if (intf_type != NIX_INTF_TYPE_CGX)
1030 		return false;
1031 
1032 	chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
1033 	chan_mask = 0xfff;
1034 
1035 	if (val)
1036 		*val = chan_val;
1037 
1038 	if (mask)
1039 		*mask = chan_mask;
1040 
1041 	return true;
1042 }
1043 
1044 /**
1045  *	rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
1046  *      @rvu: resource virtualization unit.
1047  *	@drop_rule_idx: Drop rule index in NPC mcam.
1048  *
1049  *	Debugfs (exact_drop_cnt) entry displays the pcifunc of the interface
1050  *	by retrieving the pcifunc value from the database.
1051  *	Return: pcifunc of the interface owning the drop rule.
1052  */
1053 u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
1054 {
1055 	struct npc_exact_table *table;
1056 	int i;
1057 
1058 	table = rvu->hw->table;
1059 
1060 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1061 		if (!table->drop_rule_map[i].valid)
1062 			break;
1063 
1064 		if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
1065 			continue;
1066 
1067 		return table->drop_rule_map[i].pcifunc;
1068 	}
1069 
1070 	dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
1071 		__func__, drop_rule_idx);
1072 	return -1;
1073 }
1074 
1075 /**
1076  *	rvu_npc_exact_get_drop_rule_info - Get drop rule information.
1077  *      @rvu: resource virtualization unit.
1078  *	@intf_type: Interface type (CGX, SDP or LBK)
1079  *	@cgx_id: CGX identifier.
1080  *	@lmac_id: LMAC identifier.
1081  *	@drop_mcam_idx: NPC mcam drop rule index.
1082  *	@val: Channel value.
1083  *	@mask: Channel mask.
1084  *	@pcifunc: pcifunc of interface corresponding to the drop rule.
1085  *	Return: True upon success.
1086  */
1087 static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
1088 					     u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
1089 					     u64 *mask, u16 *pcifunc)
1090 {
1091 	struct npc_exact_table *table;
1092 	u64 chan_val, chan_mask;
1093 	bool rc;
1094 	int i;
1095 
1096 	table = rvu->hw->table;
1097 
1098 	if (intf_type != NIX_INTF_TYPE_CGX) {
1099 		dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
1100 		return false;
1101 	}
1102 
1103 	rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
1104 							lmac_id, &chan_val, &chan_mask);
1105 	if (!rc)
1106 		return false;
1107 
1108 	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
1109 		if (!table->drop_rule_map[i].valid)
1110 			break;
1111 
1112 		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
1113 			continue;
1114 
1115 		if (val)
1116 			*val = table->drop_rule_map[i].chan_val;
1117 		if (mask)
1118 			*mask = table->drop_rule_map[i].chan_mask;
1119 		if (pcifunc)
1120 			*pcifunc = table->drop_rule_map[i].pcifunc;
1121 
1122 		*drop_mcam_idx = i;
1123 		return true;
1124 	}
1125 
1126 	if (i == NPC_MCAM_DROP_RULE_MAX) {
1127 		dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
1128 			__func__, i);
1129 		return false;
1130 	}
1131 
1132 	dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
1133 		__func__, cgx_id, lmac_id);
1134 	return false;
1135 }
1136 
1137 /**
1138  *	__rvu_npc_exact_cmd_rules_cnt_update - Update the number of dmac rules against a drop rule.
1139  *      @rvu: resource virtualization unit.
1140  *	@drop_mcam_idx: NPC mcam drop rule index.
1141  *	@val: +1 or -1.
1142  *	@enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
1143  *
1144  *	When the first exact match entry against a drop rule is added (enable the
1145  *	drop rule) or the last exact match entry against it is deleted (disable
1146  *	the drop rule), *enable_or_disable_cam is set to true.
1147  *	Return: Number of rules
1148  */
1149 static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
1150 						int val, bool *enable_or_disable_cam)
1151 {
1152 	struct npc_exact_table *table;
1153 	u16 *cnt, old_cnt;
1154 	bool promisc;
1155 
1156 	table = rvu->hw->table;
1157 	promisc = table->promisc_mode[drop_mcam_idx];
1158 
1159 	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
1160 	old_cnt = *cnt;
1161 
1162 	*cnt += val;
1163 
1164 	if (!enable_or_disable_cam)
1165 		goto done;
1166 
1167 	*enable_or_disable_cam = false;
1168 
1169 	if (promisc)
1170 		goto done;
1171 
1172 	/* If all rules are deleted and not already in promisc mode; disable cam */
1173 	if (!*cnt && val < 0) {
1174 		*enable_or_disable_cam = true;
1175 		goto done;
1176 	}
1177 
1178 	/* If rule got added and not already in promisc mode; enable cam */
1179 	if (!old_cnt && val > 0) {
1180 		*enable_or_disable_cam = true;
1181 		goto done;
1182 	}
1183 
1184 done:
1185 	return *cnt;
1186 }
1187 
1188 /**
1189  *      rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
1190  *      @rvu: resource virtualization unit.
1191  *	@seq_id: Sequence identifier of the entry.
1192  *
1193  *	Deletes entry from linked lists and free up slot in HW MEM or CAM
1194  *	table.
1195  *	Return: 0 upon success.
1196  */
1197 static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
1198 {
1199 	struct npc_exact_table_entry *entry = NULL;
1200 	struct npc_exact_table *table;
1201 	bool disable_cam = false;
1202 	u32 drop_mcam_idx = -1;
1203 	int *cnt;
1204 	bool rc;
1205 
1206 	table = rvu->hw->table;
1207 
1208 	mutex_lock(&table->lock);
1209 
1210 	/* Lookup for entry which needs to be updated */
1211 	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
1212 	if (!entry) {
1213 		dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
1214 		mutex_unlock(&table->lock);
1215 		return -ENODATA;
1216 	}
1217 
1218 	cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
1219 				&table->mem_tbl_entry_cnt;
1220 
1221 	/* delete from lists */
1222 	list_del_init(&entry->list);
1223 	list_del_init(&entry->glist);
1224 
1225 	(*cnt)--;
1226 
1227 	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
1228 					      entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
1229 	if (!rc) {
1230 		dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
1231 			__func__, seq_id);
1232 		mutex_unlock(&table->lock);
1233 		return -ENODATA;
1234 	}
1235 
1236 	if (entry->cmd)
1237 		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
1238 
1239 	/* No dmac filter rules; disable drop on hit rule */
1240 	if (disable_cam) {
1241 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
1242 		dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
1243 			__func__, drop_mcam_idx);
1244 	}
1245 
1246 	mutex_unlock(&table->lock);
1247 
1248 	rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
1249 
1250 	rvu_npc_exact_free_id(rvu, seq_id);
1251 
1252 	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mac=%pM\n",
1253 		__func__, seq_id, entry->mac);
1254 	kfree(entry);
1255 
1256 	return 0;
1257 }
1258 
1259 /**
1260  *      rvu_npc_exact_add_table_entry - Adds a table entry
1261  *      @rvu: resource virtualization unit.
1262  *	@cgx_id: cgx identifier.
1263  *	@lmac_id: lmac identifier.
1264  *	@mac: MAC address.
1265  *	@chan: Channel number.
1266  *	@ctype: Channel Type.
1267  *	@seq_id: Sequence number.
1268  *	@cmd: Whether it is invoked by ethtool cmd.
1269  *	@mcam_idx: NPC mcam index corresponding to MAC
1270  *	@pcifunc: PCI func.
1271  *
1272  *	Creates a new exact match table entry in either CAM or
1273  *	MEM table.
1274  *	Return: 0 upon success.
1275  */
1276 static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
1277 					 u16 chan, u8 ctype, u32 *seq_id, bool cmd,
1278 					 u32 mcam_idx, u16 pcifunc)
1279 {
1280 	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1281 	enum npc_exact_opc_type opc_type;
1282 	bool enable_cam = false;
1283 	u32 drop_mcam_idx;
1284 	u32 index;
1285 	u64 mdata;
1286 	bool rc;
1287 	int err;
1288 	u8 ways;
1289 
1290 	ctype = 0;
1291 
1292 	err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
1293 	if (err) {
1294 		dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
1295 		return err;
1296 	}
1297 
1298 	/* Write mdata to table */
1299 	mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
1300 
1301 	if (opc_type == NPC_EXACT_OPC_CAM)
1302 		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
1303 	else
1304 		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index,  mdata);
1305 
1306 	/* Insert entry to linked list */
1307 	err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
1308 					mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
1309 	if (err) {
1310 		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
1311 		dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
1312 		return err;
1313 	}
1314 
1315 	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1316 					      &drop_mcam_idx, NULL, NULL, NULL);
1317 	if (!rc) {
1318 		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
1319 		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1320 			__func__, cgx_id, lmac_id);
1321 		return -EINVAL;
1322 	}
1323 
1324 	if (cmd)
1325 		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
1326 
1327 	/* First command rule; enable drop on hit rule */
1328 	if (enable_cam) {
1329 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
1330 		dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
1331 			__func__, drop_mcam_idx);
1332 	}
1333 
1334 	dev_dbg(rvu->dev,
1335 		"%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d)\n",
1336 		__func__, index, mac, ways, opc_type);
1337 
1338 	return 0;
1339 }
1340 
1341 /**
1342  *      rvu_npc_exact_update_table_entry - Update exact match table.
1343  *      @rvu: resource virtualization unit.
1344  *	@cgx_id: CGX identifier.
1345  *	@lmac_id: LMAC identifier.
1346  *	@old_mac: Existing MAC address entry.
1347  *	@new_mac: New MAC address entry.
1348  *	@seq_id: Sequence identifier of the entry.
1349  *
1350  *	Updates MAC address of an entry. If entry is in MEM table, new
1351  *	hash value may not match with old one.
1352  *	Return: 0 upon success.
1353  */
1354 static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
1355 					    u8 *old_mac, u8 *new_mac, u32 *seq_id)
1356 {
1357 	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1358 	struct npc_exact_table_entry *entry;
1359 	struct npc_exact_table *table;
1360 	u32 hash_index;
1361 	u64 mdata;
1362 
1363 	table = rvu->hw->table;
1364 
1365 	mutex_lock(&table->lock);
1366 
1367 	/* Lookup for entry which needs to be updated */
1368 	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
1369 	if (!entry) {
1370 		mutex_unlock(&table->lock);
1371 		dev_dbg(rvu->dev,
1372 			"%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
1373 			__func__, cgx_id, lmac_id, old_mac);
1374 		return -ENODATA;
1375 	}
1376 
1377 	/* If entry is in mem table and new hash index is different than old
1378 	 * hash index, we cannot update the entry. Fail in these scenarios.
1379 	 */
1380 	if (entry->opc_type == NPC_EXACT_OPC_MEM) {
1381 		hash_index =  rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
1382 						       new_mac, table->mem_table.mask,
1383 						       table->mem_table.depth);
1384 		if (hash_index != entry->index) {
1385 			dev_dbg(rvu->dev,
1386 				"%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
1387 				__func__, hash_index, entry->index);
1388 			mutex_unlock(&table->lock);
1389 			return -EINVAL;
1390 		}
1391 	}
1392 
1393 	mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
1394 
1395 	if (entry->opc_type == NPC_EXACT_OPC_MEM)
1396 		rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
1397 	else
1398 		rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
1399 
1400 	/* Update entry fields */
1401 	ether_addr_copy(entry->mac, new_mac);
1402 	*seq_id = entry->seq_id;
1403 
1404 	dev_dbg(rvu->dev,
1405 		"%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d)\n",
1406 		__func__, entry->index, entry->mac, entry->ways, entry->opc_type);
1407 
1408 	dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM)\n",
1409 		__func__, old_mac, new_mac);
1410 
1411 	mutex_unlock(&table->lock);
1412 	return 0;
1413 }
1414 
1415 /**
1416  *	rvu_npc_exact_promisc_disable - Disable promiscuous mode.
1417  *      @rvu: resource virtualization unit.
1418  *	@pcifunc: pcifunc
1419  *
1420  *	Drop rules are per PF. DMAC filters are not supported for
1421  *	VFs.
1422  *	Return: 0 upon success
1423  */
1424 
1425 int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
1426 {
1427 	struct npc_exact_table *table;
1428 	int pf = rvu_get_pf(pcifunc);
1429 	u8 cgx_id, lmac_id;
1430 	u32 drop_mcam_idx;
1431 	bool *promisc;
1432 	bool rc;
1433 	u32 cnt;
1434 
1435 	table = rvu->hw->table;
1436 
1437 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1438 	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1439 					      &drop_mcam_idx, NULL, NULL, NULL);
1440 	if (!rc) {
1441 		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1442 			__func__, cgx_id, lmac_id);
1443 		return -EINVAL;
1444 	}
1445 
1446 	mutex_lock(&table->lock);
1447 	promisc = &table->promisc_mode[drop_mcam_idx];
1448 
1449 	if (!*promisc) {
1450 		mutex_unlock(&table->lock);
1451 		dev_dbg(rvu->dev, "%s: Err: promisc mode already disabled (cgx=%d lmac=%d)\n",
1452 			__func__, cgx_id, lmac_id);
1453 		return LMAC_AF_ERR_INVALID_PARAM;
1454 	}
1455 	*promisc = false;
1456 	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
1457 	mutex_unlock(&table->lock);
1458 
1459 	/* If no dmac filter entries configured, disable drop rule */
1460 	if (!cnt)
1461 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
1462 	else
1463 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
1464 
1465 	dev_dbg(rvu->dev, "%s: disabled  promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
1466 		__func__, cgx_id, lmac_id, cnt);
1467 	return 0;
1468 }
1469 
1470 /**
1471  *	rvu_npc_exact_promisc_enable - Enable promiscuous mode.
1472  *      @rvu: resource virtualization unit.
1473  *	@pcifunc: pcifunc.
1474  *	Return: 0 upon success
1475  */
1476 int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
1477 {
1478 	struct npc_exact_table *table;
1479 	int pf = rvu_get_pf(pcifunc);
1480 	u8 cgx_id, lmac_id;
1481 	u32 drop_mcam_idx;
1482 	bool *promisc;
1483 	bool rc;
1484 	u32 cnt;
1485 
1486 	table = rvu->hw->table;
1487 
1488 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1489 	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1490 					      &drop_mcam_idx, NULL, NULL, NULL);
1491 	if (!rc) {
1492 		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1493 			__func__, cgx_id, lmac_id);
1494 		return -EINVAL;
1495 	}
1496 
1497 	mutex_lock(&table->lock);
1498 	promisc = &table->promisc_mode[drop_mcam_idx];
1499 
1500 	if (*promisc) {
1501 		mutex_unlock(&table->lock);
1502 		dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
1503 			__func__, cgx_id, lmac_id);
1504 		return LMAC_AF_ERR_INVALID_PARAM;
1505 	}
1506 	*promisc = true;
1507 	cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
1508 	mutex_unlock(&table->lock);
1509 
1510 	/* If no dmac filter entries configured, disable drop rule */
1511 	if (!cnt)
1512 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
1513 	else
1514 		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
1515 
1516 	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
1517 		__func__, cgx_id, lmac_id, cnt);
1518 	return 0;
1519 }
1520 
1521 /**
1522  *	rvu_npc_exact_mac_addr_reset - Delete PF mac address.
1523  *      @rvu: resource virtualization unit.
1524  *	@req: Reset request
1525  *	@rsp: Reset response.
1526  *	Return: 0 upon success
1527  */
1528 int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
1529 				 struct msg_rsp *rsp)
1530 {
1531 	int pf = rvu_get_pf(req->hdr.pcifunc);
1532 	u32 seq_id = req->index;
1533 	struct rvu_pfvf *pfvf;
1534 	u8 cgx_id, lmac_id;
1535 	int rc;
1536 
1537 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1538 
1539 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1540 
1541 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1542 	if (rc) {
1543 		/* TODO: how to handle this error case ? */
1544 		dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
1545 		return 0;
1546 	}
1547 
1548 	dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
1549 		__func__, pfvf->mac_addr, pf, seq_id);
1550 	return 0;
1551 }
1552 
1553 /**
1554  *	rvu_npc_exact_mac_addr_update - Update mac address field with new value.
1555  *      @rvu: resource virtualization unit.
1556  *	@req: Update request.
1557  *	@rsp: Update response.
1558  *	Return: 0 upon success
1559  */
1560 int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
1561 				  struct cgx_mac_addr_update_req *req,
1562 				  struct cgx_mac_addr_update_rsp *rsp)
1563 {
1564 	int pf = rvu_get_pf(req->hdr.pcifunc);
1565 	struct npc_exact_table_entry *entry;
1566 	struct npc_exact_table *table;
1567 	struct rvu_pfvf *pfvf;
1568 	u32 seq_id, mcam_idx;
1569 	u8 old_mac[ETH_ALEN];
1570 	u8 cgx_id, lmac_id;
1571 	int rc;
1572 
1573 	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1574 		return LMAC_AF_ERR_PERM_DENIED;
1575 
1576 	dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
1577 		__func__, req->index, req->mac_addr);
1578 
1579 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1580 
1581 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1582 
1583 	table = rvu->hw->table;
1584 
1585 	mutex_lock(&table->lock);
1586 
1587 	/* Lookup for entry which needs to be updated */
1588 	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
1589 	if (!entry) {
1590 		dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
1591 		mutex_unlock(&table->lock);
1592 		return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
1593 	}
1594 	ether_addr_copy(old_mac, entry->mac);
1595 	seq_id = entry->seq_id;
1596 	mcam_idx = entry->mcam_idx;
1597 	mutex_unlock(&table->lock);
1598 
1599 	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id,  old_mac,
1600 					      req->mac_addr, &seq_id);
1601 	if (!rc) {
1602 		rsp->index = seq_id;
1603 		dev_dbg(rvu->dev, "%s  mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
1604 			__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
1605 		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1606 		return 0;
1607 	}
1608 
1609 	/* Try deleting and adding it again */
1610 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1611 	if (rc) {
1612 		/* This could be a new entry */
1613 		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
1614 			pfvf->mac_addr, pf);
1615 	}
1616 
1617 	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1618 					   pfvf->rx_chan_base, 0, &seq_id, true,
1619 					   mcam_idx, req->hdr.pcifunc);
1620 	if (rc) {
1621 		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
1622 			req->mac_addr, pf);
1623 		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1624 	}
1625 
1626 	rsp->index = seq_id;
1627 	dev_dbg(rvu->dev,
1628 		"%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
1629 		__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
1630 
1631 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1632 	return 0;
1633 }
1634 
1635 /**
1636  *	rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
1637  *      @rvu: resource virtualization unit.
1638  *	@req: Add request.
1639  *	@rsp: Add response.
1640  *	Return: 0 upon success
1641  */
1642 int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
1643 			       struct cgx_mac_addr_add_req *req,
1644 			       struct cgx_mac_addr_add_rsp *rsp)
1645 {
1646 	int pf = rvu_get_pf(req->hdr.pcifunc);
1647 	struct rvu_pfvf *pfvf;
1648 	u8 cgx_id, lmac_id;
1649 	int rc = 0;
1650 	u32 seq_id;
1651 
1652 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1653 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1654 
1655 	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1656 					   pfvf->rx_chan_base, 0, &seq_id,
1657 					   true, -1, req->hdr.pcifunc);
1658 
1659 	if (!rc) {
1660 		rsp->index = seq_id;
1661 		dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
1662 			__func__, req->mac_addr, pf, seq_id);
1663 		return 0;
1664 	}
1665 
1666 	dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
1667 		req->mac_addr, pf);
1668 	return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1669 }
1670 
1671 /**
1672  *	rvu_npc_exact_mac_addr_del - Delete DMAC filter
1673  *      @rvu: resource virtualization unit.
1674  *	@req: Delete request.
1675  *	@rsp: Delete response.
1676  *	Return: 0 upon success
1677  */
1678 int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
1679 			       struct cgx_mac_addr_del_req *req,
1680 			       struct msg_rsp *rsp)
1681 {
1682 	int pf = rvu_get_pf(req->hdr.pcifunc);
1683 	int rc;
1684 
1685 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1686 	if (!rc) {
1687 		dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
1688 			__func__, pf, req->index);
1689 		return 0;
1690 	}
1691 
1692 	dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
1693 		__func__, pf, req->index);
1694 	return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
1695 }
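
/* Editor's note, an illustrative sketch rather than code from this file:
 * the sequence id returned by rvu_npc_exact_mac_addr_add() in rsp->index
 * is the handle a later delete request is expected to carry in req->index.
 * With hypothetical request/response variables:
 *
 *	rvu_npc_exact_mac_addr_add(rvu, &add_req, &add_rsp);
 *	del_req.index = add_rsp.index;
 *	rvu_npc_exact_mac_addr_del(rvu, &del_req, &del_rsp);
 */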
1696 
1697 /**
1698  * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
1699  * @rvu: resource virtualization unit.
1700  * @req: Set request.
1701  * @rsp: Set response.
1702  * Return: 0 upon success.
1703  */
1704 int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
1705 			       struct cgx_mac_addr_set_or_get *rsp)
1706 {
1707 	int pf = rvu_get_pf(req->hdr.pcifunc);
1708 	u32 seq_id = req->index;
1709 	struct rvu_pfvf *pfvf;
1710 	u8 cgx_id, lmac_id;
1711 	u32 mcam_idx = -1;
1712 	int rc, nixlf;
1713 
1714 	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1715 
1716 	pfvf = &rvu->pf[pf];
1717 
1718 	/* If the table does not have an entry, both the update entry and
1719 	 * delete table entry APIs below fail. Those are not failure conditions.
1720 	 */
1721 	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
1722 					      req->mac_addr, &seq_id);
1723 	if (!rc) {
1724 		rsp->index = seq_id;
1725 		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1726 		ether_addr_copy(rsp->mac_addr, req->mac_addr);
1727 		dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
1728 			__func__, req->mac_addr, pf);
1729 		return 0;
1730 	}
1731 
1732 	/* Try deleting and adding it again */
1733 	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1734 	if (rc) {
1735 		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
1736 			__func__, pfvf->mac_addr, pf);
1737 	}
1738 
1739 	/* Find the mcam entry if it exists */
1740 	rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
1741 	if (!rc) {
1742 		mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
1743 						    nixlf, NIXLF_UCAST_ENTRY);
1744 	}
1745 
1746 	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1747 					   pfvf->rx_chan_base, 0, &seq_id,
1748 					   true, mcam_idx, req->hdr.pcifunc);
1749 	if (rc) {
1750 		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
1751 			__func__, req->mac_addr, pf);
1752 		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1753 	}
1754 
1755 	rsp->index = seq_id;
1756 	ether_addr_copy(rsp->mac_addr, req->mac_addr);
1757 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1758 	dev_dbg(rvu->dev,
1759 		"%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
1760 		__func__, req->mac_addr, pf, seq_id);
1761 	return 0;
1762 }
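
/* Note: rvu_npc_exact_mac_addr_set() above replaces the PF's primary MAC,
 * reusing the NIXLF unicast MCAM index when one exists, whereas
 * rvu_npc_exact_mac_addr_add() installs an additional DMAC filter and
 * passes an mcam_idx of -1, i.e. no existing unicast MCAM entry is reused.
 */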
1763 
1764 /**
1765  * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
1766  * @rvu: resource virtualization unit.
1767  * Return: True if the exact match feature is enabled and the table is empty.
1768  */
1769 bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
1770 {
1771 	struct npc_exact_table *table = rvu->hw->table;
1772 	bool empty;
1773 
1774 	if (!rvu->hw->cap.npc_exact_match_enabled)
1775 		return false;
1776 
1777 	mutex_lock(&table->lock);
1778 	empty = list_empty(&table->lhead_gbl);
1779 	mutex_unlock(&table->lock);
1780 
1781 	return empty;
1782 }
1783 
1784 /**
1785  * rvu_npc_exact_disable_feature - Disable the exact match feature.
1786  * @rvu: resource virtualization unit.
1787  */
1788 void rvu_npc_exact_disable_feature(struct rvu *rvu)
1789 {
1790 	rvu->hw->cap.npc_exact_match_enabled = false;
1791 }
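
/* Editor's note, an illustrative usage sketch rather than code from this
 * file: a caller that wants to turn the feature off only when no exact
 * match entries remain installed could combine the two helpers above:
 *
 *	if (rvu_npc_exact_can_disable_feature(rvu))
 *		rvu_npc_exact_disable_feature(rvu);
 */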
1792 
1793 /**
1794  * rvu_npc_exact_reset - Delete and free all entries that match pcifunc.
1795  * @rvu: resource virtualization unit.
1796  * @pcifunc: PCI func to match.
1797  */
1798 void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
1799 {
1800 	struct npc_exact_table *table = rvu->hw->table;
1801 	struct npc_exact_table_entry *tmp, *iter;
1802 	u32 seq_id;
1803 
1804 	mutex_lock(&table->lock);
1805 	list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
1806 		if (pcifunc != iter->pcifunc)
1807 			continue;
1808 
1809 		seq_id = iter->seq_id;
1810 		dev_dbg(rvu->dev, "%s: resetting pcifunc=%d seq_id=%u\n", __func__,
1811 			pcifunc, seq_id);
1812 
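		/* Drop the table lock across the delete because
		 * rvu_npc_exact_del_table_entry_by_id() takes it again
		 * internally; reacquire it before continuing the walk.
		 */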
1813 		mutex_unlock(&table->lock);
1814 		rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1815 		mutex_lock(&table->lock);
1816 	}
1817 	mutex_unlock(&table->lock);
1818 }
1819 
1820 /**
1821  * rvu_npc_exact_init - Initialize the exact match table.
1822  * @rvu: resource virtualization unit.
1823  *
1824  * Initialize HW and SW resources to manage the 4way-2K table and the
1825  * fully associative 32-entry mcam table.
1826  * Return: 0 upon success.
1827  */
1828 int rvu_npc_exact_init(struct rvu *rvu)
1829 {
1830 	u64 bcast_mcast_val, bcast_mcast_mask;
1831 	struct npc_exact_table *table;
1832 	u64 exact_val, exact_mask;
1833 	u64 chan_val, chan_mask;
1834 	u8 cgx_id, lmac_id;
1835 	u32 *drop_mcam_idx;
1836 	u16 max_lmac_cnt;
1837 	u64 npc_const3;
1838 	int table_size;
1839 	int blkaddr;
1840 	u16 pcifunc;
1841 	int err, i;
1842 	u64 cfg;
1843 	bool rc;
1844 
1845 	/* Read NPC_AF_CONST3 and check whether the exact
1846 	 * match functionality is present
1847 	 */
1848 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
1849 	if (blkaddr < 0) {
1850 		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
1851 		return -EINVAL;
1852 	}
1853 
1854 	/* Check whether the exact match feature is supported */
1855 	npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
1856 	if (!(npc_const3 & BIT_ULL(62))) {
1857 		dev_info(rvu->dev, "%s: Exact match feature is not supported\n",
1858 			 __func__);
1859 		return 0;
1860 	}
1861 
1862 	/* Check if kex profile has enabled EXACT match nibble */
1863 	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
1864 	if (!(cfg & NPC_EXACT_NIBBLE_HIT)) {
1865 		dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n",
1866 			 __func__);
1867 		return 0;
1868 	}
1869 
1870 	/* Set capability to true */
1871 	rvu->hw->cap.npc_exact_match_enabled = true;
1872 
1873 	table = kzalloc(sizeof(*table), GFP_KERNEL);
1874 	if (!table)
1875 		return -ENOMEM;
1876 
1877 	dev_dbg(rvu->dev, "%s: Allocated exact match table\n", __func__);
1879 	rvu->hw->table = table;
1880 
1881 	/* Read table size, ways and depth */
1882 	table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
1883 	table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
1884 	table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
1885 
1886 	dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
1887 		__func__, table->mem_table.ways, table->mem_table.depth);
1888 
1889 	/* Check that the depth of the table is a power of 2.
1890 	 * TODO: why is __builtin_popcount() not working?
1891 	 */
1892 	if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
1893 		dev_err(rvu->dev,
1894 			"%s: NPC exact match 4way_2k table depth(%d) is not a power of 2\n",
1895 			__func__, table->mem_table.depth);
1896 		return -EINVAL;
1897 	}
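
	/* Editor's note, an equivalent form shown for illustration only: for
	 * a non-zero depth, the open-coded power-of-two test above could also
	 * be written with the helper from <linux/log2.h>:
	 *
	 *	if (!is_power_of_2(table->mem_table.depth))
	 *		return -EINVAL;
	 */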
1898 
1899 	table_size = table->mem_table.depth * table->mem_table.ways;
1900 
1901 	/* Allocate bitmap for 4way 2K table */
1902 	table->mem_table.bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table_size),
1903 					     sizeof(long), GFP_KERNEL);
1904 	if (!table->mem_table.bmap)
1905 		return -ENOMEM;
1906 
1907 	dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
1908 
1909 	/* Allocate bitmap for 32 entry mcam */
1910 	table->cam_table.bmap = devm_kcalloc(rvu->dev, 1, sizeof(long), GFP_KERNEL);
1911 
1912 	if (!table->cam_table.bmap)
1913 		return -ENOMEM;
1914 
1915 	dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
1916 
1917 	table->tot_ids = (table->mem_table.depth * table->mem_table.ways) + table->cam_table.depth;
1918 	table->id_bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(table->tot_ids),
1919 				      sizeof(long), GFP_KERNEL);
1920 
1921 	if (!table->id_bmap)
1922 		return -ENOMEM;
1923 
1924 	dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
1925 		__func__, table->tot_ids);
1926 
1927 	/* Initialize list heads for npc_exact_table entries.
1928 	 * These lists are used by debugfs to show entries in
1929 	 * the exact match table.
1930 	 */
1931 	for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
1932 		INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
1933 
1934 	INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
1935 	INIT_LIST_HEAD(&table->lhead_gbl);
1936 
1937 	mutex_init(&table->lock);
1938 
1939 	rvu_exact_config_secret_key(rvu);
1940 	rvu_exact_config_search_key(rvu);
1941 
1942 	rvu_exact_config_table_mask(rvu);
1943 	rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
1944 
1945 	/* - No drop rule for LBK
1946 	 * - Drop rules for SDP and each LMAC.
1947 	 */
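	/* Match on "exact match result = miss": exact_mask selects the HIT
	 * bit and exact_val leaves it clear, so only packets that missed the
	 * exact match table can hit these drop rules.
	 */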
1948 	exact_val = !NPC_EXACT_RESULT_HIT;
1949 	exact_mask = NPC_EXACT_RESULT_HIT;
1950 
1951 	/* nibble - 3	2  1   0
1952 	 *	   L3B L3M L2B L2M
1953 	 */
1954 	bcast_mcast_val = 0b0000;
1955 	bcast_mcast_mask = 0b0011;
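	/* Only the L2B/L2M nibble bits are in the mask and both must be 0,
	 * i.e. the drop rules apply to L2 unicast traffic only; broadcast
	 * and multicast frames are not subject to them.
	 */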
1956 
1957 	/* Install a drop rule for each mapped LMAC */
1958 	drop_mcam_idx = &table->num_drop_rules;
1959 
1960 	max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
1961 	for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
1962 		if (rvu->pf2cgxlmac_map[i] == 0xFF)
1963 			continue;
1964 
1965 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
1966 
1967 		rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
1968 								lmac_id, &chan_val, &chan_mask);
1969 		if (!rc) {
1970 			dev_err(rvu->dev,
1971 				"%s: failed to compute drop rule chan/mask (chan_val=0x%llx chan_mask=0x%llx rule_id=%d)\n",
1972 				__func__, chan_val, chan_mask, *drop_mcam_idx);
1973 			return -EINVAL;
1974 		}
1975 
1976 		/* Filter rules are only for PF */
1977 		pcifunc = RVU_PFFUNC(i, 0);
1978 
1979 		dev_dbg(rvu->dev,
1980 			"%s: Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx)\n",
1981 			__func__, cgx_id, lmac_id, chan_val, chan_mask);
1982 
1983 		rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
1984 								chan_val, chan_mask, pcifunc);
1985 		if (!rc) {
1986 			dev_err(rvu->dev,
1987 				"%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
1988 				__func__, cgx_id, lmac_id, chan_val);
1989 			return -EINVAL;
1990 		}
1991 
1992 		err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
1993 						 &table->counter_idx[*drop_mcam_idx],
1994 						 chan_val, chan_mask,
1995 						 exact_val, exact_mask,
1996 						 bcast_mcast_val, bcast_mcast_mask);
1997 		if (err) {
1998 			dev_err(rvu->dev,
1999 				"failed to configure drop rule (cgx=%d lmac=%d)\n",
2000 				cgx_id, lmac_id);
2001 			return err;
2002 		}
2003 
2004 		(*drop_mcam_idx)++;
2005 	}
2006 
2007 	dev_info(rvu->dev, "initialized exact match table successfully\n");
2008 	return 0;
2009 }
2010