1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_lib.h"
5 #include "ice_switch.h"
6 
/* Byte offsets into dummy_eth_header below, plus protocol constants */
#define ICE_ETH_DA_OFFSET		0	/* destination MAC */
#define ICE_ETH_ETHTYPE_OFFSET		12	/* EtherType field */
#define ICE_ETH_VLAN_TCI_OFFSET		14	/* VLAN TCI (when 802.1Q tagged) */
#define ICE_MAX_VLAN_ID			0xFFF	/* VLAN ID is a 12-bit field */
#define ICE_IPV6_ETHER_ID		0x86DD	/* EtherType for IPv6 */
12 
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14  * struct to configure any switch filter rules.
15  * {DA (6 bytes), SA(6 bytes),
16  * Ether type (2 bytes for header without VLAN tag) OR
17  * VLAN tag (4 bytes for header with VLAN tag) }
18  *
 * A note on the hardcoded values:
20  * byte 0 = 0x2: to identify it as locally administered DA MAC
21  * byte 6 = 0x2: to identify it as locally administered SA MAC
22  * byte 12 = 0x81 & byte 13 = 0x00:
23  *	In case of VLAN filter first two bytes defines ether type (0x8100)
24  *	and remaining two bytes are placeholder for programming a given VLAN ID
25  *	In case of Ether type filter it is treated as header without VLAN tag
26  *	and byte 12 and 13 is used to program a given Ether type instead
27  */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,	/* DA, locally administered */
							0x2, 0, 0, 0, 0, 0,	/* SA, locally administered */
							0x81, 0, 0, 0};		/* 0x8100 + VLAN ID placeholder
										 * (or EtherType, see above)
										 */
32 
/* Flags describing the headers a dummy packet template contains; combined
 * into the 'match' member of struct ice_dummy_pkt_profile to select the
 * best-fitting template for a rule.
 */
enum {
	ICE_PKT_VLAN		= BIT(0),	/* 802.1Q C-tag present */
	ICE_PKT_OUTER_IPV6	= BIT(1),	/* outer IP header is IPv6 */
	ICE_PKT_TUN_GTPC	= BIT(2),	/* GTP-C tunnel */
	ICE_PKT_TUN_GTPU	= BIT(3),	/* GTP-U tunnel */
	ICE_PKT_TUN_NVGRE	= BIT(4),	/* NVGRE tunnel */
	ICE_PKT_TUN_UDP		= BIT(5),	/* UDP tunnel (VXLAN/Geneve/VXLAN-GPE) */
	ICE_PKT_INNER_IPV6	= BIT(6),	/* inner IP header is IPv6 */
	ICE_PKT_INNER_TCP	= BIT(7),	/* inner L4 is TCP */
	ICE_PKT_INNER_UDP	= BIT(8),	/* inner L4 is UDP */
	ICE_PKT_GTP_NOPAY	= BIT(9),	/* match GTP header only, no payload */
};
45 
/* One (protocol, byte offset) entry locating a header within a dummy packet */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
50 
/* A dummy packet template together with its per-protocol offset table.
 * 'match' is a bitmask of the ICE_PKT_* flags above describing which
 * headers the template contains.
 */
struct ice_dummy_pkt_profile {
	const struct ice_dummy_pkt_offsets *offsets;	/* ICE_PROTOCOL_LAST-terminated */
	const u8 *pkt;					/* raw template bytes */
	u32 match;					/* ICE_PKT_* flags */
	u16 pkt_len;					/* size of 'pkt' in bytes */
};
57 
/* Declare the offset table for dummy packet 'type' */
#define ICE_DECLARE_PKT_OFFSETS(type)				\
	static const struct ice_dummy_pkt_offsets		\
	ice_dummy_##type##_packet_offsets[]

/* Declare the raw byte template for dummy packet 'type' */
#define ICE_DECLARE_PKT_TEMPLATE(type)				\
	static const u8 ice_dummy_##type##_packet[]

/* Build an ice_dummy_pkt_profile initializer tying together the template,
 * its offsets and the ICE_PKT_* match mask 'm'.
 */
#define ICE_PKT_PROFILE(type, m) {				\
	.match		= (m),					\
	.pkt		= ice_dummy_##type##_packet,		\
	.pkt_len	= sizeof(ice_dummy_##type##_packet),	\
	.offsets	= ice_dummy_##type##_packet_offsets,	\
}
71 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 47 = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
118 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 47 = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 54 */

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};
162 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/Geneve/VXLAN-GPE) +
 * MAC + IPv4 + TCP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 4789 */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,	/* protocol 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
215 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/Geneve/VXLAN-GPE) +
 * MAC + IPv4 + UDP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 4789 */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_IL 62 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};
265 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_TCP_IL,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 47 = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x06, 0x40,	/* next header 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
317 
/* offset info for MAC + IPv4 + NVGRE + MAC + IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_ETYPE_IL,		54 },
	{ ICE_IPV6_IL,		56 },
	{ ICE_UDP_ILOS,		96 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 47 = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 54 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
	0x00, 0x08, 0x11, 0x40,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
	0x00, 0x08, 0x00, 0x00,
};
366 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/Geneve/VXLAN-GPE) +
 * MAC + IPv6 + TCP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_TCP_IL,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 4789 */
	0x00, 0x5a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x06, 0x40,	/* next header 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};
424 
/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/Geneve/VXLAN-GPE) +
 * MAC + IPv6 + UDP dummy packet
 */
ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_ETYPE_IL,		62 },
	{ ICE_IPV6_IL,		64 },
	{ ICE_UDP_ILOS,		104 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + MAC + IPv6 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00,  /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34, dst port 4789 */
	0x00, 0x4e, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xdd,		/* ICE_ETYPE_IL 62 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
	0x00, 0x08, 0x11, 0x40,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
	0x00, 0x08, 0x00, 0x00,
};
479 
/* offset info for MAC + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
ICE_DECLARE_PKT_TEMPLATE(udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
508 
/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
ICE_DECLARE_PKT_OFFSETS(vlan_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
540 
/* offset info for MAC + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
572 
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};
607 
/* offset info for MAC + IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
642 
/* C-tag (802.1Q): IPv6 + TCP */
ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
682 
/* IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
719 
/* C-tag (802.1Q): IPv6 + UDP */
ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
756 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_TCP_IL,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x58, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 (GTP-U) */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
804 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV4_IL,		62 },
	{ ICE_UDP_ILOS,		82 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x4c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 (GTP-U) */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 82 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
849 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x6c, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 (GTP-U) */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x14, 0x06, 0x00,	/* next header 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
902 
/* Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP,		42 },
	{ ICE_IPV6_IL,		62 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv4 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x60, /* IP 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 34, dst port 2152 (GTP-U) */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
	0x00, 0x08, 0x11, 0x00,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
951 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_TCP_IL,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x44, 0x11, 0x00,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 (GTP-U) */
	0x00, 0x44, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x28, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 102 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1003 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV4_IL,		82 },
	{ ICE_UDP_ILOS,		102 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x38, 0x11, 0x00,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 (GTP-U) */
	0x00, 0x38, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x1c, /* IP 82 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 102 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1052 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_TCP_IL,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner TCP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x58, 0x11, 0x00,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 (GTP-U) */
	0x00, 0x58, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x14, 0x06, 0x00,	/* next header 6 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* TCP 122 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1109 
/* Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP,		62 },
	{ ICE_IPV6_IL,		82 },
	{ ICE_UDP_ILOS,		122 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for Outer IPv6 + Outer UDP + GTP + Inner IPv6 + Inner UDP */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
	0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,

	0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
	0x00, 0x4c, 0x11, 0x00,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x08, 0x68, /* UDP 54, dst port 2152 (GTP-U) */
	0x00, 0x4c, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
	0x00, 0x00, 0x00, 0x00,

	0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
	0x00, 0x08, 0x11, 0x00,	/* next header 17 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* UDP 122 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
1163 
/* Header offsets for the no-payload GTP-U dummy packet below; ICE_GTP_NO_PAY
 * marks a GTP header with no inner L3/L4 to match on.
 */
ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_GTP_NO_PAY,	42 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1171 
/* Dummy packet: outer IPv4 + UDP carrying GTP-U with a PDU session extension
 * header and a minimal inner IPv4 header (no inner L4 match).
 */
ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,		/* EtherType: IPv4 */

	0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x11, 0x00, 0x00, /* protocol UDP (17) */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x85,

	0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
	0x00, 0x00, 0x00, 0x00,

	0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1201 
/* Header offsets for the outer-IPv6 no-payload GTP dummy packet below. */
ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_OF,		54 },
	{ ICE_GTP_NO_PAY,	62 },
	{ ICE_PROTOCOL_LAST,	0 },
};
1209 
/* Dummy packet: outer IPv6 + UDP carrying a bare GTP header (no payload,
 * no extension header) — used for GTP-C and GTP-U no-payload matches.
 */
ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x86, 0xdd,		/* EtherType: IPv6 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54, src/dst port 2152 (GTP-U) */
	0x00, 0x00, 0x00, 0x00,

	0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,		/* 2 bytes for 4 byte alignment */
};
1235 
/* Table of all dummy packet profiles, keyed by the ICE_PKT_* match flags.
 * NOTE(review): entries appear ordered most-specific-first (more flag bits
 * before fewer); presumably the lookup walks the table and takes the first
 * entry whose flags are satisfied, so new entries must preserve that
 * ordering — confirm against the profile-matching code before reordering.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
				  ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	/* GTP-C reuses the no-payload GTP-U templates */
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
				      ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
					  ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP |
				       ICE_PKT_VLAN),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN),
	/* catch-all: plain TCP packet, no flags required */
	ICE_PKT_PROFILE(tcp, 0),
};
1284 
/* Size helpers for variable-length switch rule AQ buffers: 's' is a pointer
 * to the rule element and 'l'/'n' the number of trailing flexible-array
 * entries (header bytes, large actions, or VSI list entries respectively).
 */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l)	struct_size((s), hdr_data, (l))
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s)	\
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
#define ICE_SW_RULE_LG_ACT_SIZE(s, n)		struct_size((s), act, (n))
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n)		struct_size((s), vsi, (n))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
			  ICE_MAX_NUM_RECIPES);
1300 
1301 /**
1302  * ice_init_def_sw_recp - initialize the recipe book keeping tables
1303  * @hw: pointer to the HW struct
1304  *
1305  * Allocate memory for the entire recipe table and initialize the structures/
1306  * entries corresponding to basic recipes.
1307  */
ice_init_def_sw_recp(struct ice_hw * hw)1308 int ice_init_def_sw_recp(struct ice_hw *hw)
1309 {
1310 	struct ice_sw_recipe *recps;
1311 	u8 i;
1312 
1313 	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
1314 			     sizeof(*recps), GFP_KERNEL);
1315 	if (!recps)
1316 		return -ENOMEM;
1317 
1318 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
1319 		recps[i].root_rid = i;
1320 		INIT_LIST_HEAD(&recps[i].filt_rules);
1321 		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
1322 		INIT_LIST_HEAD(&recps[i].rg_list);
1323 		mutex_init(&recps[i].filt_rule_lock);
1324 	}
1325 
1326 	hw->switch_info->recp_list = recps;
1327 
1328 	return 0;
1329 }
1330 
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	/* resume point for a multi-call retrieval; 0 on the first call */
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		/* f/w overwrites the descriptor fields with the continuation
		 * cookie and the element count actually placed in buf
		 */
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}
1377 
/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct; on success vsi_num,
 *	     vsis_allocd and vsis_unallocated are updated from the response
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	/* cmd and res alias the same descriptor parameter area; cmd is
	 * filled before send, res is read back after completion
	 */
	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* when the caller supplies a specific VSI number (not allocating
	 * from the pool), pass it to f/w marked as valid
	 */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	/* buffer carries data for f/w to read */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}
1420 
/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct; on success vsis_allocd and
 *	     vsis_unallocated are updated from the response
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	/* cmd and resp alias the same descriptor parameter area */
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}
1456 
/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct; vsi_num selects the VSI and
 *	     info carries the new context; on success vsis_allocd and
 *	     vsis_unallocated are updated from the response
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	/* cmd and resp alias the same descriptor parameter area */
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	/* buffer carries data for f/w to read */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}
1493 
1494 /**
1495  * ice_is_vsi_valid - check whether the VSI is valid or not
1496  * @hw: pointer to the HW struct
1497  * @vsi_handle: VSI handle
1498  *
1499  * check whether the VSI is valid or not
1500  */
ice_is_vsi_valid(struct ice_hw * hw,u16 vsi_handle)1501 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
1502 {
1503 	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
1504 }
1505 
1506 /**
1507  * ice_get_hw_vsi_num - return the HW VSI number
1508  * @hw: pointer to the HW struct
1509  * @vsi_handle: VSI handle
1510  *
1511  * return the HW VSI number
1512  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
1513  */
ice_get_hw_vsi_num(struct ice_hw * hw,u16 vsi_handle)1514 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
1515 {
1516 	return hw->vsi_ctx[vsi_handle]->vsi_num;
1517 }
1518 
1519 /**
1520  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
1521  * @hw: pointer to the HW struct
1522  * @vsi_handle: VSI handle
1523  *
1524  * return the VSI context entry for a given VSI handle
1525  */
ice_get_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)1526 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1527 {
1528 	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
1529 }
1530 
1531 /**
1532  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
1533  * @hw: pointer to the HW struct
1534  * @vsi_handle: VSI handle
1535  * @vsi: VSI context pointer
1536  *
1537  * save the VSI context entry for a given VSI handle
1538  */
1539 static void
ice_save_vsi_ctx(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi)1540 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
1541 {
1542 	hw->vsi_ctx[vsi_handle] = vsi;
1543 }
1544 
1545 /**
1546  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1547  * @hw: pointer to the HW struct
1548  * @vsi_handle: VSI handle
1549  */
ice_clear_vsi_q_ctx(struct ice_hw * hw,u16 vsi_handle)1550 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1551 {
1552 	struct ice_vsi_ctx *vsi;
1553 	u8 i;
1554 
1555 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1556 	if (!vsi)
1557 		return;
1558 	ice_for_each_traffic_class(i) {
1559 		if (vsi->lan_q_ctx[i]) {
1560 			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1561 			vsi->lan_q_ctx[i] = NULL;
1562 		}
1563 		if (vsi->rdma_q_ctx[i]) {
1564 			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1565 			vsi->rdma_q_ctx[i] = NULL;
1566 		}
1567 	}
1568 }
1569 
1570 /**
1571  * ice_clear_vsi_ctx - clear the VSI context entry
1572  * @hw: pointer to the HW struct
1573  * @vsi_handle: VSI handle
1574  *
1575  * clear the VSI context entry
1576  */
ice_clear_vsi_ctx(struct ice_hw * hw,u16 vsi_handle)1577 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
1578 {
1579 	struct ice_vsi_ctx *vsi;
1580 
1581 	vsi = ice_get_vsi_ctx(hw, vsi_handle);
1582 	if (vsi) {
1583 		ice_clear_vsi_q_ctx(hw, vsi_handle);
1584 		devm_kfree(ice_hw_to_dev(hw), vsi);
1585 		hw->vsi_ctx[vsi_handle] = NULL;
1586 	}
1587 }
1588 
1589 /**
1590  * ice_clear_all_vsi_ctx - clear all the VSI context entries
1591  * @hw: pointer to the HW struct
1592  */
ice_clear_all_vsi_ctx(struct ice_hw * hw)1593 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
1594 {
1595 	u16 i;
1596 
1597 	for (i = 0; i < ICE_MAX_VSI; i++)
1598 		ice_clear_vsi_ctx(hw, i);
1599 }
1600 
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 *
 * Return: 0 on success, -EINVAL for an out-of-range handle, -ENOMEM if the
 * bookkeeping copy cannot be allocated, or the AQ status on firmware error.
 */
int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	int status;

	if (vsi_handle >= ICE_MAX_VSI)
		return -EINVAL;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			/* roll back the VSI just created in hardware so the
			 * f/w and driver bookkeeping stay consistent
			 */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return -ENOMEM;
		}
		/* shallow copy of the caller's context is saved; the caller
		 * keeps ownership of the original
		 */
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}
1642 
1643 /**
1644  * ice_free_vsi- free VSI context from hardware and VSI handle list
1645  * @hw: pointer to the HW struct
1646  * @vsi_handle: unique VSI handle
1647  * @vsi_ctx: pointer to a VSI context struct
1648  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
1649  * @cd: pointer to command details structure or NULL
1650  *
1651  * Free VSI context info from hardware as well as from VSI handle list
1652  */
1653 int
ice_free_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,bool keep_vsi_alloc,struct ice_sq_cd * cd)1654 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1655 	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
1656 {
1657 	int status;
1658 
1659 	if (!ice_is_vsi_valid(hw, vsi_handle))
1660 		return -EINVAL;
1661 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1662 	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
1663 	if (!status)
1664 		ice_clear_vsi_ctx(hw, vsi_handle);
1665 	return status;
1666 }
1667 
1668 /**
1669  * ice_update_vsi
1670  * @hw: pointer to the HW struct
1671  * @vsi_handle: unique VSI handle
1672  * @vsi_ctx: pointer to a VSI context struct
1673  * @cd: pointer to command details structure or NULL
1674  *
1675  * Update VSI context in the hardware
1676  */
1677 int
ice_update_vsi(struct ice_hw * hw,u16 vsi_handle,struct ice_vsi_ctx * vsi_ctx,struct ice_sq_cd * cd)1678 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
1679 	       struct ice_sq_cd *cd)
1680 {
1681 	if (!ice_is_vsi_valid(hw, vsi_handle))
1682 		return -EINVAL;
1683 	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
1684 	return ice_aq_update_vsi(hw, vsi_ctx, cd);
1685 }
1686 
1687 /**
1688  * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
1689  * @hw: pointer to HW struct
1690  * @vsi_handle: VSI SW index
1691  * @enable: boolean for enable/disable
1692  */
1693 int
ice_cfg_rdma_fltr(struct ice_hw * hw,u16 vsi_handle,bool enable)1694 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
1695 {
1696 	struct ice_vsi_ctx *ctx;
1697 
1698 	ctx = ice_get_vsi_ctx(hw, vsi_handle);
1699 	if (!ctx)
1700 		return -EIO;
1701 
1702 	if (enable)
1703 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1704 	else
1705 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
1706 
1707 	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
1708 }
1709 
/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned (alloc) or used for lookup (free)
 * @lkup_type: switch rule filter lookup type; selects the resource type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL for an
 * unsupported lookup type, or the AQ status.
 */
static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	u16 buf_len;
	int status;

	/* buffer holds exactly one resource element */
	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;
	sw_buf->num_elems = cpu_to_le16(1);

	/* MAC/ethertype/promisc lookups use a replication list; VLAN uses a
	 * prune list; anything else is not a VSI-list-capable lookup
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = -EINVAL;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	/* freeing requires the ID of the list to release */
	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	/* on alloc, f/w returns the newly assigned list ID */
	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}
1766 
/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 *
 * Return: 0 on success, -EINVAL for an unsupported opcode, -ENOENT when an
 * update/remove targets a rule f/w does not know, or the AQ status.
 */
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	int status;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	/* buffer carries data for f/w to read */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* map f/w "no such rule" on update/remove to a kernel errno so
	 * callers can distinguish a missing rule from other failures
	 */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = -ENOENT;

	return status;
}
1802 
/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x0290)
 *
 * Note: the same opcode also updates a pre-existing recipe when the list
 * entry carries its recipe index (see ice_update_recipe_lkup_idx()).
 */
static int
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	/* buffer carries data for f/w to read */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
}
1831 
/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get(0x0292)
 *
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
static int
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;
	int status;

	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	/* num_sub_recipes is read back unconditionally; it was zeroed above,
	 * so on failure *num_recipes reflects whatever f/w left in the
	 * descriptor (0 if untouched)
	 */
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);

	return status;
}
1875 
/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current recipe
 * contents based on the recipe's ID. Then modify the field vector index and
 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
 * the pre-existing recipe with the modifications.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the status of the
 * get/add recipe AQ calls.
 */
int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
{
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	int status;

	/* ice_aq_get_recipe() requires room for all possible recipes */
	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
	if (!rcp_list)
		return -ENOMEM;

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);
		goto error_out;
	}

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			cpu_to_le16(params->mask);

	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	/* write back only the first (modified) entry */
	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
			  status);

error_out:
	kfree(rcp_list);
	return status;
}
1933 
/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: recipe bitmap (input); must be at least as large as
 *	      cmd->recipe_assoc
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */
static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
1959 
/**
 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to look up
 * @r_bitmap: recipe bitmap filled in from the response on success; must be
 *	      at least as large as cmd->recipe_assoc
 * @cd: pointer to command details structure or NULL
 * Get the recipes associated with the given profile ID (0x0293)
 */
static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status)
		/* association bitmap is returned in the descriptor itself */
		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));

	return status;
}
1986 
/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 *
 * Return: 0 on success with *rid set, -ENOMEM on allocation failure, or the
 * AQ status.
 */
static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	u16 buf_len;
	int status;

	/* buffer holds exactly one resource element */
	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;

	sw_buf->num_elems = cpu_to_le16(1);
	/* request a recipe resource shared across functions */
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
					ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
	kfree(sw_buf);

	return status;
}
2015 
2016 /**
2017  * ice_get_recp_to_prof_map - updates recipe to profile mapping
2018  * @hw: pointer to hardware structure
2019  *
2020  * This function is used to populate recipe_to_profile matrix where index to
2021  * this array is the recipe ID and the element is the mapping of which profiles
2022  * is this recipe mapped to.
2023  */
ice_get_recp_to_prof_map(struct ice_hw * hw)2024 static void ice_get_recp_to_prof_map(struct ice_hw *hw)
2025 {
2026 	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
2027 	u16 i;
2028 
2029 	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
2030 		u16 j;
2031 
2032 		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
2033 		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
2034 		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
2035 			continue;
2036 		bitmap_copy(profile_to_recipe[i], r_bitmap,
2037 			    ICE_MAX_NUM_RECIPES);
2038 		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
2039 			set_bit(i, recipe_to_profile[j]);
2040 	}
2041 }
2042 
2043 /**
2044  * ice_collect_result_idx - copy result index values
2045  * @buf: buffer that contains the result index
2046  * @recp: the recipe struct to copy data into
2047  */
2048 static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem * buf,struct ice_sw_recipe * recp)2049 ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
2050 		       struct ice_sw_recipe *recp)
2051 {
2052 	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2053 		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2054 			recp->res_idxs);
2055 }
2056 
2057 /**
2058  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
2059  * @hw: pointer to hardware structure
2060  * @recps: struct that we need to populate
2061  * @rid: recipe ID that we are populating
2062  * @refresh_required: true if we should get recipe to profile mapping from FW
2063  *
2064  * This function is used to populate all the necessary entries into our
2065  * bookkeeping so that we have a current list of all the recipes that are
2066  * programmed in the firmware.
2067  */
2068 static int
ice_get_recp_frm_fw(struct ice_hw * hw,struct ice_sw_recipe * recps,u8 rid,bool * refresh_required)2069 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
2070 		    bool *refresh_required)
2071 {
2072 	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
2073 	struct ice_aqc_recipe_data_elem *tmp;
2074 	u16 num_recps = ICE_MAX_NUM_RECIPES;
2075 	struct ice_prot_lkup_ext *lkup_exts;
2076 	u8 fv_word_idx = 0;
2077 	u16 sub_recps;
2078 	int status;
2079 
2080 	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
2081 
2082 	/* we need a buffer big enough to accommodate all the recipes */
2083 	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
2084 	if (!tmp)
2085 		return -ENOMEM;
2086 
2087 	tmp[0].recipe_indx = rid;
2088 	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
2089 	/* non-zero status meaning recipe doesn't exist */
2090 	if (status)
2091 		goto err_unroll;
2092 
2093 	/* Get recipe to profile map so that we can get the fv from lkups that
2094 	 * we read for a recipe from FW. Since we want to minimize the number of
2095 	 * times we make this FW call, just make one call and cache the copy
2096 	 * until a new recipe is added. This operation is only required the
2097 	 * first time to get the changes from FW. Then to search existing
2098 	 * entries we don't need to update the cache again until another recipe
2099 	 * gets added.
2100 	 */
2101 	if (*refresh_required) {
2102 		ice_get_recp_to_prof_map(hw);
2103 		*refresh_required = false;
2104 	}
2105 
2106 	/* Start populating all the entries for recps[rid] based on lkups from
2107 	 * firmware. Note that we are only creating the root recipe in our
2108 	 * database.
2109 	 */
2110 	lkup_exts = &recps[rid].lkup_exts;
2111 
2112 	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
2113 		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
2114 		struct ice_recp_grp_entry *rg_entry;
2115 		u8 i, prof, idx, prot = 0;
2116 		bool is_root;
2117 		u16 off = 0;
2118 
2119 		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
2120 					GFP_KERNEL);
2121 		if (!rg_entry) {
2122 			status = -ENOMEM;
2123 			goto err_unroll;
2124 		}
2125 
2126 		idx = root_bufs.recipe_indx;
2127 		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
2128 
2129 		/* Mark all result indices in this chain */
2130 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
2131 			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
2132 				result_bm);
2133 
2134 		/* get the first profile that is associated with rid */
2135 		prof = find_first_bit(recipe_to_profile[idx],
2136 				      ICE_MAX_NUM_PROFILES);
2137 		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
2138 			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
2139 
2140 			rg_entry->fv_idx[i] = lkup_indx;
2141 			rg_entry->fv_mask[i] =
2142 				le16_to_cpu(root_bufs.content.mask[i + 1]);
2143 
2144 			/* If the recipe is a chained recipe then all its
2145 			 * child recipe's result will have a result index.
2146 			 * To fill fv_words we should not use those result
2147 			 * index, we only need the protocol ids and offsets.
2148 			 * We will skip all the fv_idx which stores result
2149 			 * index in them. We also need to skip any fv_idx which
2150 			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
2151 			 * valid offset value.
2152 			 */
2153 			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
2154 			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
2155 			    rg_entry->fv_idx[i] == 0)
2156 				continue;
2157 
2158 			ice_find_prot_off(hw, ICE_BLK_SW, prof,
2159 					  rg_entry->fv_idx[i], &prot, &off);
2160 			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
2161 			lkup_exts->fv_words[fv_word_idx].off = off;
2162 			lkup_exts->field_mask[fv_word_idx] =
2163 				rg_entry->fv_mask[i];
2164 			fv_word_idx++;
2165 		}
2166 		/* populate rg_list with the data from the child entry of this
2167 		 * recipe
2168 		 */
2169 		list_add(&rg_entry->l_entry, &recps[rid].rg_list);
2170 
2171 		/* Propagate some data to the recipe database */
2172 		recps[idx].is_root = !!is_root;
2173 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2174 		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
2175 		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
2176 			recps[idx].chain_idx = root_bufs.content.result_indx &
2177 				~ICE_AQ_RECIPE_RESULT_EN;
2178 			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
2179 		} else {
2180 			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
2181 		}
2182 
2183 		if (!is_root)
2184 			continue;
2185 
2186 		/* Only do the following for root recipes entries */
2187 		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
2188 		       sizeof(recps[idx].r_bitmap));
2189 		recps[idx].root_rid = root_bufs.content.rid &
2190 			~ICE_AQ_RECIPE_ID_IS_ROOT;
2191 		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
2192 	}
2193 
2194 	/* Complete initialization of the root recipe entry */
2195 	lkup_exts->n_val_words = fv_word_idx;
2196 	recps[rid].big_recp = (num_recps > 1);
2197 	recps[rid].n_grp_count = (u8)num_recps;
2198 	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
2199 					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
2200 					   GFP_KERNEL);
2201 	if (!recps[rid].root_buf) {
2202 		status = -ENOMEM;
2203 		goto err_unroll;
2204 	}
2205 
2206 	/* Copy result indexes */
2207 	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
2208 	recps[rid].recp_created = true;
2209 
2210 err_unroll:
2211 	kfree(tmp);
2212 	return status;
2213 }
2214 
2215 /* ice_init_port_info - Initialize port_info with switch configuration data
2216  * @pi: pointer to port_info
2217  * @vsi_port_num: VSI number or port number
2218  * @type: Type of switch element (port or VSI)
2219  * @swid: switch ID of the switch the element is attached to
2220  * @pf_vf_num: PF or VF number
2221  * @is_vf: true if the element is a VF, false otherwise
2222  */
2223 static void
ice_init_port_info(struct ice_port_info * pi,u16 vsi_port_num,u8 type,u16 swid,u16 pf_vf_num,bool is_vf)2224 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
2225 		   u16 swid, u16 pf_vf_num, bool is_vf)
2226 {
2227 	switch (type) {
2228 	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
2229 		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
2230 		pi->sw_id = swid;
2231 		pi->pf_vf_num = pf_vf_num;
2232 		pi->is_vf = is_vf;
2233 		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
2234 		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
2235 		break;
2236 	default:
2237 		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
2238 		break;
2239 	}
2240 }
2241 
2242 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
2243  * @hw: pointer to the hardware structure
2244  */
ice_get_initial_sw_cfg(struct ice_hw * hw)2245 int ice_get_initial_sw_cfg(struct ice_hw *hw)
2246 {
2247 	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
2248 	u16 req_desc = 0;
2249 	u16 num_elems;
2250 	int status;
2251 	u16 i;
2252 
2253 	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
2254 			    GFP_KERNEL);
2255 
2256 	if (!rbuf)
2257 		return -ENOMEM;
2258 
2259 	/* Multiple calls to ice_aq_get_sw_cfg may be required
2260 	 * to get all the switch configuration information. The need
2261 	 * for additional calls is indicated by ice_aq_get_sw_cfg
2262 	 * writing a non-zero value in req_desc
2263 	 */
2264 	do {
2265 		struct ice_aqc_get_sw_cfg_resp_elem *ele;
2266 
2267 		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
2268 					   &req_desc, &num_elems, NULL);
2269 
2270 		if (status)
2271 			break;
2272 
2273 		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
2274 			u16 pf_vf_num, swid, vsi_port_num;
2275 			bool is_vf = false;
2276 			u8 res_type;
2277 
2278 			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
2279 				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
2280 
2281 			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
2282 				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
2283 
2284 			swid = le16_to_cpu(ele->swid);
2285 
2286 			if (le16_to_cpu(ele->pf_vf_num) &
2287 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
2288 				is_vf = true;
2289 
2290 			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
2291 					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
2292 
2293 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
2294 				/* FW VSI is not needed. Just continue. */
2295 				continue;
2296 			}
2297 
2298 			ice_init_port_info(hw->port_info, vsi_port_num,
2299 					   res_type, swid, pf_vf_num, is_vf);
2300 		}
2301 	} while (req_desc && !status);
2302 
2303 	devm_kfree(ice_hw_to_dev(hw), rbuf);
2304 	return status;
2305 }
2306 
2307 /**
2308  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
2309  * @hw: pointer to the hardware structure
2310  * @fi: filter info structure to fill/update
2311  *
2312  * This helper function populates the lb_en and lan_en elements of the provided
2313  * ice_fltr_info struct using the switch's type and characteristics of the
2314  * switch rule being configured.
2315  */
ice_fill_sw_info(struct ice_hw * hw,struct ice_fltr_info * fi)2316 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
2317 {
2318 	fi->lb_en = false;
2319 	fi->lan_en = false;
2320 	if ((fi->flag & ICE_FLTR_TX) &&
2321 	    (fi->fltr_act == ICE_FWD_TO_VSI ||
2322 	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2323 	     fi->fltr_act == ICE_FWD_TO_Q ||
2324 	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2325 		/* Setting LB for prune actions will result in replicated
2326 		 * packets to the internal switch that will be dropped.
2327 		 */
2328 		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
2329 			fi->lb_en = true;
2330 
2331 		/* Set lan_en to TRUE if
2332 		 * 1. The switch is a VEB AND
2333 		 * 2
2334 		 * 2.1 The lookup is a directional lookup like ethertype,
2335 		 * promiscuous, ethertype-MAC, promiscuous-VLAN
2336 		 * and default-port OR
2337 		 * 2.2 The lookup is VLAN, OR
2338 		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
2339 		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
2340 		 *
2341 		 * OR
2342 		 *
2343 		 * The switch is a VEPA.
2344 		 *
2345 		 * In all other cases, the LAN enable has to be set to false.
2346 		 */
2347 		if (hw->evb_veb) {
2348 			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2349 			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
2350 			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2351 			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2352 			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
2353 			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
2354 			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
2355 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
2356 			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
2357 			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2358 				fi->lan_en = true;
2359 		} else {
2360 			fi->lan_en = true;
2361 		}
2362 	}
2363 }
2364 
2365 /**
2366  * ice_fill_sw_rule - Helper function to fill switch rule structure
2367  * @hw: pointer to the hardware structure
2368  * @f_info: entry containing packet forwarding information
2369  * @s_rule: switch rule structure to be filled in based on mac_entry
2370  * @opc: switch rules population command type - pass in the command opcode
2371  */
2372 static void
ice_fill_sw_rule(struct ice_hw * hw,struct ice_fltr_info * f_info,struct ice_sw_rule_lkup_rx_tx * s_rule,enum ice_adminq_opc opc)2373 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2374 		 struct ice_sw_rule_lkup_rx_tx *s_rule,
2375 		 enum ice_adminq_opc opc)
2376 {
2377 	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2378 	u16 vlan_tpid = ETH_P_8021Q;
2379 	void *daddr = NULL;
2380 	u16 eth_hdr_sz;
2381 	u8 *eth_hdr;
2382 	u32 act = 0;
2383 	__be16 *off;
2384 	u8 q_rgn;
2385 
2386 	if (opc == ice_aqc_opc_remove_sw_rules) {
2387 		s_rule->act = 0;
2388 		s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2389 		s_rule->hdr_len = 0;
2390 		return;
2391 	}
2392 
2393 	eth_hdr_sz = sizeof(dummy_eth_header);
2394 	eth_hdr = s_rule->hdr_data;
2395 
2396 	/* initialize the ether header with a dummy header */
2397 	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
2398 	ice_fill_sw_info(hw, f_info);
2399 
2400 	switch (f_info->fltr_act) {
2401 	case ICE_FWD_TO_VSI:
2402 		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2403 			ICE_SINGLE_ACT_VSI_ID_M;
2404 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2405 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2406 				ICE_SINGLE_ACT_VALID_BIT;
2407 		break;
2408 	case ICE_FWD_TO_VSI_LIST:
2409 		act |= ICE_SINGLE_ACT_VSI_LIST;
2410 		act |= (f_info->fwd_id.vsi_list_id <<
2411 			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2412 			ICE_SINGLE_ACT_VSI_LIST_ID_M;
2413 		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2414 			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2415 				ICE_SINGLE_ACT_VALID_BIT;
2416 		break;
2417 	case ICE_FWD_TO_Q:
2418 		act |= ICE_SINGLE_ACT_TO_Q;
2419 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2420 			ICE_SINGLE_ACT_Q_INDEX_M;
2421 		break;
2422 	case ICE_DROP_PACKET:
2423 		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2424 			ICE_SINGLE_ACT_VALID_BIT;
2425 		break;
2426 	case ICE_FWD_TO_QGRP:
2427 		q_rgn = f_info->qgrp_size > 0 ?
2428 			(u8)ilog2(f_info->qgrp_size) : 0;
2429 		act |= ICE_SINGLE_ACT_TO_Q;
2430 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2431 			ICE_SINGLE_ACT_Q_INDEX_M;
2432 		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2433 			ICE_SINGLE_ACT_Q_REGION_M;
2434 		break;
2435 	default:
2436 		return;
2437 	}
2438 
2439 	if (f_info->lb_en)
2440 		act |= ICE_SINGLE_ACT_LB_ENABLE;
2441 	if (f_info->lan_en)
2442 		act |= ICE_SINGLE_ACT_LAN_ENABLE;
2443 
2444 	switch (f_info->lkup_type) {
2445 	case ICE_SW_LKUP_MAC:
2446 		daddr = f_info->l_data.mac.mac_addr;
2447 		break;
2448 	case ICE_SW_LKUP_VLAN:
2449 		vlan_id = f_info->l_data.vlan.vlan_id;
2450 		if (f_info->l_data.vlan.tpid_valid)
2451 			vlan_tpid = f_info->l_data.vlan.tpid;
2452 		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2453 		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2454 			act |= ICE_SINGLE_ACT_PRUNE;
2455 			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2456 		}
2457 		break;
2458 	case ICE_SW_LKUP_ETHERTYPE_MAC:
2459 		daddr = f_info->l_data.ethertype_mac.mac_addr;
2460 		fallthrough;
2461 	case ICE_SW_LKUP_ETHERTYPE:
2462 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2463 		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
2464 		break;
2465 	case ICE_SW_LKUP_MAC_VLAN:
2466 		daddr = f_info->l_data.mac_vlan.mac_addr;
2467 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
2468 		break;
2469 	case ICE_SW_LKUP_PROMISC_VLAN:
2470 		vlan_id = f_info->l_data.mac_vlan.vlan_id;
2471 		fallthrough;
2472 	case ICE_SW_LKUP_PROMISC:
2473 		daddr = f_info->l_data.mac_vlan.mac_addr;
2474 		break;
2475 	default:
2476 		break;
2477 	}
2478 
2479 	s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
2480 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2481 		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
2482 
2483 	/* Recipe set depending on lookup type */
2484 	s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
2485 	s_rule->src = cpu_to_le16(f_info->src);
2486 	s_rule->act = cpu_to_le32(act);
2487 
2488 	if (daddr)
2489 		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
2490 
2491 	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2492 		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2493 		*off = cpu_to_be16(vlan_id);
2494 		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2495 		*off = cpu_to_be16(vlan_tpid);
2496 	}
2497 
2498 	/* Create the switch rule with the final dummy Ethernet header */
2499 	if (opc != ice_aqc_opc_update_sw_rules)
2500 		s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
2501 }
2502 
2503 /**
2504  * ice_add_marker_act
2505  * @hw: pointer to the hardware structure
2506  * @m_ent: the management entry for which sw marker needs to be added
2507  * @sw_marker: sw marker to tag the Rx descriptor with
2508  * @l_id: large action resource ID
2509  *
2510  * Create a large action to hold software marker and update the switch rule
2511  * entry pointed by m_ent with newly created large action
2512  */
2513 static int
ice_add_marker_act(struct ice_hw * hw,struct ice_fltr_mgmt_list_entry * m_ent,u16 sw_marker,u16 l_id)2514 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2515 		   u16 sw_marker, u16 l_id)
2516 {
2517 	struct ice_sw_rule_lkup_rx_tx *rx_tx;
2518 	struct ice_sw_rule_lg_act *lg_act;
2519 	/* For software marker we need 3 large actions
2520 	 * 1. FWD action: FWD TO VSI or VSI LIST
2521 	 * 2. GENERIC VALUE action to hold the profile ID
2522 	 * 3. GENERIC VALUE action to hold the software marker ID
2523 	 */
2524 	const u16 num_lg_acts = 3;
2525 	u16 lg_act_size;
2526 	u16 rules_size;
2527 	int status;
2528 	u32 act;
2529 	u16 id;
2530 
2531 	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2532 		return -EINVAL;
2533 
2534 	/* Create two back-to-back switch rules and submit them to the HW using
2535 	 * one memory buffer:
2536 	 *    1. Large Action
2537 	 *    2. Look up Tx Rx
2538 	 */
2539 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
2540 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
2541 	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
2542 	if (!lg_act)
2543 		return -ENOMEM;
2544 
2545 	rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
2546 
2547 	/* Fill in the first switch rule i.e. large action */
2548 	lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
2549 	lg_act->index = cpu_to_le16(l_id);
2550 	lg_act->size = cpu_to_le16(num_lg_acts);
2551 
2552 	/* First action VSI forwarding or VSI list forwarding depending on how
2553 	 * many VSIs
2554 	 */
2555 	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2556 		m_ent->fltr_info.fwd_id.hw_vsi_id;
2557 
2558 	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2559 	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
2560 	if (m_ent->vsi_count > 1)
2561 		act |= ICE_LG_ACT_VSI_LIST;
2562 	lg_act->act[0] = cpu_to_le32(act);
2563 
2564 	/* Second action descriptor type */
2565 	act = ICE_LG_ACT_GENERIC;
2566 
2567 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2568 	lg_act->act[1] = cpu_to_le32(act);
2569 
2570 	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2571 	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2572 
2573 	/* Third action Marker value */
2574 	act |= ICE_LG_ACT_GENERIC;
2575 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2576 		ICE_LG_ACT_GENERIC_VALUE_M;
2577 
2578 	lg_act->act[2] = cpu_to_le32(act);
2579 
2580 	/* call the fill switch rule to fill the lookup Tx Rx structure */
2581 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2582 			 ice_aqc_opc_update_sw_rules);
2583 
2584 	/* Update the action to point to the large action ID */
2585 	rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
2586 				 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2587 				  ICE_SINGLE_ACT_PTR_VAL_M));
2588 
2589 	/* Use the filter rule ID of the previously created rule with single
2590 	 * act. Once the update happens, hardware will treat this as large
2591 	 * action
2592 	 */
2593 	rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
2594 
2595 	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2596 				 ice_aqc_opc_update_sw_rules, NULL);
2597 	if (!status) {
2598 		m_ent->lg_act_idx = l_id;
2599 		m_ent->sw_marker_id = sw_marker;
2600 	}
2601 
2602 	devm_kfree(ice_hw_to_dev(hw), lg_act);
2603 	return status;
2604 }
2605 
2606 /**
2607  * ice_create_vsi_list_map
2608  * @hw: pointer to the hardware structure
2609  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2610  * @num_vsi: number of VSI handles in the array
2611  * @vsi_list_id: VSI list ID generated as part of allocate resource
2612  *
2613  * Helper function to create a new entry of VSI list ID to VSI mapping
2614  * using the given VSI list ID
2615  */
2616 static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id)2617 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2618 			u16 vsi_list_id)
2619 {
2620 	struct ice_switch_info *sw = hw->switch_info;
2621 	struct ice_vsi_list_map_info *v_map;
2622 	int i;
2623 
2624 	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2625 	if (!v_map)
2626 		return NULL;
2627 
2628 	v_map->vsi_list_id = vsi_list_id;
2629 	v_map->ref_cnt = 1;
2630 	for (i = 0; i < num_vsi; i++)
2631 		set_bit(vsi_handle_arr[i], v_map->vsi_map);
2632 
2633 	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2634 	return v_map;
2635 }
2636 
2637 /**
2638  * ice_update_vsi_list_rule
2639  * @hw: pointer to the hardware structure
2640  * @vsi_handle_arr: array of VSI handles to form a VSI list
2641  * @num_vsi: number of VSI handles in the array
2642  * @vsi_list_id: VSI list ID generated as part of allocate resource
2643  * @remove: Boolean value to indicate if this is a remove action
2644  * @opc: switch rules population command type - pass in the command opcode
2645  * @lkup_type: lookup type of the filter
2646  *
2647  * Call AQ command to add a new switch rule or update existing switch rule
2648  * using the given VSI list ID
2649  */
2650 static int
ice_update_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 vsi_list_id,bool remove,enum ice_adminq_opc opc,enum ice_sw_lkup_type lkup_type)2651 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2652 			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2653 			 enum ice_sw_lkup_type lkup_type)
2654 {
2655 	struct ice_sw_rule_vsi_list *s_rule;
2656 	u16 s_rule_size;
2657 	u16 rule_type;
2658 	int status;
2659 	int i;
2660 
2661 	if (!num_vsi)
2662 		return -EINVAL;
2663 
2664 	if (lkup_type == ICE_SW_LKUP_MAC ||
2665 	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2666 	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2667 	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2668 	    lkup_type == ICE_SW_LKUP_PROMISC ||
2669 	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
2670 		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2671 			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2672 	else if (lkup_type == ICE_SW_LKUP_VLAN)
2673 		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2674 			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2675 	else
2676 		return -EINVAL;
2677 
2678 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2679 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2680 	if (!s_rule)
2681 		return -ENOMEM;
2682 	for (i = 0; i < num_vsi; i++) {
2683 		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2684 			status = -EINVAL;
2685 			goto exit;
2686 		}
2687 		/* AQ call requires hw_vsi_id(s) */
2688 		s_rule->vsi[i] =
2689 			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2690 	}
2691 
2692 	s_rule->hdr.type = cpu_to_le16(rule_type);
2693 	s_rule->number_vsi = cpu_to_le16(num_vsi);
2694 	s_rule->index = cpu_to_le16(vsi_list_id);
2695 
2696 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2697 
2698 exit:
2699 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2700 	return status;
2701 }
2702 
2703 /**
2704  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2705  * @hw: pointer to the HW struct
2706  * @vsi_handle_arr: array of VSI handles to form a VSI list
2707  * @num_vsi: number of VSI handles in the array
2708  * @vsi_list_id: stores the ID of the VSI list to be created
2709  * @lkup_type: switch rule filter's lookup type
2710  */
2711 static int
ice_create_vsi_list_rule(struct ice_hw * hw,u16 * vsi_handle_arr,u16 num_vsi,u16 * vsi_list_id,enum ice_sw_lkup_type lkup_type)2712 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2713 			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2714 {
2715 	int status;
2716 
2717 	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2718 					    ice_aqc_opc_alloc_res);
2719 	if (status)
2720 		return status;
2721 
2722 	/* Update the newly created VSI list to include the specified VSIs */
2723 	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2724 					*vsi_list_id, false,
2725 					ice_aqc_opc_add_sw_rules, lkup_type);
2726 }
2727 
2728 /**
2729  * ice_create_pkt_fwd_rule
2730  * @hw: pointer to the hardware structure
2731  * @f_entry: entry containing packet forwarding information
2732  *
2733  * Create switch rule with given filter information and add an entry
2734  * to the corresponding filter management list to track this switch rule
2735  * and VSI mapping
2736  */
2737 static int
ice_create_pkt_fwd_rule(struct ice_hw * hw,struct ice_fltr_list_entry * f_entry)2738 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2739 			struct ice_fltr_list_entry *f_entry)
2740 {
2741 	struct ice_fltr_mgmt_list_entry *fm_entry;
2742 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2743 	enum ice_sw_lkup_type l_type;
2744 	struct ice_sw_recipe *recp;
2745 	int status;
2746 
2747 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2748 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2749 			      GFP_KERNEL);
2750 	if (!s_rule)
2751 		return -ENOMEM;
2752 	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2753 				GFP_KERNEL);
2754 	if (!fm_entry) {
2755 		status = -ENOMEM;
2756 		goto ice_create_pkt_fwd_rule_exit;
2757 	}
2758 
2759 	fm_entry->fltr_info = f_entry->fltr_info;
2760 
2761 	/* Initialize all the fields for the management entry */
2762 	fm_entry->vsi_count = 1;
2763 	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2764 	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2765 	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2766 
2767 	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2768 			 ice_aqc_opc_add_sw_rules);
2769 
2770 	status = ice_aq_sw_rules(hw, s_rule,
2771 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2772 				 ice_aqc_opc_add_sw_rules, NULL);
2773 	if (status) {
2774 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
2775 		goto ice_create_pkt_fwd_rule_exit;
2776 	}
2777 
2778 	f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2779 	fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2780 
2781 	/* The book keeping entries will get removed when base driver
2782 	 * calls remove filter AQ command
2783 	 */
2784 	l_type = fm_entry->fltr_info.lkup_type;
2785 	recp = &hw->switch_info->recp_list[l_type];
2786 	list_add(&fm_entry->list_entry, &recp->filt_rules);
2787 
2788 ice_create_pkt_fwd_rule_exit:
2789 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2790 	return status;
2791 }
2792 
2793 /**
2794  * ice_update_pkt_fwd_rule
2795  * @hw: pointer to the hardware structure
2796  * @f_info: filter information for switch rule
2797  *
2798  * Call AQ command to update a previously created switch rule with a
2799  * VSI list ID
2800  */
2801 static int
ice_update_pkt_fwd_rule(struct ice_hw * hw,struct ice_fltr_info * f_info)2802 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2803 {
2804 	struct ice_sw_rule_lkup_rx_tx *s_rule;
2805 	int status;
2806 
2807 	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2808 			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2809 			      GFP_KERNEL);
2810 	if (!s_rule)
2811 		return -ENOMEM;
2812 
2813 	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2814 
2815 	s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2816 
2817 	/* Update switch rule with new rule set to forward VSI list */
2818 	status = ice_aq_sw_rules(hw, s_rule,
2819 				 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2820 				 ice_aqc_opc_update_sw_rules, NULL);
2821 
2822 	devm_kfree(ice_hw_to_dev(hw), s_rule);
2823 	return status;
2824 }
2825 
2826 /**
2827  * ice_update_sw_rule_bridge_mode
2828  * @hw: pointer to the HW struct
2829  *
2830  * Updates unicast switch filter rules based on VEB/VEPA mode
2831  */
ice_update_sw_rule_bridge_mode(struct ice_hw * hw)2832 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2833 {
2834 	struct ice_switch_info *sw = hw->switch_info;
2835 	struct ice_fltr_mgmt_list_entry *fm_entry;
2836 	struct list_head *rule_head;
2837 	struct mutex *rule_lock; /* Lock to protect filter rule list */
2838 	int status = 0;
2839 
2840 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2841 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2842 
2843 	mutex_lock(rule_lock);
2844 	list_for_each_entry(fm_entry, rule_head, list_entry) {
2845 		struct ice_fltr_info *fi = &fm_entry->fltr_info;
2846 		u8 *addr = fi->l_data.mac.mac_addr;
2847 
2848 		/* Update unicast Tx rules to reflect the selected
2849 		 * VEB/VEPA mode
2850 		 */
2851 		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2852 		    (fi->fltr_act == ICE_FWD_TO_VSI ||
2853 		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2854 		     fi->fltr_act == ICE_FWD_TO_Q ||
2855 		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
2856 			status = ice_update_pkt_fwd_rule(hw, fi);
2857 			if (status)
2858 				break;
2859 		}
2860 	}
2861 
2862 	mutex_unlock(rule_lock);
2863 
2864 	return status;
2865 }
2866 
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 *
 * Callers visible in this file invoke this helper while holding the recipe's
 * filt_rule_lock (see ice_add_rule_internal() / ice_add_vlan_internal()).
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* Queue/queue-group destinations cannot be converted to, or mixed
	 * with, VSI-list forwarding; reject those combinations up front.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Keep the book keeping entry in sync with what was just
		 * programmed into hardware.
		 */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* An entry with vsi_count >= 2 must already have a list map */
		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added;
		 * nothing to program, and vsi_count stays unchanged.
		 */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
2983 
2984 /**
2985  * ice_find_rule_entry - Search a rule entry
2986  * @hw: pointer to the hardware structure
2987  * @recp_id: lookup type for which the specified rule needs to be searched
2988  * @f_info: rule information
2989  *
2990  * Helper function to search for a given rule entry
2991  * Returns pointer to entry storing the rule if found
2992  */
2993 static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw * hw,u8 recp_id,struct ice_fltr_info * f_info)2994 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2995 {
2996 	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2997 	struct ice_switch_info *sw = hw->switch_info;
2998 	struct list_head *list_head;
2999 
3000 	list_head = &sw->recp_list[recp_id].filt_rules;
3001 	list_for_each_entry(list_itr, list_head, list_entry) {
3002 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3003 			    sizeof(f_info->l_data)) &&
3004 		    f_info->flag == list_itr->fltr_info.flag) {
3005 			ret = list_itr;
3006 			break;
3007 		}
3008 	}
3009 	return ret;
3010 }
3011 
3012 /**
3013  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3014  * @hw: pointer to the hardware structure
3015  * @recp_id: lookup type for which VSI lists needs to be searched
3016  * @vsi_handle: VSI handle to be found in VSI list
3017  * @vsi_list_id: VSI list ID found containing vsi_handle
3018  *
3019  * Helper function to search a VSI list with single entry containing given VSI
3020  * handle element. This can be extended further to search VSI list with more
3021  * than 1 vsi_count. Returns pointer to VSI list entry if found.
3022  */
3023 static struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw * hw,u8 recp_id,u16 vsi_handle,u16 * vsi_list_id)3024 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3025 			u16 *vsi_list_id)
3026 {
3027 	struct ice_vsi_list_map_info *map_info = NULL;
3028 	struct ice_switch_info *sw = hw->switch_info;
3029 	struct ice_fltr_mgmt_list_entry *list_itr;
3030 	struct list_head *list_head;
3031 
3032 	list_head = &sw->recp_list[recp_id].filt_rules;
3033 	list_for_each_entry(list_itr, list_head, list_entry) {
3034 		if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3035 			map_info = list_itr->vsi_list_info;
3036 			if (test_bit(vsi_handle, map_info->vsi_map)) {
3037 				*vsi_list_id = map_info->vsi_list_id;
3038 				return map_info;
3039 			}
3040 		}
3041 	}
3042 	return NULL;
3043 }
3044 
3045 /**
3046  * ice_add_rule_internal - add rule for a given lookup type
3047  * @hw: pointer to the hardware structure
3048  * @recp_id: lookup type (recipe ID) for which rule has to be added
3049  * @f_entry: structure containing MAC forwarding information
3050  *
3051  * Adds or updates the rule lists for a given recipe
3052  */
3053 static int
ice_add_rule_internal(struct ice_hw * hw,u8 recp_id,struct ice_fltr_list_entry * f_entry)3054 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3055 		      struct ice_fltr_list_entry *f_entry)
3056 {
3057 	struct ice_switch_info *sw = hw->switch_info;
3058 	struct ice_fltr_info *new_fltr, *cur_fltr;
3059 	struct ice_fltr_mgmt_list_entry *m_entry;
3060 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3061 	int status = 0;
3062 
3063 	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3064 		return -EINVAL;
3065 	f_entry->fltr_info.fwd_id.hw_vsi_id =
3066 		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3067 
3068 	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3069 
3070 	mutex_lock(rule_lock);
3071 	new_fltr = &f_entry->fltr_info;
3072 	if (new_fltr->flag & ICE_FLTR_RX)
3073 		new_fltr->src = hw->port_info->lport;
3074 	else if (new_fltr->flag & ICE_FLTR_TX)
3075 		new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
3076 
3077 	m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3078 	if (!m_entry) {
3079 		mutex_unlock(rule_lock);
3080 		return ice_create_pkt_fwd_rule(hw, f_entry);
3081 	}
3082 
3083 	cur_fltr = &m_entry->fltr_info;
3084 	status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3085 	mutex_unlock(rule_lock);
3086 
3087 	return status;
3088 }
3089 
3090 /**
3091  * ice_remove_vsi_list_rule
3092  * @hw: pointer to the hardware structure
3093  * @vsi_list_id: VSI list ID generated as part of allocate resource
3094  * @lkup_type: switch rule filter lookup type
3095  *
3096  * The VSI list should be emptied before this function is called to remove the
3097  * VSI list.
3098  */
3099 static int
ice_remove_vsi_list_rule(struct ice_hw * hw,u16 vsi_list_id,enum ice_sw_lkup_type lkup_type)3100 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3101 			 enum ice_sw_lkup_type lkup_type)
3102 {
3103 	struct ice_sw_rule_vsi_list *s_rule;
3104 	u16 s_rule_size;
3105 	int status;
3106 
3107 	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3108 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3109 	if (!s_rule)
3110 		return -ENOMEM;
3111 
3112 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3113 	s_rule->index = cpu_to_le16(vsi_list_id);
3114 
3115 	/* Free the vsi_list resource that we allocated. It is assumed that the
3116 	 * list is empty at this point.
3117 	 */
3118 	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3119 					    ice_aqc_opc_free_res);
3120 
3121 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3122 	return status;
3123 }
3124 
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Remove @vsi_handle from the VSI list used by @fm_list. When only a single
 * VSI remains afterwards (non-VLAN lookups), the rule is converted back to a
 * plain "forward to VSI" rule and the now-unused VSI list is freed. For VLAN
 * lookups the list is freed only once it is completely empty.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status = 0;

	/* only entries that actually use a VSI list can be updated here */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist.
	 * NOTE(review): fm_list->vsi_list_info is dereferenced without a
	 * NULL check — assumed non-NULL whenever fltr_act is
	 * ICE_FWD_TO_VSI_LIST; confirm against the callers.
	 */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* ask FW to drop this VSI from the list (remove flag = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* find the single VSI still present in the list */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* convert the rule back to direct "forward to VSI",
		 * targeting the one remaining VSI
		 */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		/* keep the book keeping entry in sync with hardware */
		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
3212 
/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Remove one VSI's subscription to a rule. The switch rule itself is only
 * deleted when no other VSI (via a shared or now-empty VSI list) still
 * references it.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	u16 vsi_handle;
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = -ENOENT;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* rule forwards directly (VSI/queue): delete it outright */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		/* FWD_TO_VSI_LIST without a list map is inconsistent state */
		status = -ENOENT;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_sw_rule_lkup_rx_tx *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
				      GFP_KERNEL);
		if (!s_rule) {
			status = -ENOMEM;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
					 1, ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}
3303 
3304 /**
3305  * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3306  * @hw: pointer to the hardware structure
3307  * @mac: MAC address to be checked (for MAC filter)
3308  * @vsi_handle: check MAC filter for this VSI
3309  */
ice_mac_fltr_exist(struct ice_hw * hw,u8 * mac,u16 vsi_handle)3310 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3311 {
3312 	struct ice_fltr_mgmt_list_entry *entry;
3313 	struct list_head *rule_head;
3314 	struct ice_switch_info *sw;
3315 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3316 	u16 hw_vsi_id;
3317 
3318 	if (!ice_is_vsi_valid(hw, vsi_handle))
3319 		return false;
3320 
3321 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3322 	sw = hw->switch_info;
3323 	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3324 	if (!rule_head)
3325 		return false;
3326 
3327 	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3328 	mutex_lock(rule_lock);
3329 	list_for_each_entry(entry, rule_head, list_entry) {
3330 		struct ice_fltr_info *f_info = &entry->fltr_info;
3331 		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
3332 
3333 		if (is_zero_ether_addr(mac_addr))
3334 			continue;
3335 
3336 		if (f_info->flag != ICE_FLTR_TX ||
3337 		    f_info->src_id != ICE_SRC_ID_VSI ||
3338 		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
3339 		    f_info->fltr_act != ICE_FWD_TO_VSI ||
3340 		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3341 			continue;
3342 
3343 		if (ether_addr_equal(mac, mac_addr)) {
3344 			mutex_unlock(rule_lock);
3345 			return true;
3346 		}
3347 	}
3348 	mutex_unlock(rule_lock);
3349 	return false;
3350 }
3351 
/**
 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @vlan_id: VLAN ID
 * @vsi_handle: check MAC filter for this VSI
 *
 * Return: true if a Tx, VSI-sourced VLAN rule for @vlan_id exists that
 * forwards to @vsi_handle (directly or via a VSI list containing it),
 * false otherwise.
 */
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	/* VLAN IDs are only 12 bits wide */
	if (vlan_id > ICE_MAX_VLAN_ID)
		return false;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	if (!rule_head) /* defensive; address of a struct member */
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;

		if (entry_vlan_id > ICE_MAX_VLAN_ID)
			continue;

		/* only Tx VSI-sourced VLAN rules are of interest */
		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
			continue;

		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
			continue;

		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
				continue;
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list.
			 * NOTE(review): the membership test only runs for
			 * single-VSI lists (vsi_count == 1); larger lists
			 * fall through and match on VLAN ID alone — confirm
			 * this is intended.
			 */
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			}
		}

		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);

	return false;
}
3422 
/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * Multicast (and, when ucast_shared, unicast) addresses are programmed one
 * at a time via ice_add_rule_internal(); exclusive unicast addresses are
 * collected and programmed in bulk through chunked admin-queue calls.
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 num_unicast = 0;
	int status = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return -EINVAL;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* Pass 1: validate every entry; count exclusive unicast addresses
	 * for the bulk path and program multicast/shared ones immediately.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return -EINVAL;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return -EINVAL;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return -EINVAL;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return -EEXIST;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = -ENOMEM;
		goto ice_add_mac_exit;
	}

	/* Pass 2: fill one switch rule per exclusive unicast address */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_sw_rule_lkup_rx_tx *entry = r_iter;

		/* cap each AQ call at what fits in ICE_AQ_MAX_BUF_LEN */
		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (typeof(s_rule))
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = -ENOMEM;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
3572 
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * VLAN rules always forward via a VSI list. Three cases are handled:
 * no rule exists yet (create one, reusing a single-VSI list if available),
 * the rule's list has ref_cnt == 1 (extend it in place), or the list is
 * shared by other VLAN rules (build a new two-VSI list and repoint the rule).
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return -EINVAL;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return -EINVAL;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* look the freshly created entry back up so its
			 * vsi_list_info can be attached
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = -ENOENT;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = -EIO;
			goto exit;
		}

		/* the shared list holds exactly one VSI; fetch its handle */
		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = -EEXIST;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}
3725 
3726 /**
3727  * ice_add_vlan - Add VLAN based filter rule
3728  * @hw: pointer to the hardware structure
3729  * @v_list: list of VLAN entries and forwarding information
3730  */
ice_add_vlan(struct ice_hw * hw,struct list_head * v_list)3731 int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
3732 {
3733 	struct ice_fltr_list_entry *v_list_itr;
3734 
3735 	if (!v_list || !hw)
3736 		return -EINVAL;
3737 
3738 	list_for_each_entry(v_list_itr, v_list, list_entry) {
3739 		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3740 			return -EINVAL;
3741 		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3742 		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3743 		if (v_list_itr->status)
3744 			return v_list_itr->status;
3745 	}
3746 	return 0;
3747 }
3748 
3749 /**
3750  * ice_add_eth_mac - Add ethertype and MAC based filter rule
3751  * @hw: pointer to the hardware structure
3752  * @em_list: list of ether type MAC filter, MAC is optional
3753  *
3754  * This function requires the caller to populate the entries in
3755  * the filter list with the necessary fields (including flags to
3756  * indicate Tx or Rx rules).
3757  */
ice_add_eth_mac(struct ice_hw * hw,struct list_head * em_list)3758 int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3759 {
3760 	struct ice_fltr_list_entry *em_list_itr;
3761 
3762 	if (!em_list || !hw)
3763 		return -EINVAL;
3764 
3765 	list_for_each_entry(em_list_itr, em_list, list_entry) {
3766 		enum ice_sw_lkup_type l_type =
3767 			em_list_itr->fltr_info.lkup_type;
3768 
3769 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3770 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3771 			return -EINVAL;
3772 
3773 		em_list_itr->status = ice_add_rule_internal(hw, l_type,
3774 							    em_list_itr);
3775 		if (em_list_itr->status)
3776 			return em_list_itr->status;
3777 	}
3778 	return 0;
3779 }
3780 
3781 /**
3782  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3783  * @hw: pointer to the hardware structure
3784  * @em_list: list of ethertype or ethertype MAC entries
3785  */
ice_remove_eth_mac(struct ice_hw * hw,struct list_head * em_list)3786 int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
3787 {
3788 	struct ice_fltr_list_entry *em_list_itr, *tmp;
3789 
3790 	if (!em_list || !hw)
3791 		return -EINVAL;
3792 
3793 	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
3794 		enum ice_sw_lkup_type l_type =
3795 			em_list_itr->fltr_info.lkup_type;
3796 
3797 		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3798 		    l_type != ICE_SW_LKUP_ETHERTYPE)
3799 			return -EINVAL;
3800 
3801 		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3802 							       em_list_itr);
3803 		if (em_list_itr->status)
3804 			return em_list_itr->status;
3805 	}
3806 	return 0;
3807 }
3808 
3809 /**
3810  * ice_rem_sw_rule_info
3811  * @hw: pointer to the hardware structure
3812  * @rule_head: pointer to the switch list structure that we want to delete
3813  */
3814 static void
ice_rem_sw_rule_info(struct ice_hw * hw,struct list_head * rule_head)3815 ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3816 {
3817 	if (!list_empty(rule_head)) {
3818 		struct ice_fltr_mgmt_list_entry *entry;
3819 		struct ice_fltr_mgmt_list_entry *tmp;
3820 
3821 		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
3822 			list_del(&entry->list_entry);
3823 			devm_kfree(ice_hw_to_dev(hw), entry);
3824 		}
3825 	}
3826 }
3827 
3828 /**
3829  * ice_rem_adv_rule_info
3830  * @hw: pointer to the hardware structure
3831  * @rule_head: pointer to the switch list structure that we want to delete
3832  */
3833 static void
ice_rem_adv_rule_info(struct ice_hw * hw,struct list_head * rule_head)3834 ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
3835 {
3836 	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3837 	struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3838 
3839 	if (list_empty(rule_head))
3840 		return;
3841 
3842 	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
3843 		list_del(&lst_itr->list_entry);
3844 		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
3845 		devm_kfree(ice_hw_to_dev(hw), lst_itr);
3846 	}
3847 }
3848 
3849 /**
3850  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3851  * @hw: pointer to the hardware structure
3852  * @vsi_handle: VSI handle to set as default
3853  * @set: true to add the above mentioned switch rule, false to remove it
3854  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3855  *
3856  * add filter rule to set/unset given VSI as default VSI for the switch
3857  * (represented by swid)
3858  */
ice_cfg_dflt_vsi(struct ice_hw * hw,u16 vsi_handle,bool set,u8 direction)3859 int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
3860 {
3861 	struct ice_sw_rule_lkup_rx_tx *s_rule;
3862 	struct ice_fltr_info f_info;
3863 	enum ice_adminq_opc opcode;
3864 	u16 s_rule_size;
3865 	u16 hw_vsi_id;
3866 	int status;
3867 
3868 	if (!ice_is_vsi_valid(hw, vsi_handle))
3869 		return -EINVAL;
3870 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3871 
3872 	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule) :
3873 			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
3874 
3875 	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3876 	if (!s_rule)
3877 		return -ENOMEM;
3878 
3879 	memset(&f_info, 0, sizeof(f_info));
3880 
3881 	f_info.lkup_type = ICE_SW_LKUP_DFLT;
3882 	f_info.flag = direction;
3883 	f_info.fltr_act = ICE_FWD_TO_VSI;
3884 	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3885 
3886 	if (f_info.flag & ICE_FLTR_RX) {
3887 		f_info.src = hw->port_info->lport;
3888 		f_info.src_id = ICE_SRC_ID_LPORT;
3889 		if (!set)
3890 			f_info.fltr_rule_id =
3891 				hw->port_info->dflt_rx_vsi_rule_id;
3892 	} else if (f_info.flag & ICE_FLTR_TX) {
3893 		f_info.src_id = ICE_SRC_ID_VSI;
3894 		f_info.src = hw_vsi_id;
3895 		if (!set)
3896 			f_info.fltr_rule_id =
3897 				hw->port_info->dflt_tx_vsi_rule_id;
3898 	}
3899 
3900 	if (set)
3901 		opcode = ice_aqc_opc_add_sw_rules;
3902 	else
3903 		opcode = ice_aqc_opc_remove_sw_rules;
3904 
3905 	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3906 
3907 	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3908 	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3909 		goto out;
3910 	if (set) {
3911 		u16 index = le16_to_cpu(s_rule->index);
3912 
3913 		if (f_info.flag & ICE_FLTR_TX) {
3914 			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
3915 			hw->port_info->dflt_tx_vsi_rule_id = index;
3916 		} else if (f_info.flag & ICE_FLTR_RX) {
3917 			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
3918 			hw->port_info->dflt_rx_vsi_rule_id = index;
3919 		}
3920 	} else {
3921 		if (f_info.flag & ICE_FLTR_TX) {
3922 			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3923 			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3924 		} else if (f_info.flag & ICE_FLTR_RX) {
3925 			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3926 			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3927 		}
3928 	}
3929 
3930 out:
3931 	devm_kfree(ice_hw_to_dev(hw), s_rule);
3932 	return status;
3933 }
3934 
3935 /**
3936  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3937  * @hw: pointer to the hardware structure
3938  * @recp_id: lookup type for which the specified rule needs to be searched
3939  * @f_info: rule information
3940  *
3941  * Helper function to search for a unicast rule entry - this is to be used
3942  * to remove unicast MAC filter that is not shared with other VSIs on the
3943  * PF switch.
3944  *
3945  * Returns pointer to entry storing the rule if found
3946  */
3947 static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct ice_hw * hw,u8 recp_id,struct ice_fltr_info * f_info)3948 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3949 			  struct ice_fltr_info *f_info)
3950 {
3951 	struct ice_switch_info *sw = hw->switch_info;
3952 	struct ice_fltr_mgmt_list_entry *list_itr;
3953 	struct list_head *list_head;
3954 
3955 	list_head = &sw->recp_list[recp_id].filt_rules;
3956 	list_for_each_entry(list_itr, list_head, list_entry) {
3957 		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3958 			    sizeof(f_info->l_data)) &&
3959 		    f_info->fwd_id.hw_vsi_id ==
3960 		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
3961 		    f_info->flag == list_itr->fltr_info.flag)
3962 			return list_itr;
3963 	}
3964 	return NULL;
3965 }
3966 
3967 /**
3968  * ice_remove_mac - remove a MAC address based filter rule
3969  * @hw: pointer to the hardware structure
3970  * @m_list: list of MAC addresses and forwarding information
3971  *
3972  * This function removes either a MAC filter rule or a specific VSI from a
3973  * VSI list for a multicast MAC address.
3974  *
3975  * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
3976  * be aware that this call will only work if all the entries passed into m_list
3977  * were added previously. It will not attempt to do a partial remove of entries
3978  * that were found.
3979  */
ice_remove_mac(struct ice_hw * hw,struct list_head * m_list)3980 int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
3981 {
3982 	struct ice_fltr_list_entry *list_itr, *tmp;
3983 	struct mutex *rule_lock; /* Lock to protect filter rule list */
3984 
3985 	if (!m_list)
3986 		return -EINVAL;
3987 
3988 	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3989 	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
3990 		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3991 		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3992 		u16 vsi_handle;
3993 
3994 		if (l_type != ICE_SW_LKUP_MAC)
3995 			return -EINVAL;
3996 
3997 		vsi_handle = list_itr->fltr_info.vsi_handle;
3998 		if (!ice_is_vsi_valid(hw, vsi_handle))
3999 			return -EINVAL;
4000 
4001 		list_itr->fltr_info.fwd_id.hw_vsi_id =
4002 					ice_get_hw_vsi_num(hw, vsi_handle);
4003 		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
4004 			/* Don't remove the unicast address that belongs to
4005 			 * another VSI on the switch, since it is not being
4006 			 * shared...
4007 			 */
4008 			mutex_lock(rule_lock);
4009 			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
4010 						       &list_itr->fltr_info)) {
4011 				mutex_unlock(rule_lock);
4012 				return -ENOENT;
4013 			}
4014 			mutex_unlock(rule_lock);
4015 		}
4016 		list_itr->status = ice_remove_rule_internal(hw,
4017 							    ICE_SW_LKUP_MAC,
4018 							    list_itr);
4019 		if (list_itr->status)
4020 			return list_itr->status;
4021 	}
4022 	return 0;
4023 }
4024 
4025 /**
4026  * ice_remove_vlan - Remove VLAN based filter rule
4027  * @hw: pointer to the hardware structure
4028  * @v_list: list of VLAN entries and forwarding information
4029  */
ice_remove_vlan(struct ice_hw * hw,struct list_head * v_list)4030 int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
4031 {
4032 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4033 
4034 	if (!v_list || !hw)
4035 		return -EINVAL;
4036 
4037 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4038 		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
4039 
4040 		if (l_type != ICE_SW_LKUP_VLAN)
4041 			return -EINVAL;
4042 		v_list_itr->status = ice_remove_rule_internal(hw,
4043 							      ICE_SW_LKUP_VLAN,
4044 							      v_list_itr);
4045 		if (v_list_itr->status)
4046 			return v_list_itr->status;
4047 	}
4048 	return 0;
4049 }
4050 
4051 /**
4052  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
4053  * @fm_entry: filter entry to inspect
4054  * @vsi_handle: VSI handle to compare with filter info
4055  */
4056 static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry * fm_entry,u16 vsi_handle)4057 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
4058 {
4059 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
4060 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
4061 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
4062 		 fm_entry->vsi_list_info &&
4063 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
4064 }
4065 
4066 /**
4067  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
4068  * @hw: pointer to the hardware structure
4069  * @vsi_handle: VSI handle to remove filters from
4070  * @vsi_list_head: pointer to the list to add entry to
4071  * @fi: pointer to fltr_info of filter entry to copy & add
4072  *
4073  * Helper function, used when creating a list of filters to remove from
4074  * a specific VSI. The entry added to vsi_list_head is a COPY of the
4075  * original filter entry, with the exception of fltr_info.fltr_act and
4076  * fltr_info.fwd_id fields. These are set such that later logic can
4077  * extract which VSI to remove the fltr from, and pass on that information.
4078  */
4079 static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct list_head * vsi_list_head,struct ice_fltr_info * fi)4080 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4081 			       struct list_head *vsi_list_head,
4082 			       struct ice_fltr_info *fi)
4083 {
4084 	struct ice_fltr_list_entry *tmp;
4085 
4086 	/* this memory is freed up in the caller function
4087 	 * once filters for this VSI are removed
4088 	 */
4089 	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
4090 	if (!tmp)
4091 		return -ENOMEM;
4092 
4093 	tmp->fltr_info = *fi;
4094 
4095 	/* Overwrite these fields to indicate which VSI to remove filter from,
4096 	 * so find and remove logic can extract the information from the
4097 	 * list entries. Note that original entries will still have proper
4098 	 * values.
4099 	 */
4100 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
4101 	tmp->fltr_info.vsi_handle = vsi_handle;
4102 	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4103 
4104 	list_add(&tmp->list_entry, vsi_list_head);
4105 
4106 	return 0;
4107 }
4108 
4109 /**
4110  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
4111  * @hw: pointer to the hardware structure
4112  * @vsi_handle: VSI handle to remove filters from
4113  * @lkup_list_head: pointer to the list that has certain lookup type filters
4114  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
4115  *
4116  * Locates all filters in lkup_list_head that are used by the given VSI,
4117  * and adds COPIES of those entries to vsi_list_head (intended to be used
4118  * to remove the listed filters).
4119  * Note that this means all entries in vsi_list_head must be explicitly
4120  * deallocated by the caller when done with list.
4121  */
4122 static int
ice_add_to_vsi_fltr_list(struct ice_hw * hw,u16 vsi_handle,struct list_head * lkup_list_head,struct list_head * vsi_list_head)4123 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
4124 			 struct list_head *lkup_list_head,
4125 			 struct list_head *vsi_list_head)
4126 {
4127 	struct ice_fltr_mgmt_list_entry *fm_entry;
4128 	int status = 0;
4129 
4130 	/* check to make sure VSI ID is valid and within boundary */
4131 	if (!ice_is_vsi_valid(hw, vsi_handle))
4132 		return -EINVAL;
4133 
4134 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
4135 		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
4136 			continue;
4137 
4138 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4139 							vsi_list_head,
4140 							&fm_entry->fltr_info);
4141 		if (status)
4142 			return status;
4143 	}
4144 	return status;
4145 }
4146 
4147 /**
4148  * ice_determine_promisc_mask
4149  * @fi: filter info to parse
4150  *
4151  * Helper function to determine which ICE_PROMISC_ mask corresponds
4152  * to given filter into.
4153  */
ice_determine_promisc_mask(struct ice_fltr_info * fi)4154 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
4155 {
4156 	u16 vid = fi->l_data.mac_vlan.vlan_id;
4157 	u8 *macaddr = fi->l_data.mac.mac_addr;
4158 	bool is_tx_fltr = false;
4159 	u8 promisc_mask = 0;
4160 
4161 	if (fi->flag == ICE_FLTR_TX)
4162 		is_tx_fltr = true;
4163 
4164 	if (is_broadcast_ether_addr(macaddr))
4165 		promisc_mask |= is_tx_fltr ?
4166 			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
4167 	else if (is_multicast_ether_addr(macaddr))
4168 		promisc_mask |= is_tx_fltr ?
4169 			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
4170 	else if (is_unicast_ether_addr(macaddr))
4171 		promisc_mask |= is_tx_fltr ?
4172 			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
4173 	if (vid)
4174 		promisc_mask |= is_tx_fltr ?
4175 			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
4176 
4177 	return promisc_mask;
4178 }
4179 
4180 /**
4181  * ice_remove_promisc - Remove promisc based filter rules
4182  * @hw: pointer to the hardware structure
4183  * @recp_id: recipe ID for which the rule needs to removed
4184  * @v_list: list of promisc entries
4185  */
4186 static int
ice_remove_promisc(struct ice_hw * hw,u8 recp_id,struct list_head * v_list)4187 ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
4188 {
4189 	struct ice_fltr_list_entry *v_list_itr, *tmp;
4190 
4191 	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
4192 		v_list_itr->status =
4193 			ice_remove_rule_internal(hw, recp_id, v_list_itr);
4194 		if (v_list_itr->status)
4195 			return v_list_itr->status;
4196 	}
4197 	return 0;
4198 }
4199 
4200 /**
4201  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
4202  * @hw: pointer to the hardware structure
4203  * @vsi_handle: VSI handle to clear mode
4204  * @promisc_mask: mask of promiscuous config bits to clear
4205  * @vid: VLAN ID to clear VLAN promiscuous
4206  */
4207 int
ice_clear_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid)4208 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4209 		      u16 vid)
4210 {
4211 	struct ice_switch_info *sw = hw->switch_info;
4212 	struct ice_fltr_list_entry *fm_entry, *tmp;
4213 	struct list_head remove_list_head;
4214 	struct ice_fltr_mgmt_list_entry *itr;
4215 	struct list_head *rule_head;
4216 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4217 	int status = 0;
4218 	u8 recipe_id;
4219 
4220 	if (!ice_is_vsi_valid(hw, vsi_handle))
4221 		return -EINVAL;
4222 
4223 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
4224 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4225 	else
4226 		recipe_id = ICE_SW_LKUP_PROMISC;
4227 
4228 	rule_head = &sw->recp_list[recipe_id].filt_rules;
4229 	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
4230 
4231 	INIT_LIST_HEAD(&remove_list_head);
4232 
4233 	mutex_lock(rule_lock);
4234 	list_for_each_entry(itr, rule_head, list_entry) {
4235 		struct ice_fltr_info *fltr_info;
4236 		u8 fltr_promisc_mask = 0;
4237 
4238 		if (!ice_vsi_uses_fltr(itr, vsi_handle))
4239 			continue;
4240 		fltr_info = &itr->fltr_info;
4241 
4242 		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
4243 		    vid != fltr_info->l_data.mac_vlan.vlan_id)
4244 			continue;
4245 
4246 		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
4247 
4248 		/* Skip if filter is not completely specified by given mask */
4249 		if (fltr_promisc_mask & ~promisc_mask)
4250 			continue;
4251 
4252 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
4253 							&remove_list_head,
4254 							fltr_info);
4255 		if (status) {
4256 			mutex_unlock(rule_lock);
4257 			goto free_fltr_list;
4258 		}
4259 	}
4260 	mutex_unlock(rule_lock);
4261 
4262 	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
4263 
4264 free_fltr_list:
4265 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4266 		list_del(&fm_entry->list_entry);
4267 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4268 	}
4269 
4270 	return status;
4271 }
4272 
4273 /**
4274  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
4275  * @hw: pointer to the hardware structure
4276  * @vsi_handle: VSI handle to configure
4277  * @promisc_mask: mask of promiscuous config bits
4278  * @vid: VLAN ID to set VLAN promiscuous
4279  */
4280 int
ice_set_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,u16 vid)4281 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
4282 {
4283 	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
4284 	struct ice_fltr_list_entry f_list_entry;
4285 	struct ice_fltr_info new_fltr;
4286 	bool is_tx_fltr;
4287 	int status = 0;
4288 	u16 hw_vsi_id;
4289 	int pkt_type;
4290 	u8 recipe_id;
4291 
4292 	if (!ice_is_vsi_valid(hw, vsi_handle))
4293 		return -EINVAL;
4294 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4295 
4296 	memset(&new_fltr, 0, sizeof(new_fltr));
4297 
4298 	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
4299 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
4300 		new_fltr.l_data.mac_vlan.vlan_id = vid;
4301 		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
4302 	} else {
4303 		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
4304 		recipe_id = ICE_SW_LKUP_PROMISC;
4305 	}
4306 
4307 	/* Separate filters must be set for each direction/packet type
4308 	 * combination, so we will loop over the mask value, store the
4309 	 * individual type, and clear it out in the input mask as it
4310 	 * is found.
4311 	 */
4312 	while (promisc_mask) {
4313 		u8 *mac_addr;
4314 
4315 		pkt_type = 0;
4316 		is_tx_fltr = false;
4317 
4318 		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
4319 			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
4320 			pkt_type = UCAST_FLTR;
4321 		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
4322 			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
4323 			pkt_type = UCAST_FLTR;
4324 			is_tx_fltr = true;
4325 		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
4326 			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
4327 			pkt_type = MCAST_FLTR;
4328 		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
4329 			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
4330 			pkt_type = MCAST_FLTR;
4331 			is_tx_fltr = true;
4332 		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
4333 			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
4334 			pkt_type = BCAST_FLTR;
4335 		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
4336 			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
4337 			pkt_type = BCAST_FLTR;
4338 			is_tx_fltr = true;
4339 		}
4340 
4341 		/* Check for VLAN promiscuous flag */
4342 		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
4343 			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
4344 		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
4345 			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
4346 			is_tx_fltr = true;
4347 		}
4348 
4349 		/* Set filter DA based on packet type */
4350 		mac_addr = new_fltr.l_data.mac.mac_addr;
4351 		if (pkt_type == BCAST_FLTR) {
4352 			eth_broadcast_addr(mac_addr);
4353 		} else if (pkt_type == MCAST_FLTR ||
4354 			   pkt_type == UCAST_FLTR) {
4355 			/* Use the dummy ether header DA */
4356 			ether_addr_copy(mac_addr, dummy_eth_header);
4357 			if (pkt_type == MCAST_FLTR)
4358 				mac_addr[0] |= 0x1;	/* Set multicast bit */
4359 		}
4360 
4361 		/* Need to reset this to zero for all iterations */
4362 		new_fltr.flag = 0;
4363 		if (is_tx_fltr) {
4364 			new_fltr.flag |= ICE_FLTR_TX;
4365 			new_fltr.src = hw_vsi_id;
4366 		} else {
4367 			new_fltr.flag |= ICE_FLTR_RX;
4368 			new_fltr.src = hw->port_info->lport;
4369 		}
4370 
4371 		new_fltr.fltr_act = ICE_FWD_TO_VSI;
4372 		new_fltr.vsi_handle = vsi_handle;
4373 		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
4374 		f_list_entry.fltr_info = new_fltr;
4375 
4376 		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
4377 		if (status)
4378 			goto set_promisc_exit;
4379 	}
4380 
4381 set_promisc_exit:
4382 	return status;
4383 }
4384 
4385 /**
4386  * ice_set_vlan_vsi_promisc
4387  * @hw: pointer to the hardware structure
4388  * @vsi_handle: VSI handle to configure
4389  * @promisc_mask: mask of promiscuous config bits
4390  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
4391  *
4392  * Configure VSI with all associated VLANs to given promiscuous mode(s)
4393  */
4394 int
ice_set_vlan_vsi_promisc(struct ice_hw * hw,u16 vsi_handle,u8 promisc_mask,bool rm_vlan_promisc)4395 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
4396 			 bool rm_vlan_promisc)
4397 {
4398 	struct ice_switch_info *sw = hw->switch_info;
4399 	struct ice_fltr_list_entry *list_itr, *tmp;
4400 	struct list_head vsi_list_head;
4401 	struct list_head *vlan_head;
4402 	struct mutex *vlan_lock; /* Lock to protect filter rule list */
4403 	u16 vlan_id;
4404 	int status;
4405 
4406 	INIT_LIST_HEAD(&vsi_list_head);
4407 	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
4408 	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
4409 	mutex_lock(vlan_lock);
4410 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
4411 					  &vsi_list_head);
4412 	mutex_unlock(vlan_lock);
4413 	if (status)
4414 		goto free_fltr_list;
4415 
4416 	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
4417 		/* Avoid enabling or disabling VLAN zero twice when in double
4418 		 * VLAN mode
4419 		 */
4420 		if (ice_is_dvm_ena(hw) &&
4421 		    list_itr->fltr_info.l_data.vlan.tpid == 0)
4422 			continue;
4423 
4424 		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
4425 		if (rm_vlan_promisc)
4426 			status = ice_clear_vsi_promisc(hw, vsi_handle,
4427 						       promisc_mask, vlan_id);
4428 		else
4429 			status = ice_set_vsi_promisc(hw, vsi_handle,
4430 						     promisc_mask, vlan_id);
4431 		if (status && status != -EEXIST)
4432 			break;
4433 	}
4434 
4435 free_fltr_list:
4436 	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
4437 		list_del(&list_itr->list_entry);
4438 		devm_kfree(ice_hw_to_dev(hw), list_itr);
4439 	}
4440 	return status;
4441 }
4442 
4443 /**
4444  * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
4445  * @hw: pointer to the hardware structure
4446  * @vsi_handle: VSI handle to remove filters from
4447  * @lkup: switch rule filter lookup type
4448  */
4449 static void
ice_remove_vsi_lkup_fltr(struct ice_hw * hw,u16 vsi_handle,enum ice_sw_lkup_type lkup)4450 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
4451 			 enum ice_sw_lkup_type lkup)
4452 {
4453 	struct ice_switch_info *sw = hw->switch_info;
4454 	struct ice_fltr_list_entry *fm_entry;
4455 	struct list_head remove_list_head;
4456 	struct list_head *rule_head;
4457 	struct ice_fltr_list_entry *tmp;
4458 	struct mutex *rule_lock;	/* Lock to protect filter rule list */
4459 	int status;
4460 
4461 	INIT_LIST_HEAD(&remove_list_head);
4462 	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
4463 	rule_head = &sw->recp_list[lkup].filt_rules;
4464 	mutex_lock(rule_lock);
4465 	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
4466 					  &remove_list_head);
4467 	mutex_unlock(rule_lock);
4468 	if (status)
4469 		goto free_fltr_list;
4470 
4471 	switch (lkup) {
4472 	case ICE_SW_LKUP_MAC:
4473 		ice_remove_mac(hw, &remove_list_head);
4474 		break;
4475 	case ICE_SW_LKUP_VLAN:
4476 		ice_remove_vlan(hw, &remove_list_head);
4477 		break;
4478 	case ICE_SW_LKUP_PROMISC:
4479 	case ICE_SW_LKUP_PROMISC_VLAN:
4480 		ice_remove_promisc(hw, lkup, &remove_list_head);
4481 		break;
4482 	case ICE_SW_LKUP_MAC_VLAN:
4483 	case ICE_SW_LKUP_ETHERTYPE:
4484 	case ICE_SW_LKUP_ETHERTYPE_MAC:
4485 	case ICE_SW_LKUP_DFLT:
4486 	case ICE_SW_LKUP_LAST:
4487 	default:
4488 		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
4489 		break;
4490 	}
4491 
4492 free_fltr_list:
4493 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
4494 		list_del(&fm_entry->list_entry);
4495 		devm_kfree(ice_hw_to_dev(hw), fm_entry);
4496 	}
4497 }
4498 
4499 /**
4500  * ice_remove_vsi_fltr - Remove all filters for a VSI
4501  * @hw: pointer to the hardware structure
4502  * @vsi_handle: VSI handle to remove filters from
4503  */
ice_remove_vsi_fltr(struct ice_hw * hw,u16 vsi_handle)4504 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4505 {
4506 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4507 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4508 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4509 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4510 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4511 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4512 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4513 	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4514 }
4515 
4516 /**
4517  * ice_alloc_res_cntr - allocating resource counter
4518  * @hw: pointer to the hardware structure
4519  * @type: type of resource
4520  * @alloc_shared: if set it is shared else dedicated
4521  * @num_items: number of entries requested for FD resource type
4522  * @counter_id: counter index returned by AQ call
4523  */
4524 int
ice_alloc_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 * counter_id)4525 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4526 		   u16 *counter_id)
4527 {
4528 	struct ice_aqc_alloc_free_res_elem *buf;
4529 	u16 buf_len;
4530 	int status;
4531 
4532 	/* Allocate resource */
4533 	buf_len = struct_size(buf, elem, 1);
4534 	buf = kzalloc(buf_len, GFP_KERNEL);
4535 	if (!buf)
4536 		return -ENOMEM;
4537 
4538 	buf->num_elems = cpu_to_le16(num_items);
4539 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4540 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4541 
4542 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4543 				       ice_aqc_opc_alloc_res, NULL);
4544 	if (status)
4545 		goto exit;
4546 
4547 	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
4548 
4549 exit:
4550 	kfree(buf);
4551 	return status;
4552 }
4553 
4554 /**
4555  * ice_free_res_cntr - free resource counter
4556  * @hw: pointer to the hardware structure
4557  * @type: type of resource
4558  * @alloc_shared: if set it is shared else dedicated
4559  * @num_items: number of entries to be freed for FD resource type
4560  * @counter_id: counter ID resource which needs to be freed
4561  */
4562 int
ice_free_res_cntr(struct ice_hw * hw,u8 type,u8 alloc_shared,u16 num_items,u16 counter_id)4563 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4564 		  u16 counter_id)
4565 {
4566 	struct ice_aqc_alloc_free_res_elem *buf;
4567 	u16 buf_len;
4568 	int status;
4569 
4570 	/* Free resource */
4571 	buf_len = struct_size(buf, elem, 1);
4572 	buf = kzalloc(buf_len, GFP_KERNEL);
4573 	if (!buf)
4574 		return -ENOMEM;
4575 
4576 	buf->num_elems = cpu_to_le16(num_items);
4577 	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
4578 				      ICE_AQC_RES_TYPE_M) | alloc_shared);
4579 	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
4580 
4581 	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4582 				       ice_aqc_opc_free_res, NULL);
4583 	if (status)
4584 		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
4585 
4586 	kfree(buf);
4587 	return status;
4588 }
4589 
4590 /* This is mapping table entry that maps every word within a given protocol
4591  * structure to the real byte offset as per the specification of that
4592  * protocol header.
4593  * for example dst address is 3 words in ethertype header and corresponding
4594  * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4595  * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4596  * matching entry describing its field. This needs to be updated if new
4597  * structure is added to that union.
4598  */
4599 static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
4600 	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
4601 	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
4602 	{ ICE_ETYPE_OL,		{ 0 } },
4603 	{ ICE_ETYPE_IL,		{ 0 } },
4604 	{ ICE_VLAN_OFOS,	{ 2, 0 } },
4605 	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4606 	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4607 	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4608 				 26, 28, 30, 32, 34, 36, 38 } },
4609 	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4610 				 26, 28, 30, 32, 34, 36, 38 } },
4611 	{ ICE_TCP_IL,		{ 0, 2 } },
4612 	{ ICE_UDP_OF,		{ 0, 2 } },
4613 	{ ICE_UDP_ILOS,		{ 0, 2 } },
4614 	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
4615 	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
4616 	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
4617 	{ ICE_GTP,		{ 8, 10, 12, 14, 16, 18, 20, 22 } },
4618 	{ ICE_GTP_NO_PAY,	{ 8, 10, 12, 14 } },
4619 };
4620 
/* Mapping from software protocol type to hardware protocol ID.
 * Non-const on purpose: the ICE_VLAN_OFOS entry is rewritten to
 * ICE_VLAN_OF_HW by ice_change_proto_id_to_dvm() when DVM is enabled.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
};
4640 
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: type of recipe tunnel
 *
 * Walk the software recipe bookkeeping list (refreshing entries from FW
 * where SW has no record yet) looking for a recipe whose set of
 * (protocol, offset, mask) words exactly matches @lkup_exts and whose
 * tunnel type equals @tun_type.
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      enum ice_sw_tunnel_type tun_type)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "pe"th word was not found
				 * then this recipe is not what we are looking
				 * for. So break out from this loop and try the
				 * next recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 * Also tun type of recipe needs to be checked
			 */
			if (found && recp[i].tun_type == tun_type)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
4720 
4721 /**
4722  * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
4723  *
4724  * As protocol id for outer vlan is different in dvm and svm, if dvm is
4725  * supported protocol array record for outer vlan has to be modified to
4726  * reflect the value proper for DVM.
4727  */
ice_change_proto_id_to_dvm(void)4728 void ice_change_proto_id_to_dvm(void)
4729 {
4730 	u8 i;
4731 
4732 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4733 		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
4734 		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
4735 			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
4736 }
4737 
4738 /**
4739  * ice_prot_type_to_id - get protocol ID from protocol type
4740  * @type: protocol type
4741  * @id: pointer to variable that will receive the ID
4742  *
4743  * Returns true if found, false otherwise
4744  */
ice_prot_type_to_id(enum ice_protocol_type type,u8 * id)4745 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
4746 {
4747 	u8 i;
4748 
4749 	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
4750 		if (ice_prot_id_tbl[i].type == type) {
4751 			*id = ice_prot_id_tbl[i].protocol_id;
4752 			return true;
4753 		}
4754 	return false;
4755 }
4756 
4757 /**
4758  * ice_fill_valid_words - count valid words
4759  * @rule: advanced rule with lookup information
4760  * @lkup_exts: byte offset extractions of the words that are valid
4761  *
4762  * calculate valid words in a lookup rule using mask value
4763  */
4764 static u8
ice_fill_valid_words(struct ice_adv_lkup_elem * rule,struct ice_prot_lkup_ext * lkup_exts)4765 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4766 		     struct ice_prot_lkup_ext *lkup_exts)
4767 {
4768 	u8 j, word, prot_id, ret_val;
4769 
4770 	if (!ice_prot_type_to_id(rule->type, &prot_id))
4771 		return 0;
4772 
4773 	word = lkup_exts->n_val_words;
4774 
4775 	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4776 		if (((u16 *)&rule->m_u)[j] &&
4777 		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
4778 			/* No more space to accommodate */
4779 			if (word >= ICE_MAX_CHAIN_WORDS)
4780 				return 0;
4781 			lkup_exts->fv_words[word].off =
4782 				ice_prot_ext[rule->type].offs[j];
4783 			lkup_exts->fv_words[word].prot_id =
4784 				ice_prot_id_tbl[rule->type].protocol_id;
4785 			lkup_exts->field_mask[word] =
4786 				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
4787 			word++;
4788 		}
4789 
4790 	ret_val = word - lkup_exts->n_val_words;
4791 	lkup_exts->n_val_words = word;
4792 
4793 	return ret_val;
4794 }
4795 
4796 /**
4797  * ice_create_first_fit_recp_def - Create a recipe grouping
4798  * @hw: pointer to the hardware structure
4799  * @lkup_exts: an array of protocol header extractions
4800  * @rg_list: pointer to a list that stores new recipe groups
4801  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4802  *
4803  * Using first fit algorithm, take all the words that are still not done
4804  * and start grouping them in 4-word groups. Each group makes up one
4805  * recipe.
4806  */
4807 static int
ice_create_first_fit_recp_def(struct ice_hw * hw,struct ice_prot_lkup_ext * lkup_exts,struct list_head * rg_list,u8 * recp_cnt)4808 ice_create_first_fit_recp_def(struct ice_hw *hw,
4809 			      struct ice_prot_lkup_ext *lkup_exts,
4810 			      struct list_head *rg_list,
4811 			      u8 *recp_cnt)
4812 {
4813 	struct ice_pref_recipe_group *grp = NULL;
4814 	u8 j;
4815 
4816 	*recp_cnt = 0;
4817 
4818 	/* Walk through every word in the rule to check if it is not done. If so
4819 	 * then this word needs to be part of a new recipe.
4820 	 */
4821 	for (j = 0; j < lkup_exts->n_val_words; j++)
4822 		if (!test_bit(j, lkup_exts->done)) {
4823 			if (!grp ||
4824 			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4825 				struct ice_recp_grp_entry *entry;
4826 
4827 				entry = devm_kzalloc(ice_hw_to_dev(hw),
4828 						     sizeof(*entry),
4829 						     GFP_KERNEL);
4830 				if (!entry)
4831 					return -ENOMEM;
4832 				list_add(&entry->l_entry, rg_list);
4833 				grp = &entry->r_group;
4834 				(*recp_cnt)++;
4835 			}
4836 
4837 			grp->pairs[grp->n_val_pairs].prot_id =
4838 				lkup_exts->fv_words[j].prot_id;
4839 			grp->pairs[grp->n_val_pairs].off =
4840 				lkup_exts->fv_words[j].off;
4841 			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4842 			grp->n_val_pairs++;
4843 		}
4844 
4845 	return 0;
4846 }
4847 
4848 /**
4849  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4850  * @hw: pointer to the hardware structure
4851  * @fv_list: field vector with the extraction sequence information
4852  * @rg_list: recipe groupings with protocol-offset pairs
4853  *
4854  * Helper function to fill in the field vector indices for protocol-offset
4855  * pairs. These indexes are then ultimately programmed into a recipe.
4856  */
4857 static int
ice_fill_fv_word_index(struct ice_hw * hw,struct list_head * fv_list,struct list_head * rg_list)4858 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4859 		       struct list_head *rg_list)
4860 {
4861 	struct ice_sw_fv_list_entry *fv;
4862 	struct ice_recp_grp_entry *rg;
4863 	struct ice_fv_word *fv_ext;
4864 
4865 	if (list_empty(fv_list))
4866 		return 0;
4867 
4868 	fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4869 			      list_entry);
4870 	fv_ext = fv->fv_ptr->ew;
4871 
4872 	list_for_each_entry(rg, rg_list, l_entry) {
4873 		u8 i;
4874 
4875 		for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4876 			struct ice_fv_word *pr;
4877 			bool found = false;
4878 			u16 mask;
4879 			u8 j;
4880 
4881 			pr = &rg->r_group.pairs[i];
4882 			mask = rg->r_group.mask[i];
4883 
4884 			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4885 				if (fv_ext[j].prot_id == pr->prot_id &&
4886 				    fv_ext[j].off == pr->off) {
4887 					found = true;
4888 
4889 					/* Store index of field vector */
4890 					rg->fv_idx[i] = j;
4891 					rg->fv_mask[i] = mask;
4892 					break;
4893 				}
4894 
4895 			/* Protocol/offset could not be found, caller gave an
4896 			 * invalid pair
4897 			 */
4898 			if (!found)
4899 				return -EINVAL;
4900 		}
4901 	}
4902 
4903 	return 0;
4904 }
4905 
/**
 * ice_find_free_recp_res_idx - find free result indexes for recipe
 * @hw: pointer to hardware structure
 * @profiles: bitmap of profiles that will be associated with the new recipe
 * @free_idx: pointer to variable to receive the free index bitmap
 *
 * The algorithm used here is:
 *	1. When creating a new recipe, create a set P which contains all
 *	   Profiles that will be associated with our new recipe
 *
 *	2. For each Profile p in set P:
 *	    a. Add all recipes associated with Profile p into set R
 *	    b. Optional : PossibleIndexes &= profile[p].possibleIndexes
 *		[initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
 *		i. Or just assume they all have the same possible indexes:
 *			44, 45, 46, 47
 *			i.e., PossibleIndexes = 0x0000F00000000000
 *
 *	3. For each Recipe r in set R:
 *	    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *	    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
 *	FreeIndexes will contain the bits indicating the indexes free for use,
 *      then the code needs to update the recipe[r].used_result_idx_bits to
 *      indicate which indexes were selected for use by this recipe.
 *
 * Return: number of free result indexes (population count of *@free_idx).
 */
static u16
ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
			   unsigned long *free_idx)
{
	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
	u16 bit;

	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

	/* Start from "all indexes possible"; each profile narrows this set */
	bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);

	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
		bitmap_or(recipes, recipes, profile_to_recipe[bit],
			  ICE_MAX_NUM_RECIPES);
		bitmap_and(possible_idx, possible_idx,
			   hw->switch_info->prof_res_bm[bit],
			   ICE_MAX_FV_WORDS);
	}

	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
		bitmap_or(used_idx, used_idx,
			  hw->switch_info->recp_list[bit].res_idxs,
			  ICE_MAX_FV_WORDS);

	/* free = possible and not used (used is a subset of possible here) */
	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);

	/* return number of free indexes */
	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
}
4972 
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 *
 * Allocates one HW recipe per group in @rm->rg_list, programs each with
 * the group's field-vector indexes/masks, chains them through a root
 * recipe when more than one group exists, submits everything via the
 * add-recipe AQ command, and mirrors the result into the SW bookkeeping
 * in hw->switch_info->recp_list.
 *
 * Return: 0 on success or a negative error code.
 */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	 /* check number of free result indices */
	bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
	free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);

	ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
		  free_res_idx, rm->n_grp_count);

	if (rm->n_grp_count > 1) {
		if (rm->n_grp_count > free_res_idx)
			return -ENOSPC;

		/* account for the extra chaining (root) recipe */
		rm->n_grp_count++;
	}

	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
		return -ENOSPC;

	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
			   GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err_mem;
	}

	bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	/* tmp[0] (the first recipe FW returns) is used below as a template
	 * for every new recipe buffer.
	 */
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	/* NOTE(review): if the AQ succeeds but reports zero recipes, status
	 * is still 0 here and the error path returns "success" with
	 * rm->root_buf left unset -- confirm callers tolerate this.
	 */
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* Program this group's FV indexes/masks into lkup slots 1..n */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] =
				cpu_to_le16(entry->fv_mask[i]);
		}

		if (rm->n_grp_count > 1) {
			/* Checks to see if there really is a valid result index
			 * that can be used.
			 */
			if (chain_idx >= ICE_MAX_FV_WORDS) {
				ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
				status = -ENOSPC;
				goto err_unroll;
			}

			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			/* consume this index and pick the next free one */
			clear_bit(chain_idx, result_idx_bm);
			chain_idx = find_first_bit(result_idx_bm,
						   ICE_MAX_FV_WORDS);
		}

		/* fill recipe dependencies */
		bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
			    ICE_MAX_NUM_RECIPES);
		set_bit(buf[recps].recipe_indx,
			(unsigned long *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* Single-group case: the one recipe is itself the root */
		rm->root_rid = buf[0].recipe_indx;
		set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[0].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*last_chain_entry),
						GFP_KERNEL);
		if (!last_chain_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		memset(&buf[recps].content.lkup_indx, 0,
		       sizeof(buf[recps].content.lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The root matches every chained recipe's result word exactly */
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
			       sizeof(buf[recps].recipe_bitmap));
		} else {
			status = -EINVAL;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		recps++;
		rm->root_rid = (u8)rid;
	}
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	list_for_each_entry(entry, &rm->rg_list, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		bool is_root, idx_found = false;
		struct ice_sw_recipe *recp;
		u16 idx, buf_idx = 0;

		/* find buffer index for copying some data */
		for (idx = 0; idx < rm->n_grp_count; idx++)
			if (buf[idx].recipe_indx == entry->rid) {
				buf_idx = idx;
				idx_found = true;
			}

		if (!idx_found) {
			status = -EIO;
			goto err_unroll;
		}

		recp = &sw->recp_list[entry->rid];
		is_root = (rm->root_rid == entry->rid);
		recp->is_root = is_root;

		recp->root_rid = entry->rid;
		recp->big_recp = (is_root && rm->n_grp_count > 1);

		memcpy(&recp->ext_words, entry->r_group.pairs,
		       entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));

		memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
		       sizeof(recp->r_bitmap));

		/* Copy non-result fv index values and masks to recipe. This
		 * call will also update the result recipe bitmask.
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
		recp->recp_created = true;
	}
	/* buf ownership transfers to rm; freed later via rm->root_buf */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
5264 
5265 /**
5266  * ice_create_recipe_group - creates recipe group
5267  * @hw: pointer to hardware structure
5268  * @rm: recipe management list entry
5269  * @lkup_exts: lookup elements
5270  */
5271 static int
ice_create_recipe_group(struct ice_hw * hw,struct ice_sw_recipe * rm,struct ice_prot_lkup_ext * lkup_exts)5272 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5273 			struct ice_prot_lkup_ext *lkup_exts)
5274 {
5275 	u8 recp_count = 0;
5276 	int status;
5277 
5278 	rm->n_grp_count = 0;
5279 
5280 	/* Create recipes for words that are marked not done by packing them
5281 	 * as best fit.
5282 	 */
5283 	status = ice_create_first_fit_recp_def(hw, lkup_exts,
5284 					       &rm->rg_list, &recp_count);
5285 	if (!status) {
5286 		rm->n_grp_count += recp_count;
5287 		rm->n_ext_words = lkup_exts->n_val_words;
5288 		memcpy(&rm->ext_words, lkup_exts->fv_words,
5289 		       sizeof(rm->ext_words));
5290 		memcpy(rm->word_masks, lkup_exts->field_mask,
5291 		       sizeof(rm->word_masks));
5292 	}
5293 
5294 	return status;
5295 }
5296 
5297 /**
5298  * ice_tun_type_match_word - determine if tun type needs a match mask
5299  * @tun_type: tunnel type
5300  * @mask: mask to be used for the tunnel
5301  */
ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type,u16 * mask)5302 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
5303 {
5304 	switch (tun_type) {
5305 	case ICE_SW_TUN_GENEVE:
5306 	case ICE_SW_TUN_VXLAN:
5307 	case ICE_SW_TUN_NVGRE:
5308 	case ICE_SW_TUN_GTPU:
5309 	case ICE_SW_TUN_GTPC:
5310 		*mask = ICE_TUN_FLAG_MASK;
5311 		return true;
5312 
5313 	default:
5314 		*mask = 0;
5315 		return false;
5316 	}
5317 }
5318 
5319 /**
5320  * ice_add_special_words - Add words that are not protocols, such as metadata
5321  * @rinfo: other information regarding the rule e.g. priority and action info
5322  * @lkup_exts: lookup word structure
5323  */
5324 static int
ice_add_special_words(struct ice_adv_rule_info * rinfo,struct ice_prot_lkup_ext * lkup_exts)5325 ice_add_special_words(struct ice_adv_rule_info *rinfo,
5326 		      struct ice_prot_lkup_ext *lkup_exts)
5327 {
5328 	u16 mask;
5329 
5330 	/* If this is a tunneled packet, then add recipe index to match the
5331 	 * tunnel bit in the packet metadata flags.
5332 	 */
5333 	if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
5334 		if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
5335 			u8 word = lkup_exts->n_val_words++;
5336 
5337 			lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
5338 			lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
5339 			lkup_exts->field_mask[word] = mask;
5340 		} else {
5341 			return -ENOSPC;
5342 		}
5343 	}
5344 
5345 	return 0;
5346 }
5347 
5348 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5349  * @hw: pointer to hardware structure
5350  * @rinfo: other information regarding the rule e.g. priority and action info
5351  * @bm: pointer to memory for returning the bitmap of field vectors
5352  */
5353 static void
ice_get_compat_fv_bitmap(struct ice_hw * hw,struct ice_adv_rule_info * rinfo,unsigned long * bm)5354 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5355 			 unsigned long *bm)
5356 {
5357 	enum ice_prof_type prof_type;
5358 
5359 	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
5360 
5361 	switch (rinfo->tun_type) {
5362 	case ICE_NON_TUN:
5363 		prof_type = ICE_PROF_NON_TUN;
5364 		break;
5365 	case ICE_ALL_TUNNELS:
5366 		prof_type = ICE_PROF_TUN_ALL;
5367 		break;
5368 	case ICE_SW_TUN_GENEVE:
5369 	case ICE_SW_TUN_VXLAN:
5370 		prof_type = ICE_PROF_TUN_UDP;
5371 		break;
5372 	case ICE_SW_TUN_NVGRE:
5373 		prof_type = ICE_PROF_TUN_GRE;
5374 		break;
5375 	case ICE_SW_TUN_GTPU:
5376 		prof_type = ICE_PROF_TUN_GTPU;
5377 		break;
5378 	case ICE_SW_TUN_GTPC:
5379 		prof_type = ICE_PROF_TUN_GTPC;
5380 		break;
5381 	case ICE_SW_TUN_AND_NON_TUN:
5382 	default:
5383 		prof_type = ICE_PROF_ALL;
5384 		break;
5385 	}
5386 
5387 	ice_get_sw_fv_bitmap(hw, prof_type, bm);
5388 }
5389 
5390 /**
5391  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5392  * @hw: pointer to hardware structure
5393  * @lkups: lookup elements or match criteria for the advanced recipe, one
5394  *  structure per protocol header
5395  * @lkups_cnt: number of protocols
5396  * @rinfo: other information regarding the rule e.g. priority and action info
5397  * @rid: return the recipe ID of the recipe created
5398  */
5399 static int
ice_add_adv_recipe(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo,u16 * rid)5400 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5401 		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5402 {
5403 	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5404 	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5405 	struct ice_prot_lkup_ext *lkup_exts;
5406 	struct ice_recp_grp_entry *r_entry;
5407 	struct ice_sw_fv_list_entry *fvit;
5408 	struct ice_recp_grp_entry *r_tmp;
5409 	struct ice_sw_fv_list_entry *tmp;
5410 	struct ice_sw_recipe *rm;
5411 	int status = 0;
5412 	u8 i;
5413 
5414 	if (!lkups_cnt)
5415 		return -EINVAL;
5416 
5417 	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5418 	if (!lkup_exts)
5419 		return -ENOMEM;
5420 
5421 	/* Determine the number of words to be matched and if it exceeds a
5422 	 * recipe's restrictions
5423 	 */
5424 	for (i = 0; i < lkups_cnt; i++) {
5425 		u16 count;
5426 
5427 		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5428 			status = -EIO;
5429 			goto err_free_lkup_exts;
5430 		}
5431 
5432 		count = ice_fill_valid_words(&lkups[i], lkup_exts);
5433 		if (!count) {
5434 			status = -EIO;
5435 			goto err_free_lkup_exts;
5436 		}
5437 	}
5438 
5439 	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5440 	if (!rm) {
5441 		status = -ENOMEM;
5442 		goto err_free_lkup_exts;
5443 	}
5444 
5445 	/* Get field vectors that contain fields extracted from all the protocol
5446 	 * headers being programmed.
5447 	 */
5448 	INIT_LIST_HEAD(&rm->fv_list);
5449 	INIT_LIST_HEAD(&rm->rg_list);
5450 
5451 	/* Get bitmap of field vectors (profiles) that are compatible with the
5452 	 * rule request; only these will be searched in the subsequent call to
5453 	 * ice_get_sw_fv_list.
5454 	 */
5455 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5456 
5457 	status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5458 	if (status)
5459 		goto err_unroll;
5460 
5461 	/* Create any special protocol/offset pairs, such as looking at tunnel
5462 	 * bits by extracting metadata
5463 	 */
5464 	status = ice_add_special_words(rinfo, lkup_exts);
5465 	if (status)
5466 		goto err_free_lkup_exts;
5467 
5468 	/* Group match words into recipes using preferred recipe grouping
5469 	 * criteria.
5470 	 */
5471 	status = ice_create_recipe_group(hw, rm, lkup_exts);
5472 	if (status)
5473 		goto err_unroll;
5474 
5475 	/* set the recipe priority if specified */
5476 	rm->priority = (u8)rinfo->priority;
5477 
5478 	/* Find offsets from the field vector. Pick the first one for all the
5479 	 * recipes.
5480 	 */
5481 	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5482 	if (status)
5483 		goto err_unroll;
5484 
5485 	/* get bitmap of all profiles the recipe will be associated with */
5486 	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5487 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5488 		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5489 		set_bit((u16)fvit->profile_id, profiles);
5490 	}
5491 
5492 	/* Look for a recipe which matches our requested fv / mask list */
5493 	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5494 	if (*rid < ICE_MAX_NUM_RECIPES)
5495 		/* Success if found a recipe that match the existing criteria */
5496 		goto err_unroll;
5497 
5498 	rm->tun_type = rinfo->tun_type;
5499 	/* Recipe we need does not exist, add a recipe */
5500 	status = ice_add_sw_recipe(hw, rm, profiles);
5501 	if (status)
5502 		goto err_unroll;
5503 
5504 	/* Associate all the recipes created with all the profiles in the
5505 	 * common field vector.
5506 	 */
5507 	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5508 		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
5509 		u16 j;
5510 
5511 		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5512 						      (u8 *)r_bitmap, NULL);
5513 		if (status)
5514 			goto err_unroll;
5515 
5516 		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5517 			  ICE_MAX_NUM_RECIPES);
5518 		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5519 		if (status)
5520 			goto err_unroll;
5521 
5522 		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5523 						      (u8 *)r_bitmap,
5524 						      NULL);
5525 		ice_release_change_lock(hw);
5526 
5527 		if (status)
5528 			goto err_unroll;
5529 
5530 		/* Update profile to recipe bitmap array */
5531 		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5532 			    ICE_MAX_NUM_RECIPES);
5533 
5534 		/* Update recipe to profile bitmap array */
5535 		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5536 			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5537 	}
5538 
5539 	*rid = rm->root_rid;
5540 	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5541 	       sizeof(*lkup_exts));
5542 err_unroll:
5543 	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5544 		list_del(&r_entry->l_entry);
5545 		devm_kfree(ice_hw_to_dev(hw), r_entry);
5546 	}
5547 
5548 	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5549 		list_del(&fvit->list_entry);
5550 		devm_kfree(ice_hw_to_dev(hw), fvit);
5551 	}
5552 
5553 	if (rm->root_buf)
5554 		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5555 
5556 	kfree(rm);
5557 
5558 err_free_lkup_exts:
5559 	kfree(lkup_exts);
5560 
5561 	return status;
5562 }
5563 
5564 /**
5565  * ice_find_dummy_packet - find dummy packet
5566  *
5567  * @lkups: lookup elements or match criteria for the advanced recipe, one
5568  *	   structure per protocol header
5569  * @lkups_cnt: number of protocols
5570  * @tun_type: tunnel type
5571  *
5572  * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5573  */
5574 static const struct ice_dummy_pkt_profile *
ice_find_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,enum ice_sw_tunnel_type tun_type)5575 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5576 		      enum ice_sw_tunnel_type tun_type)
5577 {
5578 	const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5579 	u32 match = 0;
5580 	u16 i;
5581 
5582 	switch (tun_type) {
5583 	case ICE_SW_TUN_GTPC:
5584 		match |= ICE_PKT_TUN_GTPC;
5585 		break;
5586 	case ICE_SW_TUN_GTPU:
5587 		match |= ICE_PKT_TUN_GTPU;
5588 		break;
5589 	case ICE_SW_TUN_NVGRE:
5590 		match |= ICE_PKT_TUN_NVGRE;
5591 		break;
5592 	case ICE_SW_TUN_GENEVE:
5593 	case ICE_SW_TUN_VXLAN:
5594 		match |= ICE_PKT_TUN_UDP;
5595 		break;
5596 	default:
5597 		break;
5598 	}
5599 
5600 	for (i = 0; i < lkups_cnt; i++) {
5601 		if (lkups[i].type == ICE_UDP_ILOS)
5602 			match |= ICE_PKT_INNER_UDP;
5603 		else if (lkups[i].type == ICE_TCP_IL)
5604 			match |= ICE_PKT_INNER_TCP;
5605 		else if (lkups[i].type == ICE_IPV6_OFOS)
5606 			match |= ICE_PKT_OUTER_IPV6;
5607 		else if (lkups[i].type == ICE_VLAN_OFOS)
5608 			match |= ICE_PKT_VLAN;
5609 		else if (lkups[i].type == ICE_ETYPE_OL &&
5610 			 lkups[i].h_u.ethertype.ethtype_id ==
5611 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5612 			 lkups[i].m_u.ethertype.ethtype_id ==
5613 				cpu_to_be16(0xFFFF))
5614 			match |= ICE_PKT_OUTER_IPV6;
5615 		else if (lkups[i].type == ICE_ETYPE_IL &&
5616 			 lkups[i].h_u.ethertype.ethtype_id ==
5617 				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5618 			 lkups[i].m_u.ethertype.ethtype_id ==
5619 				cpu_to_be16(0xFFFF))
5620 			match |= ICE_PKT_INNER_IPV6;
5621 		else if (lkups[i].type == ICE_IPV6_IL)
5622 			match |= ICE_PKT_INNER_IPV6;
5623 		else if (lkups[i].type == ICE_GTP_NO_PAY)
5624 			match |= ICE_PKT_GTP_NOPAY;
5625 	}
5626 
5627 	while (ret->match && (match & ret->match) != ret->match)
5628 		ret++;
5629 
5630 	return ret;
5631 }
5632 
5633 /**
5634  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5635  *
5636  * @lkups: lookup elements or match criteria for the advanced recipe, one
5637  *	   structure per protocol header
5638  * @lkups_cnt: number of protocols
5639  * @s_rule: stores rule information from the match criteria
5640  * @profile: dummy packet profile (the template, its size and header offsets)
5641  */
5642 static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_sw_rule_lkup_rx_tx * s_rule,const struct ice_dummy_pkt_profile * profile)5643 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5644 			  struct ice_sw_rule_lkup_rx_tx *s_rule,
5645 			  const struct ice_dummy_pkt_profile *profile)
5646 {
5647 	u8 *pkt;
5648 	u16 i;
5649 
5650 	/* Start with a packet with a pre-defined/dummy content. Then, fill
5651 	 * in the header values to be looked up or matched.
5652 	 */
5653 	pkt = s_rule->hdr_data;
5654 
5655 	memcpy(pkt, profile->pkt, profile->pkt_len);
5656 
5657 	for (i = 0; i < lkups_cnt; i++) {
5658 		const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5659 		enum ice_protocol_type type;
5660 		u16 offset = 0, len = 0, j;
5661 		bool found = false;
5662 
5663 		/* find the start of this layer; it should be found since this
5664 		 * was already checked when search for the dummy packet
5665 		 */
5666 		type = lkups[i].type;
5667 		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5668 			if (type == offsets[j].type) {
5669 				offset = offsets[j].offset;
5670 				found = true;
5671 				break;
5672 			}
5673 		}
5674 		/* this should never happen in a correct calling sequence */
5675 		if (!found)
5676 			return -EINVAL;
5677 
5678 		switch (lkups[i].type) {
5679 		case ICE_MAC_OFOS:
5680 		case ICE_MAC_IL:
5681 			len = sizeof(struct ice_ether_hdr);
5682 			break;
5683 		case ICE_ETYPE_OL:
5684 		case ICE_ETYPE_IL:
5685 			len = sizeof(struct ice_ethtype_hdr);
5686 			break;
5687 		case ICE_VLAN_OFOS:
5688 			len = sizeof(struct ice_vlan_hdr);
5689 			break;
5690 		case ICE_IPV4_OFOS:
5691 		case ICE_IPV4_IL:
5692 			len = sizeof(struct ice_ipv4_hdr);
5693 			break;
5694 		case ICE_IPV6_OFOS:
5695 		case ICE_IPV6_IL:
5696 			len = sizeof(struct ice_ipv6_hdr);
5697 			break;
5698 		case ICE_TCP_IL:
5699 		case ICE_UDP_OF:
5700 		case ICE_UDP_ILOS:
5701 			len = sizeof(struct ice_l4_hdr);
5702 			break;
5703 		case ICE_SCTP_IL:
5704 			len = sizeof(struct ice_sctp_hdr);
5705 			break;
5706 		case ICE_NVGRE:
5707 			len = sizeof(struct ice_nvgre_hdr);
5708 			break;
5709 		case ICE_VXLAN:
5710 		case ICE_GENEVE:
5711 			len = sizeof(struct ice_udp_tnl_hdr);
5712 			break;
5713 		case ICE_GTP_NO_PAY:
5714 		case ICE_GTP:
5715 			len = sizeof(struct ice_udp_gtp_hdr);
5716 			break;
5717 		default:
5718 			return -EINVAL;
5719 		}
5720 
5721 		/* the length should be a word multiple */
5722 		if (len % ICE_BYTES_PER_WORD)
5723 			return -EIO;
5724 
5725 		/* We have the offset to the header start, the length, the
5726 		 * caller's header values and mask. Use this information to
5727 		 * copy the data into the dummy packet appropriately based on
5728 		 * the mask. Note that we need to only write the bits as
5729 		 * indicated by the mask to make sure we don't improperly write
5730 		 * over any significant packet data.
5731 		 */
5732 		for (j = 0; j < len / sizeof(u16); j++) {
5733 			u16 *ptr = (u16 *)(pkt + offset);
5734 			u16 mask = lkups[i].m_raw[j];
5735 
5736 			if (!mask)
5737 				continue;
5738 
5739 			ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5740 		}
5741 	}
5742 
5743 	s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5744 
5745 	return 0;
5746 }
5747 
5748 /**
5749  * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5750  * @hw: pointer to the hardware structure
5751  * @tun_type: tunnel type
5752  * @pkt: dummy packet to fill in
5753  * @offsets: offset info for the dummy packet
5754  */
5755 static int
ice_fill_adv_packet_tun(struct ice_hw * hw,enum ice_sw_tunnel_type tun_type,u8 * pkt,const struct ice_dummy_pkt_offsets * offsets)5756 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5757 			u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
5758 {
5759 	u16 open_port, i;
5760 
5761 	switch (tun_type) {
5762 	case ICE_SW_TUN_VXLAN:
5763 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5764 			return -EIO;
5765 		break;
5766 	case ICE_SW_TUN_GENEVE:
5767 		if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5768 			return -EIO;
5769 		break;
5770 	default:
5771 		/* Nothing needs to be done for this tunnel type */
5772 		return 0;
5773 	}
5774 
5775 	/* Find the outer UDP protocol header and insert the port number */
5776 	for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5777 		if (offsets[i].type == ICE_UDP_OF) {
5778 			struct ice_l4_hdr *hdr;
5779 			u16 offset;
5780 
5781 			offset = offsets[i].offset;
5782 			hdr = (struct ice_l4_hdr *)&pkt[offset];
5783 			hdr->dst_port = cpu_to_be16(open_port);
5784 
5785 			return 0;
5786 		}
5787 	}
5788 
5789 	return -EIO;
5790 }
5791 
5792 /**
5793  * ice_find_adv_rule_entry - Search a rule entry
5794  * @hw: pointer to the hardware structure
5795  * @lkups: lookup elements or match criteria for the advanced recipe, one
5796  *	   structure per protocol header
5797  * @lkups_cnt: number of protocols
5798  * @recp_id: recipe ID for which we are finding the rule
5799  * @rinfo: other information regarding the rule e.g. priority and action info
5800  *
5801  * Helper function to search for a given advance rule entry
5802  * Returns pointer to entry storing the rule if found
5803  */
5804 static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,u16 recp_id,struct ice_adv_rule_info * rinfo)5805 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5806 			u16 lkups_cnt, u16 recp_id,
5807 			struct ice_adv_rule_info *rinfo)
5808 {
5809 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
5810 	struct ice_switch_info *sw = hw->switch_info;
5811 	int i;
5812 
5813 	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5814 			    list_entry) {
5815 		bool lkups_matched = true;
5816 
5817 		if (lkups_cnt != list_itr->lkups_cnt)
5818 			continue;
5819 		for (i = 0; i < list_itr->lkups_cnt; i++)
5820 			if (memcmp(&list_itr->lkups[i], &lkups[i],
5821 				   sizeof(*lkups))) {
5822 				lkups_matched = false;
5823 				break;
5824 			}
5825 		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5826 		    rinfo->tun_type == list_itr->rule_info.tun_type &&
5827 		    lkups_matched)
5828 			return list_itr;
5829 	}
5830 	return NULL;
5831 }
5832 
/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 *
 * Returns 0 on success (including when the VSI was already subscribed),
 * -EOPNOTSUPP for action combinations that cannot share a VSI list,
 * -EEXIST if the same HW VSI is added twice, or a negative error code
 * propagated from the AQ helpers.
 */
static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status;

	/* queue/drop actions target a fixed destination, not a VSI, so
	 * they cannot be expanded into a VSI list
	 */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return -EOPNOTSUPP;

	/* mixing a queue-directed new filter with an existing VSI-directed
	 * one is likewise unsupported
	 */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		 /* Only one entry existed in the mapping and it was not already
		  * a part of a VSI list. So, create a VSI list with the old and
		  * new VSIs.
		  */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* reflect the new forwarding target in the book keeping */
		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		/* vsi_count >= 2 implies a VSI list must already exist */
		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	/* count the subscriber only once the HW update succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
5941 
/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 * together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 *               ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe the all the words that forms the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
	const struct ice_dummy_pkt_profile *profile;
	u16 rid = 0, i, rule_buf_sz, vsi_handle;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	u16 word_cnt;
	u32 act = 0;
	int status;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return -EINVAL;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j;

		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
			if (lkups[i].m_raw[j])
				word_cnt++;
	}

	/* a rule whose masks select no words would match nothing */
	if (!word_cnt)
		return -EINVAL;

	/* the recipe chain can extract only a bounded number of words */
	if (word_cnt > ICE_MAX_CHAIN_WORDS)
		return -ENOSPC;

	/* locate a dummy packet */
	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);

	/* only these four forwarding actions are supported here */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return -EIO;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* resolve the caller's VSI handle to the HW VSI number */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	/* for Tx rules the source is the originating HW VSI */
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	/* find or create a recipe able to extract these lookup words */
	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	/* default to LAN + loopback enable unless the caller overrides */
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	/* encode the forwarding action into the rule's act word */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'Rx'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'Rx' set the source to be the port number
	 * for 'Tx' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->src = cpu_to_le16(hw->port_info->lport);
	} else {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
	}

	s_rule->recipe_id = cpu_to_le16(rid);
	s_rule->act = cpu_to_le32(act);

	/* build the lookup packet from the template and caller's masks */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
	if (status)
		goto err_ice_add_adv_rule;

	if (rinfo->tun_type != ICE_NON_TUN &&
	    rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->hdr_data,
						 profile->offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	/* program the rule into the switch */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	/* keep a private copy of the lookups for later match/removal */
	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
err_ice_add_adv_rule:
	/* on failure release the partially-built book keeping entry */
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

	return status;
}
6162 
6163 /**
6164  * ice_replay_vsi_fltr - Replay filters for requested VSI
6165  * @hw: pointer to the hardware structure
6166  * @vsi_handle: driver VSI handle
6167  * @recp_id: Recipe ID for which rules need to be replayed
6168  * @list_head: list for which filters need to be replayed
6169  *
6170  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6171  * It is required to pass valid VSI handle.
6172  */
6173 static int
ice_replay_vsi_fltr(struct ice_hw * hw,u16 vsi_handle,u8 recp_id,struct list_head * list_head)6174 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6175 		    struct list_head *list_head)
6176 {
6177 	struct ice_fltr_mgmt_list_entry *itr;
6178 	int status = 0;
6179 	u16 hw_vsi_id;
6180 
6181 	if (list_empty(list_head))
6182 		return status;
6183 	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6184 
6185 	list_for_each_entry(itr, list_head, list_entry) {
6186 		struct ice_fltr_list_entry f_entry;
6187 
6188 		f_entry.fltr_info = itr->fltr_info;
6189 		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6190 		    itr->fltr_info.vsi_handle == vsi_handle) {
6191 			/* update the src in case it is VSI num */
6192 			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6193 				f_entry.fltr_info.src = hw_vsi_id;
6194 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6195 			if (status)
6196 				goto end;
6197 			continue;
6198 		}
6199 		if (!itr->vsi_list_info ||
6200 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
6201 			continue;
6202 		/* Clearing it so that the logic can add it back */
6203 		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6204 		f_entry.fltr_info.vsi_handle = vsi_handle;
6205 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6206 		/* update the src in case it is VSI num */
6207 		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6208 			f_entry.fltr_info.src = hw_vsi_id;
6209 		if (recp_id == ICE_SW_LKUP_VLAN)
6210 			status = ice_add_vlan_internal(hw, &f_entry);
6211 		else
6212 			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6213 		if (status)
6214 			goto end;
6215 	}
6216 end:
6217 	return status;
6218 }
6219 
/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Unsubscribes @vsi_handle from the VSI list of an advanced rule. When only
 * one subscriber remains afterwards, the rule is converted back into a plain
 * "forward to VSI" rule and the now-unneeded VSI list is torn down.
 *
 * Returns 0 on success, -EINVAL/-ENOENT for invalid state, or a negative
 * error code from the AQ helpers.
 */
static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status;

	/* only rules that actually forward to a VSI list can be updated */
	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* drop this VSI from the HW VSI list (last flag true = remove) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* mirror the HW change in the book keeping */
	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		/* one subscriber left: collapse the list back into a direct
		 * "forward to VSI" rule pointing at the remaining VSI
		 */
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		/* NOTE(review): the list is unlinked after a single ref_cnt
		 * decrement - presumably adv rules never share VSI lists;
		 * confirm ref_cnt is always 1 on this path.
		 */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
6309 
6310 /**
6311  * ice_rem_adv_rule - removes existing advanced switch rule
6312  * @hw: pointer to the hardware structure
6313  * @lkups: information on the words that needs to be looked up. All words
6314  *         together makes one recipe
6315  * @lkups_cnt: num of entries in the lkups array
6316  * @rinfo: Its the pointer to the rule information for the rule
6317  *
6318  * This function can be used to remove 1 rule at a time. The lkups is
6319  * used to describe all the words that forms the "lookup" portion of the
6320  * rule. These words can span multiple protocols. Callers to this function
6321  * need to pass in a list of protocol headers with lookup information along
6322  * and mask that determines which words are valid from the given protocol
6323  * header. rinfo describes other information related to this rule such as
6324  * forwarding IDs, priority of this rule, etc.
6325  */
6326 static int
ice_rem_adv_rule(struct ice_hw * hw,struct ice_adv_lkup_elem * lkups,u16 lkups_cnt,struct ice_adv_rule_info * rinfo)6327 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
6328 		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
6329 {
6330 	struct ice_adv_fltr_mgmt_list_entry *list_elem;
6331 	struct ice_prot_lkup_ext lkup_exts;
6332 	bool remove_rule = false;
6333 	struct mutex *rule_lock; /* Lock to protect filter rule list */
6334 	u16 i, rid, vsi_handle;
6335 	int status = 0;
6336 
6337 	memset(&lkup_exts, 0, sizeof(lkup_exts));
6338 	for (i = 0; i < lkups_cnt; i++) {
6339 		u16 count;
6340 
6341 		if (lkups[i].type >= ICE_PROTOCOL_LAST)
6342 			return -EIO;
6343 
6344 		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
6345 		if (!count)
6346 			return -EIO;
6347 	}
6348 
6349 	/* Create any special protocol/offset pairs, such as looking at tunnel
6350 	 * bits by extracting metadata
6351 	 */
6352 	status = ice_add_special_words(rinfo, &lkup_exts);
6353 	if (status)
6354 		return status;
6355 
6356 	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
6357 	/* If did not find a recipe that match the existing criteria */
6358 	if (rid == ICE_MAX_NUM_RECIPES)
6359 		return -EINVAL;
6360 
6361 	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
6362 	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
6363 	/* the rule is already removed */
6364 	if (!list_elem)
6365 		return 0;
6366 	mutex_lock(rule_lock);
6367 	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
6368 		remove_rule = true;
6369 	} else if (list_elem->vsi_count > 1) {
6370 		remove_rule = false;
6371 		vsi_handle = rinfo->sw_act.vsi_handle;
6372 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6373 	} else {
6374 		vsi_handle = rinfo->sw_act.vsi_handle;
6375 		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
6376 		if (status) {
6377 			mutex_unlock(rule_lock);
6378 			return status;
6379 		}
6380 		if (list_elem->vsi_count == 0)
6381 			remove_rule = true;
6382 	}
6383 	mutex_unlock(rule_lock);
6384 	if (remove_rule) {
6385 		struct ice_sw_rule_lkup_rx_tx *s_rule;
6386 		u16 rule_buf_sz;
6387 
6388 		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
6389 		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
6390 		if (!s_rule)
6391 			return -ENOMEM;
6392 		s_rule->act = 0;
6393 		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
6394 		s_rule->hdr_len = 0;
6395 		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
6396 					 rule_buf_sz, 1,
6397 					 ice_aqc_opc_remove_sw_rules, NULL);
6398 		if (!status || status == -ENOENT) {
6399 			struct ice_switch_info *sw = hw->switch_info;
6400 
6401 			mutex_lock(rule_lock);
6402 			list_del(&list_elem->list_entry);
6403 			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
6404 			devm_kfree(ice_hw_to_dev(hw), list_elem);
6405 			mutex_unlock(rule_lock);
6406 			if (list_empty(&sw->recp_list[rid].filt_rules))
6407 				sw->recp_list[rid].adv_rule = false;
6408 		}
6409 		kfree(s_rule);
6410 	}
6411 	return status;
6412 }
6413 
6414 /**
6415  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6416  * @hw: pointer to the hardware structure
6417  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6418  *
6419  * This function is used to remove 1 rule at a time. The removal is based on
6420  * the remove_entry parameter. This function will remove rule for a given
6421  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
6422  */
6423 int
ice_rem_adv_rule_by_id(struct ice_hw * hw,struct ice_rule_query_data * remove_entry)6424 ice_rem_adv_rule_by_id(struct ice_hw *hw,
6425 		       struct ice_rule_query_data *remove_entry)
6426 {
6427 	struct ice_adv_fltr_mgmt_list_entry *list_itr;
6428 	struct list_head *list_head;
6429 	struct ice_adv_rule_info rinfo;
6430 	struct ice_switch_info *sw;
6431 
6432 	sw = hw->switch_info;
6433 	if (!sw->recp_list[remove_entry->rid].recp_created)
6434 		return -EINVAL;
6435 	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
6436 	list_for_each_entry(list_itr, list_head, list_entry) {
6437 		if (list_itr->rule_info.fltr_rule_id ==
6438 		    remove_entry->rule_id) {
6439 			rinfo = list_itr->rule_info;
6440 			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
6441 			return ice_rem_adv_rule(hw, list_itr->lkups,
6442 						list_itr->lkups_cnt, &rinfo);
6443 		}
6444 	}
6445 	/* either list is empty or unable to find rule */
6446 	return -ENOENT;
6447 }
6448 
6449 /**
6450  * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6451  *                            given VSI handle
6452  * @hw: pointer to the hardware structure
6453  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6454  *
6455  * This function is used to remove all the rules for a given VSI and as soon
6456  * as removing a rule fails, it will return immediately with the error code,
6457  * else it will return success.
6458  */
ice_rem_adv_rule_for_vsi(struct ice_hw * hw,u16 vsi_handle)6459 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6460 {
6461 	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
6462 	struct ice_vsi_list_map_info *map_info;
6463 	struct ice_adv_rule_info rinfo;
6464 	struct list_head *list_head;
6465 	struct ice_switch_info *sw;
6466 	int status;
6467 	u8 rid;
6468 
6469 	sw = hw->switch_info;
6470 	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6471 		if (!sw->recp_list[rid].recp_created)
6472 			continue;
6473 		if (!sw->recp_list[rid].adv_rule)
6474 			continue;
6475 
6476 		list_head = &sw->recp_list[rid].filt_rules;
6477 		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
6478 					 list_entry) {
6479 			rinfo = list_itr->rule_info;
6480 
6481 			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
6482 				map_info = list_itr->vsi_list_info;
6483 				if (!map_info)
6484 					continue;
6485 
6486 				if (!test_bit(vsi_handle, map_info->vsi_map))
6487 					continue;
6488 			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
6489 				continue;
6490 			}
6491 
6492 			rinfo.sw_act.vsi_handle = vsi_handle;
6493 			status = ice_rem_adv_rule(hw, list_itr->lkups,
6494 						  list_itr->lkups_cnt, &rinfo);
6495 			if (status)
6496 				return status;
6497 		}
6498 	}
6499 	return 0;
6500 }
6501 
6502 /**
6503  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6504  * @hw: pointer to the hardware structure
6505  * @vsi_handle: driver VSI handle
6506  * @list_head: list for which filters need to be replayed
6507  *
6508  * Replay the advanced rule for the given VSI.
6509  */
6510 static int
ice_replay_vsi_adv_rule(struct ice_hw * hw,u16 vsi_handle,struct list_head * list_head)6511 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6512 			struct list_head *list_head)
6513 {
6514 	struct ice_rule_query_data added_entry = { 0 };
6515 	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6516 	int status = 0;
6517 
6518 	if (list_empty(list_head))
6519 		return status;
6520 	list_for_each_entry(adv_fltr, list_head, list_entry) {
6521 		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6522 		u16 lk_cnt = adv_fltr->lkups_cnt;
6523 
6524 		if (vsi_handle != rinfo->sw_act.vsi_handle)
6525 			continue;
6526 		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6527 					  &added_entry);
6528 		if (status)
6529 			break;
6530 	}
6531 	return status;
6532 }
6533 
6534 /**
6535  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6536  * @hw: pointer to the hardware structure
6537  * @vsi_handle: driver VSI handle
6538  *
6539  * Replays filters for requested VSI via vsi_handle.
6540  */
ice_replay_vsi_all_fltr(struct ice_hw * hw,u16 vsi_handle)6541 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6542 {
6543 	struct ice_switch_info *sw = hw->switch_info;
6544 	int status;
6545 	u8 i;
6546 
6547 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6548 		struct list_head *head;
6549 
6550 		head = &sw->recp_list[i].filt_replay_rules;
6551 		if (!sw->recp_list[i].adv_rule)
6552 			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6553 		else
6554 			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6555 		if (status)
6556 			return status;
6557 	}
6558 	return status;
6559 }
6560 
6561 /**
6562  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6563  * @hw: pointer to the HW struct
6564  *
6565  * Deletes the filter replay rules.
6566  */
ice_rm_all_sw_replay_rule_info(struct ice_hw * hw)6567 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6568 {
6569 	struct ice_switch_info *sw = hw->switch_info;
6570 	u8 i;
6571 
6572 	if (!sw)
6573 		return;
6574 
6575 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6576 		if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6577 			struct list_head *l_head;
6578 
6579 			l_head = &sw->recp_list[i].filt_replay_rules;
6580 			if (!sw->recp_list[i].adv_rule)
6581 				ice_rem_sw_rule_info(hw, l_head);
6582 			else
6583 				ice_rem_adv_rule_info(hw, l_head);
6584 		}
6585 	}
6586 }
6587