1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/mlx5/fs.h>
34 #include "en.h"
35 #include "en/params.h"
36 #include "en/xsk/pool.h"
37
38 static int flow_type_to_traffic_type(u32 flow_type);
39
flow_type_mask(u32 flow_type)40 static u32 flow_type_mask(u32 flow_type)
41 {
42 return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
43 }
44
/* One installed ethtool steering rule: the user's flow spec, the HW rule
 * handle, the flow table it lives in, and (for FLOW_RSS rules) the RSS
 * context it forwards to.
 */
struct mlx5e_ethtool_rule {
	struct list_head list;			/* node in priv->fs.ethtool.rules, kept sorted by location */
	struct ethtool_rx_flow_spec flow_spec;	/* copy of the user-supplied spec */
	struct mlx5_flow_handle *rule;		/* HW steering rule handle */
	struct mlx5e_ethtool_table *eth_ft;	/* owning table; released via put_flow_table() */
	struct mlx5e_rss *rss;			/* RSS context ref, non-NULL only for FLOW_RSS rules */
};
52
put_flow_table(struct mlx5e_ethtool_table * eth_ft)53 static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
54 {
55 if (!--eth_ft->num_rules) {
56 mlx5_destroy_flow_table(eth_ft->ft);
57 eth_ft->ft = NULL;
58 }
59 }
60
61 #define MLX5E_ETHTOOL_L3_L4_PRIO 0
62 #define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
63 #define MLX5E_ETHTOOL_NUM_ENTRIES 64000
64 #define MLX5E_ETHTOOL_NUM_GROUPS 10
get_flow_table(struct mlx5e_priv * priv,struct ethtool_rx_flow_spec * fs,int num_tuples)65 static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
66 struct ethtool_rx_flow_spec *fs,
67 int num_tuples)
68 {
69 struct mlx5_flow_table_attr ft_attr = {};
70 struct mlx5e_ethtool_table *eth_ft;
71 struct mlx5_flow_namespace *ns;
72 struct mlx5_flow_table *ft;
73 int max_tuples;
74 int table_size;
75 int prio;
76
77 switch (flow_type_mask(fs->flow_type)) {
78 case TCP_V4_FLOW:
79 case UDP_V4_FLOW:
80 case TCP_V6_FLOW:
81 case UDP_V6_FLOW:
82 max_tuples = ETHTOOL_NUM_L3_L4_FTS;
83 prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
84 eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
85 break;
86 case IP_USER_FLOW:
87 case IPV6_USER_FLOW:
88 max_tuples = ETHTOOL_NUM_L3_L4_FTS;
89 prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
90 eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
91 break;
92 case ETHER_FLOW:
93 max_tuples = ETHTOOL_NUM_L2_FTS;
94 prio = max_tuples - num_tuples;
95 eth_ft = &priv->fs.ethtool.l2_ft[prio];
96 prio += MLX5E_ETHTOOL_L2_PRIO;
97 break;
98 default:
99 return ERR_PTR(-EINVAL);
100 }
101
102 eth_ft->num_rules++;
103 if (eth_ft->ft)
104 return eth_ft;
105
106 ns = mlx5_get_flow_namespace(priv->mdev,
107 MLX5_FLOW_NAMESPACE_ETHTOOL);
108 if (!ns)
109 return ERR_PTR(-EOPNOTSUPP);
110
111 table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
112 flow_table_properties_nic_receive.log_max_ft_size)),
113 MLX5E_ETHTOOL_NUM_ENTRIES);
114
115 ft_attr.prio = prio;
116 ft_attr.max_fte = table_size;
117 ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
118 ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
119 if (IS_ERR(ft))
120 return (void *)ft;
121
122 eth_ft->ft = ft;
123 return eth_ft;
124 }
125
/* In-place AND of @val with @mask, byte by byte: val[i] &= mask[i].
 * Ensures value bits outside the user's mask cannot affect matching.
 */
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		val[i] &= mask[i];
}
133
134 #define MLX5E_FTE_SET(header_p, fld, v) \
135 MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)
136
137 #define MLX5E_FTE_ADDR_OF(header_p, fld) \
138 MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
139
140 static void
set_ip4(void * headers_c,void * headers_v,__be32 ip4src_m,__be32 ip4src_v,__be32 ip4dst_m,__be32 ip4dst_v)141 set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
142 __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
143 {
144 if (ip4src_m) {
145 memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
146 &ip4src_v, sizeof(ip4src_v));
147 memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
148 &ip4src_m, sizeof(ip4src_m));
149 }
150 if (ip4dst_m) {
151 memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
152 &ip4dst_v, sizeof(ip4dst_v));
153 memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
154 &ip4dst_m, sizeof(ip4dst_m));
155 }
156
157 MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
158 MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
159 }
160
161 static void
set_ip6(void * headers_c,void * headers_v,__be32 ip6src_m[4],__be32 ip6src_v[4],__be32 ip6dst_m[4],__be32 ip6dst_v[4])162 set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
163 __be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
164 {
165 u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
166
167 if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
168 memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
169 ip6src_v, ip6_sz);
170 memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
171 ip6src_m, ip6_sz);
172 }
173 if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
174 memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
175 ip6dst_v, ip6_sz);
176 memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
177 ip6dst_m, ip6_sz);
178 }
179
180 MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
181 MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
182 }
183
184 static void
set_tcp(void * headers_c,void * headers_v,__be16 psrc_m,__be16 psrc_v,__be16 pdst_m,__be16 pdst_v)185 set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
186 __be16 pdst_m, __be16 pdst_v)
187 {
188 if (psrc_m) {
189 MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
190 MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
191 }
192 if (pdst_m) {
193 MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
194 MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
195 }
196
197 MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
198 MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
199 }
200
201 static void
set_udp(void * headers_c,void * headers_v,__be16 psrc_m,__be16 psrc_v,__be16 pdst_m,__be16 pdst_v)202 set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
203 __be16 pdst_m, __be16 pdst_v)
204 {
205 if (psrc_m) {
206 MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
207 MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
208 }
209
210 if (pdst_m) {
211 MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
212 MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
213 }
214
215 MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
216 MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
217 }
218
219 static void
parse_tcp4(void * headers_c,void * headers_v,struct ethtool_rx_flow_spec * fs)220 parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
221 {
222 struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
223 struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;
224
225 set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
226 l4_mask->ip4dst, l4_val->ip4dst);
227
228 set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
229 l4_mask->pdst, l4_val->pdst);
230 }
231
232 static void
parse_udp4(void * headers_c,void * headers_v,struct ethtool_rx_flow_spec * fs)233 parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
234 {
235 struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
236 struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;
237
238 set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
239 l4_mask->ip4dst, l4_val->ip4dst);
240
241 set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
242 l4_mask->pdst, l4_val->pdst);
243 }
244
245 static void
parse_ip4(void * headers_c,void * headers_v,struct ethtool_rx_flow_spec * fs)246 parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
247 {
248 struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
249 struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
250
251 set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
252 l3_mask->ip4dst, l3_val->ip4dst);
253
254 if (l3_mask->proto) {
255 MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
256 MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
257 }
258 }
259
260 static void
parse_ip6(void * headers_c,void * headers_v,struct ethtool_rx_flow_spec * fs)261 parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
262 {
263 struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
264 struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
265
266 set_ip6(headers_c, headers_v, l3_mask->ip6src,
267 l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);
268
269 if (l3_mask->l4_proto) {
270 MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
271 MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
272 }
273 }
274
275 static void
parse_tcp6(void * headers_c,void * headers_v,struct ethtool_rx_flow_spec * fs)276 parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
277 {
278 struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
279 struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;
280
281 set_ip6(headers_c, headers_v, l4_mask->ip6src,
282 l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
283
284 set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
285 l4_mask->pdst, l4_val->pdst);
286 }
287
288 static void
parse_udp6(void * headers_c,void * headers_v,struct ethtool_rx_flow_spec * fs)289 parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
290 {
291 struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
292 struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;
293
294 set_ip6(headers_c, headers_v, l4_mask->ip6src,
295 l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
296
297 set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
298 l4_mask->pdst, l4_val->pdst);
299 }
300
301 static void
parse_ether(void * headers_c,void * headers_v,struct ethtool_rx_flow_spec * fs)302 parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
303 {
304 struct ethhdr *eth_mask = &fs->m_u.ether_spec;
305 struct ethhdr *eth_val = &fs->h_u.ether_spec;
306
307 mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
308 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
309 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
310 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
311 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
312 MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
313 MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
314 }
315
316 static void
set_cvlan(void * headers_c,void * headers_v,__be16 vlan_tci)317 set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
318 {
319 MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
320 MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
321 MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
322 MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
323 }
324
325 static void
set_dmac(void * headers_c,void * headers_v,unsigned char m_dest[ETH_ALEN],unsigned char v_dest[ETH_ALEN])326 set_dmac(void *headers_c, void *headers_v,
327 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
328 {
329 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
330 ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
331 }
332
/* Translate an ethtool flow spec into mlx5 match criteria/value pairs.
 *
 * @match_c: fte_match_param-layout mask buffer to fill
 * @match_v: fte_match_param-layout value buffer to fill
 * @fs:      user-supplied ethtool flow spec
 *
 * Only outer (non-tunneled) headers are matched.  Returns 0 on success
 * or -EINVAL for an unsupported flow type.
 */
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = flow_type_mask(fs->flow_type);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	/* FLOW_EXT may add a VLAN-ID match (VID bits only). */
	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	/* FLOW_MAC_EXT may add a destination-MAC match. */
	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}
381
add_rule_to_list(struct mlx5e_priv * priv,struct mlx5e_ethtool_rule * rule)382 static void add_rule_to_list(struct mlx5e_priv *priv,
383 struct mlx5e_ethtool_rule *rule)
384 {
385 struct mlx5e_ethtool_rule *iter;
386 struct list_head *head = &priv->fs.ethtool.rules;
387
388 list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
389 if (iter->flow_spec.location > rule->flow_spec.location)
390 break;
391 head = &iter->list;
392 }
393 priv->fs.ethtool.tot_num_rules++;
394 list_add(&rule->list, head);
395 }
396
/* Return true if the outer_headers region of @match_criteria is all
 * zeros, i.e. the rule matches nothing in the outer headers.
 */
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	/* Overlapping self-compare trick: if byte 0 is zero and every
	 * byte equals its successor, the whole region is zero.
	 */
	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}
407
/* Resolve the TIR number a rule should forward to.
 *
 * For FLOW_RSS rules the TIR comes from the referenced RSS context and
 * the flow's traffic type; a reference on the RSS context is taken and
 * stored in @eth_rule so del_ethtool_rule() can release it.  Otherwise
 * the ring cookie selects a direct (or XSK) channel TIR.
 *
 * Returns 0 on success or a negative errno.
 */
static int flow_get_tirn(struct mlx5e_priv *priv,
			 struct mlx5e_ethtool_rule *eth_rule,
			 struct ethtool_rx_flow_spec *fs,
			 u32 rss_context, u32 *tirn)
{
	if (fs->flow_type & FLOW_RSS) {
		struct mlx5e_packet_merge_param pkt_merge_param;
		struct mlx5e_rss *rss;
		u32 flow_type;
		int err;
		int tt;

		rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context);
		if (!rss)
			return -ENOENT;

		flow_type = flow_type_mask(fs->flow_type);
		tt = flow_type_to_traffic_type(flow_type);
		if (tt < 0)
			return -EINVAL;

		pkt_merge_param = priv->channels.params.packet_merge;
		err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
		if (err)
			return err;
		/* Hold the RSS context for the lifetime of the rule. */
		eth_rule->rss = rss;
		mlx5e_rss_refcnt_inc(eth_rule->rss);
	} else {
		struct mlx5e_params *params = &priv->channels.params;
		enum mlx5e_rq_group group;
		u16 ix;

		mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);

		*tirn = group == MLX5E_RQ_GROUP_XSK ?
			mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) :
			mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix);
	}

	return 0;
}
449
/* Program the HW steering rule for @fs inside flow table @ft.
 *
 * Builds the match spec from the ethtool flow spec, resolves the
 * destination (drop, or a TIR via flow_get_tirn()), then installs the
 * rule.  Returns the rule handle or an ERR_PTR on failure.
 */
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5e_ethtool_rule *eth_rule,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value,
			     fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		/* Special ring cookie: drop matching packets. */
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num);
		if (err)
			goto free;

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	/* Enable outer-header matching only if any mask bit is set. */
	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
	spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	/* spec and dst are only needed while creating the rule. */
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}
501
del_ethtool_rule(struct mlx5e_priv * priv,struct mlx5e_ethtool_rule * eth_rule)502 static void del_ethtool_rule(struct mlx5e_priv *priv,
503 struct mlx5e_ethtool_rule *eth_rule)
504 {
505 if (eth_rule->rule)
506 mlx5_del_flow_rules(eth_rule->rule);
507 if (eth_rule->rss)
508 mlx5e_rss_refcnt_dec(eth_rule->rss);
509 list_del(ð_rule->list);
510 priv->fs.ethtool.tot_num_rules--;
511 put_flow_table(eth_rule->eth_ft);
512 kfree(eth_rule);
513 }
514
find_ethtool_rule(struct mlx5e_priv * priv,int location)515 static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
516 int location)
517 {
518 struct mlx5e_ethtool_rule *iter;
519
520 list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
521 if (iter->flow_spec.location == location)
522 return iter;
523 }
524 return NULL;
525 }
526
get_ethtool_rule(struct mlx5e_priv * priv,int location)527 static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
528 int location)
529 {
530 struct mlx5e_ethtool_rule *eth_rule;
531
532 eth_rule = find_ethtool_rule(priv, location);
533 if (eth_rule)
534 del_ethtool_rule(priv, eth_rule);
535
536 eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
537 if (!eth_rule)
538 return ERR_PTR(-ENOMEM);
539
540 add_rule_to_list(priv, eth_rule);
541 return eth_rule;
542 }
543
544 #define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
545
546 #define all_ones(field) (field == (__force typeof(field))-1)
547 #define all_zeros_or_all_ones(field) \
548 ((field) == 0 || (field) == (__force typeof(field))-1)
549
validate_ethter(struct ethtool_rx_flow_spec * fs)550 static int validate_ethter(struct ethtool_rx_flow_spec *fs)
551 {
552 struct ethhdr *eth_mask = &fs->m_u.ether_spec;
553 int ntuples = 0;
554
555 if (!is_zero_ether_addr(eth_mask->h_dest))
556 ntuples++;
557 if (!is_zero_ether_addr(eth_mask->h_source))
558 ntuples++;
559 if (eth_mask->h_proto)
560 ntuples++;
561 return ntuples;
562 }
563
validate_tcpudp4(struct ethtool_rx_flow_spec * fs)564 static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
565 {
566 struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
567 int ntuples = 0;
568
569 if (l4_mask->tos)
570 return -EINVAL;
571
572 if (l4_mask->ip4src)
573 ntuples++;
574 if (l4_mask->ip4dst)
575 ntuples++;
576 if (l4_mask->psrc)
577 ntuples++;
578 if (l4_mask->pdst)
579 ntuples++;
580 /* Flow is TCP/UDP */
581 return ++ntuples;
582 }
583
validate_ip4(struct ethtool_rx_flow_spec * fs)584 static int validate_ip4(struct ethtool_rx_flow_spec *fs)
585 {
586 struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
587 int ntuples = 0;
588
589 if (l3_mask->l4_4_bytes || l3_mask->tos ||
590 fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
591 return -EINVAL;
592 if (l3_mask->ip4src)
593 ntuples++;
594 if (l3_mask->ip4dst)
595 ntuples++;
596 if (l3_mask->proto)
597 ntuples++;
598 /* Flow is IPv4 */
599 return ++ntuples;
600 }
601
validate_ip6(struct ethtool_rx_flow_spec * fs)602 static int validate_ip6(struct ethtool_rx_flow_spec *fs)
603 {
604 struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
605 int ntuples = 0;
606
607 if (l3_mask->l4_4_bytes || l3_mask->tclass)
608 return -EINVAL;
609 if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
610 ntuples++;
611
612 if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
613 ntuples++;
614 if (l3_mask->l4_proto)
615 ntuples++;
616 /* Flow is IPv6 */
617 return ++ntuples;
618 }
619
validate_tcpudp6(struct ethtool_rx_flow_spec * fs)620 static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
621 {
622 struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
623 int ntuples = 0;
624
625 if (l4_mask->tclass)
626 return -EINVAL;
627
628 if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
629 ntuples++;
630
631 if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
632 ntuples++;
633
634 if (l4_mask->psrc)
635 ntuples++;
636 if (l4_mask->pdst)
637 ntuples++;
638 /* Flow is TCP/UDP */
639 return ++ntuples;
640 }
641
validate_vlan(struct ethtool_rx_flow_spec * fs)642 static int validate_vlan(struct ethtool_rx_flow_spec *fs)
643 {
644 if (fs->m_ext.vlan_etype ||
645 fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
646 return -EINVAL;
647
648 if (fs->m_ext.vlan_tci &&
649 (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
650 return -EINVAL;
651
652 return 1;
653 }
654
/* Validate @fs and count the number of match tuples it uses.
 *
 * The tuple count decides which flow table (priority) the rule lands
 * in; see get_flow_table().  Returns a tuple count >= 0 or a negative
 * errno for a malformed/unsupported spec.
 * NOTE(review): ETHER_FLOW with an empty mask yields 0 tuples, which
 * the caller treats as "not valid" but returns as success (0) — confirm
 * this is the intended userspace contract.
 */
static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	/* Unless the rule drops traffic, its target queue must exist. */
	if (fs->ring_cookie != RX_CLS_FLOW_DISC)
		if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
					fs->ring_cookie))
			return -EINVAL;

	switch (flow_type_mask(fs->flow_type)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -ENOTSUPP;
	}
	if ((fs->flow_type & FLOW_EXT)) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	/* Optional destination-MAC tuple from FLOW_MAC_EXT. */
	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}
715
/* Install (or replace) the ethtool rule at fs->location.
 *
 * Validates the spec, picks/creates the right flow table, replaces any
 * existing rule at the same location, then programs the HW rule.
 * Returns 0 on success or a negative errno.
 */
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		/* NOTE(review): num_tuples == 0 returns 0 (success)
		 * without installing anything — confirm intended.
		 */
		return num_tuples;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	/* Replaces (frees) any rule previously at this location. */
	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;
	if (!eth_ft->ft) {
		err = -EINVAL;
		goto del_ethtool_rule;
	}
	rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	/* Unwinds list linkage, table ref and RSS ref taken above. */
	del_ethtool_rule(priv, eth_rule);

	return err;
}
764
765 static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv * priv,int location)766 mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
767 {
768 struct mlx5e_ethtool_rule *eth_rule;
769 int err = 0;
770
771 if (location >= MAX_NUM_OF_ETHTOOL_RULES)
772 return -ENOSPC;
773
774 eth_rule = find_ethtool_rule(priv, location);
775 if (!eth_rule) {
776 err = -ENOENT;
777 goto out;
778 }
779
780 del_ethtool_rule(priv, eth_rule);
781 out:
782 return err;
783 }
784
/* ETHTOOL_GRXCLSRULE: report the rule stored at @location.
 *
 * With a NULL @info only existence is checked (used by the dump path).
 * For FLOW_RSS rules the RSS context index is reported too.  Returns 0
 * on success, -EINVAL for an out-of-range location, or -ENOENT if the
 * location is empty.
 */
static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
		int index;

		if (eth_rule->flow_spec.location != location)
			continue;
		if (!info)
			return 0;	/* existence check only */
		info->fs = eth_rule->flow_spec;
		if (!eth_rule->rss)
			return 0;
		index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss);
		if (index < 0)
			return index;
		info->rss_context = index;
		return 0;
	}

	return -ENOENT;
}
813
/* ETHTOOL_GRXCLSRLALL: collect the locations of all installed rules.
 *
 * Walks locations upward, skipping empty slots (-ENOENT), until
 * info->rule_cnt entries are gathered or a hard error stops the scan.
 * NOTE(review): if rule_cnt exceeds the number of installed rules the
 * scan runs to MAX_NUM_OF_ETHTOOL_RULES and returns -EINVAL — confirm
 * callers always pass the count from ETHTOOL_GRXCLSRLCNT.
 */
static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;	/* table capacity for userspace */
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, NULL, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}
831
/* Remove every installed ethtool steering rule. */
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
{
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	/* _safe variant: del_ethtool_rule() unlinks and frees each entry. */
	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
		del_ethtool_rule(priv, iter);
}
840
/* Initialize ethtool steering state.  Flow tables themselves are
 * created lazily on first rule insertion (see get_flow_table()).
 */
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}
845
/* Map an ethtool flow type to the mlx5 traffic type (MLX5_TT_*) used to
 * index RSS/TIR state.  Returns -EINVAL for flow types with no
 * corresponding traffic type.
 */
static int flow_type_to_traffic_type(u32 flow_type)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		return MLX5_TT_IPV4_TCP;
	case TCP_V6_FLOW:
		return MLX5_TT_IPV6_TCP;
	case UDP_V4_FLOW:
		return MLX5_TT_IPV4_UDP;
	case UDP_V6_FLOW:
		return MLX5_TT_IPV6_UDP;
	case AH_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_AH;
	case AH_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_AH;
	case ESP_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_ESP;
	case ESP_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_ESP;
	case IPV4_FLOW:
		return MLX5_TT_IPV4;
	case IPV6_FLOW:
		return MLX5_TT_IPV6;
	default:
		return -EINVAL;
	}
}
873
/* ETHTOOL_SRXFH: set which packet fields feed the RSS hash for the
 * traffic type selected by nfc->flow_type.  Only src/dst IP and L4
 * src/dst ports are supported, and only for TCP/UDP over IPv4/IPv6.
 */
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	u8 rx_hash_field = 0;
	int err;
	int tt;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt < 0)
		return tt;

	/* RSS does not support anything other than hashing to queues
	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
	 * port.
	 */
	if (nfc->flow_type != TCP_V4_FLOW &&
	    nfc->flow_type != TCP_V6_FLOW &&
	    nfc->flow_type != UDP_V4_FLOW &&
	    nfc->flow_type != UDP_V6_FLOW)
		return -EOPNOTSUPP;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EOPNOTSUPP;

	if (nfc->data & RXH_IP_SRC)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
	if (nfc->data & RXH_IP_DST)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
	if (nfc->data & RXH_L4_B_0_1)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
	if (nfc->data & RXH_L4_B_2_3)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	/* state_lock serializes against concurrent RSS reconfiguration. */
	mutex_lock(&priv->state_lock);
	err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field);
	mutex_unlock(&priv->state_lock);

	return err;
}
914
mlx5e_get_rss_hash_opt(struct mlx5e_priv * priv,struct ethtool_rxnfc * nfc)915 static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
916 struct ethtool_rxnfc *nfc)
917 {
918 u32 hash_field = 0;
919 int tt;
920
921 tt = flow_type_to_traffic_type(nfc->flow_type);
922 if (tt < 0)
923 return tt;
924
925 hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt);
926 nfc->data = 0;
927
928 if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
929 nfc->data |= RXH_IP_SRC;
930 if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
931 nfc->data |= RXH_IP_DST;
932 if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
933 nfc->data |= RXH_L4_B_0_1;
934 if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
935 nfc->data |= RXH_L4_B_2_3;
936
937 return 0;
938 }
939
mlx5e_ethtool_set_rxnfc(struct mlx5e_priv * priv,struct ethtool_rxnfc * cmd)940 int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
941 {
942 int err = 0;
943
944 switch (cmd->cmd) {
945 case ETHTOOL_SRXCLSRLINS:
946 err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context);
947 break;
948 case ETHTOOL_SRXCLSRLDEL:
949 err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
950 break;
951 case ETHTOOL_SRXFH:
952 err = mlx5e_set_rss_hash_opt(priv, cmd);
953 break;
954 default:
955 err = -EOPNOTSUPP;
956 break;
957 }
958
959 return err;
960 }
961
/* Dispatch ethtool get_rxnfc sub-commands to their handlers. */
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	switch (info->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
		return 0;
	case ETHTOOL_GRXCLSRULE:
		return mlx5e_ethtool_get_flow(priv, info, info->fs.location);
	case ETHTOOL_GRXCLSRLALL:
		return mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
	case ETHTOOL_GRXFH:
		return mlx5e_get_rss_hash_opt(priv, info);
	default:
		return -EOPNOTSUPP;
	}
}
987
988