1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/list.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/mlx5/fs.h>
38 #include <linux/mlx5/mpfs.h>
39 #include "en_tc.h"
40 #include "lib/mpfs.h"
41 #include "en/ptp.h"
42 #include "en/fs_ethtool.h"
43
/* Per-netdev RX flow-steering state: the chain of flow tables and rules
 * (promisc -> vlan -> l2 -> ttc/inner_ttc) plus the bookkeeping needed to
 * rebuild them when the RX mode or address lists change.
 */
struct mlx5e_flow_steering {
	struct work_struct set_rx_mode_work;	/* deferred RX-mode resync */
	bool state_destroy;			/* steering teardown state flag */
	bool vlan_strip_disable;		/* C-tag stripping disabled by user */
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5_flow_namespace *ns;		/* RX (kernel) namespace */
	struct mlx5_flow_namespace *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
	struct mlx5e_ethtool_steering *ethtool;	/* ethtool -U/-N rules */
#endif
	struct mlx5e_tc_table *tc;
	struct mlx5e_promisc_table promisc;	/* catch-all table, promisc mode only */
	struct mlx5e_vlan_table *vlan;
	struct mlx5e_l2_table l2;		/* DMAC match table */
	struct mlx5_ttc_table *ttc;		/* traffic-type classifier */
	struct mlx5_ttc_table *inner_ttc;	/* TTC on inner (tunneled) headers */
#ifdef CONFIG_MLX5_EN_ARFS
	struct mlx5e_arfs_tables *arfs;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_fs_tcp *accel_tcp;
#endif
	struct mlx5e_fs_udp *udp;
	struct mlx5e_fs_any *any;
	struct mlx5e_ptp_fs *ptp_fs;
};
71
72 static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
73 struct mlx5e_l2_rule *ai, int type);
74 static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
75 struct mlx5e_l2_rule *ai);
76
/* L2 rule match flavors (see mlx5e_add_l2_flow_rule()). */
enum {
	MLX5E_FULLMATCH = 0,	/* exact DMAC match */
	MLX5E_ALLMULTI = 1,	/* match the multicast bit only */
};
81
/* L2 address categories. */
enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};
88
/* Pending action on an L2 hash node, resolved by mlx5e_execute_l2_action(). */
enum {
	MLX5E_ACTION_NONE = 0,	/* address unchanged, keep rule */
	MLX5E_ACTION_ADD = 1,	/* new address, install rule */
	MLX5E_ACTION_DEL = 2,	/* address gone, remove rule + node */
};
94
/* One tracked netdev UC/MC address, bucketed by mlx5e_hash_l2(). */
struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8 action;			/* MLX5E_ACTION_* pending on this address */
	struct mlx5e_l2_rule ai;	/* address + its installed flow rule */
	bool mpfs;			/* true if the MAC was added to MPFS */
};
101
mlx5e_hash_l2(const u8 * addr)102 static inline int mlx5e_hash_l2(const u8 *addr)
103 {
104 return addr[5];
105 }
106
mlx5e_add_l2_to_hash(struct hlist_head * hash,const u8 * addr)107 static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
108 {
109 struct mlx5e_l2_hash_node *hn;
110 int ix = mlx5e_hash_l2(addr);
111 int found = 0;
112
113 hlist_for_each_entry(hn, &hash[ix], hlist)
114 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
115 found = 1;
116 break;
117 }
118
119 if (found) {
120 hn->action = MLX5E_ACTION_NONE;
121 return;
122 }
123
124 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
125 if (!hn)
126 return;
127
128 ether_addr_copy(hn->ai.addr, addr);
129 hn->action = MLX5E_ACTION_ADD;
130
131 hlist_add_head(&hn->hlist, &hash[ix]);
132 }
133
/* Unlink @hn from its hash bucket and free it. Caller must have already
 * removed the associated flow rule.
 */
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
139
/* VLAN steering table: one rule per active C/S-tag VID, plus the fixed
 * untagged / any-tag / trap rules.
 */
struct mlx5e_vlan_table {
	struct mlx5e_flow_table ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);	/* VIDs added with 802.1Q */
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);	/* VIDs added with 802.1ad */
	struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle *untagged_rule;		/* no C-tag and no S-tag */
	struct mlx5_flow_handle *any_cvlan_rule;	/* any C-tag (filter off) */
	struct mlx5_flow_handle *any_svlan_rule;	/* any S-tag */
	struct mlx5_flow_handle *trap_rule;
	bool cvlan_filter_disabled;			/* rx-vlan-filter off */
};
152
/* Return the bitmap of currently active S-tag (802.1ad) VIDs. */
unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
{
	return vlan->active_svlans;
}
157
/* Return the underlying mlx5 flow table of the VLAN steering table. */
struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
{
	return vlan->ft.t;
}
162
mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering * fs)163 static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
164 {
165 int max_list_size;
166 int list_size;
167 u16 *vlans;
168 int vlan;
169 int err;
170 int i;
171
172 list_size = 0;
173 for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
174 list_size++;
175
176 max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
177
178 if (list_size > max_list_size) {
179 fs_warn(fs, "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
180 list_size, max_list_size);
181 list_size = max_list_size;
182 }
183
184 vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
185 if (!vlans)
186 return -ENOMEM;
187
188 i = 0;
189 for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
190 if (i >= list_size)
191 break;
192 vlans[i++] = vlan;
193 }
194
195 err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
196 if (err)
197 fs_err(fs, "Failed to modify vport vlans list err(%d)\n",
198 err);
199
200 kvfree(vlans);
201 return err;
202 }
203
/* Kinds of rules installable in the VLAN steering table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,		/* neither C-tag nor S-tag */
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,	/* any C-tag, any VID */
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,	/* any S-tag, any VID */
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,	/* C-tag with a specific VID */
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,	/* S-tag with a specific VID */
};
211
/* Install one VLAN steering rule of @rule_type (matching @vid where
 * relevant) into the VLAN table, steering hits to the L2 table.
 *
 * @spec is caller-allocated scratch; this function fills in the outer-header
 * match criteria/values. Returns 0 on success (including the
 * rule-already-exists case, which only warns) or a negative errno.
 */
static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = fs->vlan->ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	/* All VLAN rules forward to the L2 (DMAC) table. */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs->l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &fs->vlan->untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &fs->vlan->any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &fs->vlan->any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		/* match S-tag presence AND the exact VID */
		rule_p = &fs->vlan->active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		/* match C-tag presence AND the exact VID */
		rule_p = &fs->vlan->active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
		return 0;

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;	/* don't leave an ERR_PTR behind */
		fs_err(fs, "%s: add rule failed\n", __func__);
	}

	return err;
}
284
/* Allocate a scratch flow spec and install a VLAN rule of @rule_type.
 * For C-tag VID rules, first resync the vport context VLAN list.
 */
static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(fs);

	err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);
	kvfree(spec);
	return err;
}
304
/* Remove the VLAN rule of @rule_type (for @vid where relevant), if it is
 * installed. C-tag VID removals also resync the vport context VLAN list.
 */
static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
				   enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_handle **rule_p = NULL;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &fs->vlan->untagged_rule;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &fs->vlan->any_cvlan_rule;
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &fs->vlan->any_svlan_rule;
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		rule_p = &fs->vlan->active_svlans_rule[vid];
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		rule_p = &fs->vlan->active_cvlans_rule[vid];
		break;
	}

	if (rule_p && *rule_p) {
		mlx5_del_flow_rules(*rule_p);
		*rule_p = NULL;
	}

	/* Keep the FW-side VLAN list in sync with the C-tag bitmap. */
	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(fs);
}
342
mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering * fs)343 static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
344 {
345 mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
346 mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
347 }
348
mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering * fs)349 static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
350 {
351 int err;
352
353 err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
354 if (err)
355 return err;
356
357 return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
358 }
359
360 static struct mlx5_flow_handle *
mlx5e_add_trap_rule(struct mlx5_flow_table * ft,int trap_id,int tir_num)361 mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
362 {
363 struct mlx5_flow_destination dest = {};
364 MLX5_DECLARE_FLOW_ACT(flow_act);
365 struct mlx5_flow_handle *rule;
366 struct mlx5_flow_spec *spec;
367
368 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
369 if (!spec)
370 return ERR_PTR(-ENOMEM);
371 spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
372 spec->flow_context.flow_tag = trap_id;
373 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
374 dest.tir_num = tir_num;
375
376 rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
377 kvfree(spec);
378 return rule;
379 }
380
mlx5e_add_vlan_trap(struct mlx5e_flow_steering * fs,int trap_id,int tir_num)381 int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
382 {
383 struct mlx5_flow_table *ft = fs->vlan->ft.t;
384 struct mlx5_flow_handle *rule;
385 int err;
386
387 rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
388 if (IS_ERR(rule)) {
389 err = PTR_ERR(rule);
390 fs->vlan->trap_rule = NULL;
391 fs_err(fs, "%s: add VLAN trap rule failed, err %d\n",
392 __func__, err);
393 return err;
394 }
395 fs->vlan->trap_rule = rule;
396 return 0;
397 }
398
mlx5e_remove_vlan_trap(struct mlx5e_flow_steering * fs)399 void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs)
400 {
401 if (fs->vlan->trap_rule) {
402 mlx5_del_flow_rules(fs->vlan->trap_rule);
403 fs->vlan->trap_rule = NULL;
404 }
405 }
406
mlx5e_add_mac_trap(struct mlx5e_flow_steering * fs,int trap_id,int tir_num)407 int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
408 {
409 struct mlx5_flow_table *ft = fs->l2.ft.t;
410 struct mlx5_flow_handle *rule;
411 int err;
412
413 rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
414 if (IS_ERR(rule)) {
415 err = PTR_ERR(rule);
416 fs->l2.trap_rule = NULL;
417 fs_err(fs, "%s: add MAC trap rule failed, err %d\n",
418 __func__, err);
419 return err;
420 }
421 fs->l2.trap_rule = rule;
422 return 0;
423 }
424
mlx5e_remove_mac_trap(struct mlx5e_flow_steering * fs)425 void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs)
426 {
427 if (fs->l2.trap_rule) {
428 mlx5_del_flow_rules(fs->l2.trap_rule);
429 fs->l2.trap_rule = NULL;
430 }
431 }
432
/* Re-enable C-tag VLAN filtering by removing the any-C-tag catch-all rule.
 * In promiscuous mode the catch-all rule is left in place.
 */
void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
	if (!fs->vlan->cvlan_filter_disabled)
		return;	/* already enabled */

	fs->vlan->cvlan_filter_disabled = false;
	if (!promisc)
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
443
/* Disable C-tag VLAN filtering by installing the any-C-tag catch-all rule.
 * In promiscuous mode the rule is unnecessary and not added.
 */
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
	if (fs->vlan->cvlan_filter_disabled)
		return;	/* already disabled */

	fs->vlan->cvlan_filter_disabled = true;
	if (!promisc)
		mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
454
/* Activate C-tag @vid: mark it in the bitmap and install its steering rule,
 * rolling the bitmap back on failure.
 */
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
	int err;

	set_bit(vid, fs->vlan->active_cvlans);
	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, fs->vlan->active_cvlans);	/* roll back */

	return err;
}
467
/* Activate S-tag @vid: update the bitmap, install the steering rule, and
 * re-evaluate netdev features on success (rolled back on failure).
 */
static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
				  struct net_device *netdev, u16 vid)
{
	int err;

	set_bit(vid, fs->vlan->active_svlans);
	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, fs->vlan->active_svlans);
		return err;
	}

	/* Need to fix some features.. */
	netdev_update_features(netdev);
	return 0;
}
485
/* ndo_vlan_rx_add_vid backend: dispatch on the VLAN protocol.
 * Returns 0, a negative errno, or -EOPNOTSUPP for unknown protocols.
 */
int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
			     struct net_device *netdev,
			     __be16 proto, u16 vid)
{
	if (!fs->vlan) {
		fs_err(fs, "Vlan doesn't exist\n");
		return -EINVAL;
	}

	switch (be16_to_cpu(proto)) {
	case ETH_P_8021Q:
		return mlx5e_vlan_rx_add_cvid(fs, vid);
	case ETH_P_8021AD:
		return mlx5e_vlan_rx_add_svid(fs, netdev, vid);
	default:
		return -EOPNOTSUPP;
	}
}
503
/* ndo_vlan_rx_kill_vid backend: deactivate @vid for the given protocol.
 * Unknown protocols are ignored (returns 0), matching the add path's
 * supported set.
 */
int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
			      struct net_device *netdev,
			      __be16 proto, u16 vid)
{
	if (!fs->vlan) {
		fs_err(fs, "Vlan doesn't exist\n");
		return -EINVAL;
	}

	switch (be16_to_cpu(proto)) {
	case ETH_P_8021Q:
		clear_bit(vid, fs->vlan->active_cvlans);
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
		break;
	case ETH_P_8021AD:
		clear_bit(vid, fs->vlan->active_svlans);
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
		netdev_update_features(netdev);
		break;
	}

	return 0;
}
524
mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering * fs)525 static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
526 {
527 int i;
528
529 mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
530
531 for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
532 mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
533 }
534
535 for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
536 mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
537
538 if (fs->vlan->cvlan_filter_disabled)
539 mlx5e_fs_add_any_vid_rules(fs);
540 }
541
/* Tear down every installed VLAN rule. Intended to run during steering
 * shutdown; teardown ordering relative to set_rx_mode matters (see the
 * comment below).
 */
static void mlx5e_del_vlan_rules(struct mlx5e_flow_steering *fs)
{
	int i;

	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	/* NOTE(review): warns if state_destroy is still set here — presumably
	 * a sanity check on teardown ordering; confirm against callers.
	 */
	WARN_ON_ONCE(fs->state_destroy);

	mlx5e_remove_vlan_trap(fs);

	/* must be called after DESTROY bit is set and
	 * set_rx_mode is called and flushed
	 */
	if (fs->vlan->cvlan_filter_disabled)
		mlx5e_fs_del_any_vid_rules(fs);
}
565
/* Iterate every mlx5e_l2_hash_node in all buckets of @hash. Safe variant:
 * the current node may be freed while iterating (@tmp holds the next link).
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
569
/* Apply the pending ADD/DEL action recorded on @hn: install or remove its
 * L2 steering rule and, for unicast addresses, the MPFS (multi-PF switch)
 * entry.
 */
static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	/* Copy the MAC out of the node: the DEL path frees @hn before the
	 * warning below is printed.
	 */
	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
			/* remember whether MPFS holds this MAC, so DEL only
			 * removes what was actually added
			 */
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(fs, &hn->ai);
		mlx5e_del_l2_from_hash(hn);	/* frees @hn */
		break;
	}

	if (l2_err)
		fs_warn(fs, "MPFS, failed to %s mac %pM, err(%d)\n",
			action == MLX5E_ACTION_ADD ? "add" : "del",
			mac_addr, l2_err);
}
602
/* Snapshot the netdev's UC/MC address lists (plus its own dev_addr) into
 * the steering hash tables. The netdev address lock is held across the
 * walk, which is why mlx5e_add_l2_to_hash() allocates with GFP_ATOMIC.
 */
static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
				   struct net_device *netdev)
{
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	/* the device's own MAC is tracked alongside the UC list */
	mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
619
/* Flatten the tracked UC or MC hash into @addr_array (capacity @size).
 * For UC the device's own MAC goes first; for MC the broadcast address
 * goes first when enabled. The device MAC is skipped during the walk to
 * avoid duplicating it.
 */
static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
				  struct net_device *ndev,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int idx = 0;
	int bkt;

	addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[idx++], ndev->dev_addr);
	else if (fs->l2.broadcast_enabled)
		ether_addr_copy(addr_array[idx++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, bkt) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;	/* already pushed first */
		if (idx >= size)
			break;		/* array full, drop the rest */
		ether_addr_copy(addr_array[idx++], hn->ai.addr);
	}
}
646
/* Push the tracked UC or MC MAC list into the NIC vport context. If more
 * addresses are tracked than the device supports, the list is truncated
 * with a warning. Errors are logged but not propagated.
 */
static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
						 struct net_device *netdev,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	/* MC list carries an extra slot for the broadcast address */
	size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);

	addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		fs_warn(fs, "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
	}

	/* size == 0 legitimately clears the list (addr_array stays NULL) */
	err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
	if (err)
		fs_err(fs, "Failed to modify vport %s list err(%d)\n",
		       is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
692
mlx5e_vport_context_update(struct mlx5e_flow_steering * fs,struct net_device * netdev)693 static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
694 struct net_device *netdev)
695 {
696 struct mlx5e_l2_table *ea = &fs->l2;
697
698 mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
699 mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
700 mlx5_modify_nic_vport_promisc(fs->mdev, 0,
701 ea->allmulti_enabled,
702 ea->promisc_enabled);
703 }
704
mlx5e_apply_netdev_addr(struct mlx5e_flow_steering * fs)705 static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
706 {
707 struct mlx5e_l2_hash_node *hn;
708 struct hlist_node *tmp;
709 int i;
710
711 mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
712 mlx5e_execute_l2_action(fs, hn);
713
714 mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
715 mlx5e_execute_l2_action(fs, hn);
716 }
717
/* Reconcile the steering hashes with the netdev address lists: mark every
 * tracked address for deletion, re-sync from the netdev (which flips
 * still-present entries back to NONE and marks new ones ADD), then apply
 * the resulting actions.
 */
static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
				     struct net_device *netdev)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	/* NOTE(review): state_destroy here acts as "steering is live" —
	 * during teardown the sync is skipped so everything stays DEL and
	 * gets flushed; confirm naming/semantics against the setters.
	 */
	if (fs->state_destroy)
		mlx5e_sync_netdev_addr(fs, netdev);

	mlx5e_apply_netdev_addr(fs);
}
735
/* The promisc table holds a single catch-all rule: one group, one FTE. */
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
738
mlx5e_add_promisc_rule(struct mlx5e_flow_steering * fs)739 static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
740 {
741 struct mlx5_flow_table *ft = fs->promisc.ft.t;
742 struct mlx5_flow_destination dest = {};
743 struct mlx5_flow_handle **rule_p;
744 MLX5_DECLARE_FLOW_ACT(flow_act);
745 struct mlx5_flow_spec *spec;
746 int err = 0;
747
748 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
749 if (!spec)
750 return -ENOMEM;
751 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
752 dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
753
754 rule_p = &fs->promisc.rule;
755 *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
756 if (IS_ERR(*rule_p)) {
757 err = PTR_ERR(*rule_p);
758 *rule_p = NULL;
759 fs_err(fs, "%s: add promiscuous rule failed\n", __func__);
760 }
761 kvfree(spec);
762 return err;
763 }
764
mlx5e_create_promisc_table(struct mlx5e_flow_steering * fs)765 static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
766 {
767 struct mlx5e_flow_table *ft = &fs->promisc.ft;
768 struct mlx5_flow_table_attr ft_attr = {};
769 int err;
770
771 ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
772 ft_attr.autogroup.max_num_groups = 1;
773 ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
774 ft_attr.prio = MLX5E_NIC_PRIO;
775
776 ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
777 if (IS_ERR(ft->t)) {
778 err = PTR_ERR(ft->t);
779 fs_err(fs, "fail to create promisc table err=%d\n", err);
780 return err;
781 }
782
783 err = mlx5e_add_promisc_rule(fs);
784 if (err)
785 goto err_destroy_promisc_table;
786
787 return 0;
788
789 err_destroy_promisc_table:
790 mlx5_destroy_flow_table(ft->t);
791 ft->t = NULL;
792
793 return err;
794 }
795
mlx5e_del_promisc_rule(struct mlx5e_flow_steering * fs)796 static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
797 {
798 if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
799 return;
800 mlx5_del_flow_rules(fs->promisc.rule);
801 fs->promisc.rule = NULL;
802 }
803
mlx5e_destroy_promisc_table(struct mlx5e_flow_steering * fs)804 static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
805 {
806 if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
807 return;
808 mlx5e_del_promisc_rule(fs);
809 mlx5_destroy_flow_table(fs->promisc.ft.t);
810 fs->promisc.ft.t = NULL;
811 }
812
/* Worker body for RX-mode changes: diff the desired promisc/allmulti/
 * broadcast state against the current one, enable new modes before the
 * address resync and disable stale ones after it (so traffic is never
 * dropped during the transition), then push everything to the vport
 * context.
 */
void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
			       struct net_device *netdev)
{
	struct mlx5e_l2_table *ea = &fs->l2;

	/* NOTE(review): state_destroy doubles as an "RX mode enabled" flag
	 * here — when clear, all modes are forced off; confirm against the
	 * lifecycle code that sets it.
	 */
	bool rx_mode_enable = fs->state_destroy;
	bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	/* edge-detect each mode against the cached state */
	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
	int err;

	if (enable_promisc) {
		err = mlx5e_create_promisc_table(fs);
		if (err)
			enable_promisc = false;
		if (!fs->vlan_strip_disable && !err)
			fs_warn_once(fs,
				     "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);

	/* resync per-address rules between enables and disables */
	mlx5e_handle_netdev_addr(fs, netdev);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
	if (disable_promisc)
		mlx5e_destroy_promisc_table(fs);

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(fs, netdev);
}
859
mlx5e_destroy_groups(struct mlx5e_flow_table * ft)860 static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
861 {
862 int i;
863
864 for (i = ft->num_groups - 1; i >= 0; i--) {
865 if (!IS_ERR_OR_NULL(ft->g[i]))
866 mlx5_destroy_flow_group(ft->g[i]);
867 ft->g[i] = NULL;
868 }
869 ft->num_groups = 0;
870 }
871
/* Cache the netdev broadcast address for the L2 broadcast steering rule. */
void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
	ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}
876
/* Fully tear down @ft: groups first (they reference the table), then the
 * group array and the table itself.
 */
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
884
mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering * fs,struct mlx5e_rx_res * rx_res,struct ttc_params * ttc_params)885 static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
886 struct mlx5e_rx_res *rx_res,
887 struct ttc_params *ttc_params)
888 {
889 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
890 int tt;
891
892 memset(ttc_params, 0, sizeof(*ttc_params));
893 ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
894 MLX5_FLOW_NAMESPACE_KERNEL);
895 ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
896 ft_attr->prio = MLX5E_NIC_PRIO;
897
898 for (tt = 0; tt < MLX5_NUM_TT; tt++) {
899 ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
900 ttc_params->dests[tt].tir_num =
901 tt == MLX5_TT_ANY ?
902 mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
903 mlx5e_rx_res_get_tirn_rss_inner(rx_res,
904 tt);
905 }
906 }
907
/* Populate @ttc_params for the outer TTC table: each traffic type steers
 * to its RSS TIR (MLX5_TT_ANY to the first direct TIR). When @tunnel is
 * set and the device supports an inner flow table, tunnel traffic types
 * are chained to the inner TTC table.
 */
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
			  struct mlx5e_rx_res *rx_res,
			  struct ttc_params *ttc_params, bool tunnel)

{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss(rx_res, tt);
	}

	ttc_params->inner_ttc = tunnel;
	/* inner TTC chaining only when tunneling is requested and supported */
	if (!tunnel || !mlx5_tunnel_inner_ft_supported(fs->mdev))
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(fs->inner_ttc);
	}
}
941
mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering * fs,struct mlx5e_l2_rule * ai)942 static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
943 struct mlx5e_l2_rule *ai)
944 {
945 if (!IS_ERR_OR_NULL(ai->rule)) {
946 mlx5_del_flow_rules(ai->rule);
947 ai->rule = NULL;
948 }
949 }
950
mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering * fs,struct mlx5e_l2_rule * ai,int type)951 static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
952 struct mlx5e_l2_rule *ai, int type)
953 {
954 struct mlx5_flow_table *ft = fs->l2.ft.t;
955 struct mlx5_flow_destination dest = {};
956 MLX5_DECLARE_FLOW_ACT(flow_act);
957 struct mlx5_flow_spec *spec;
958 int err = 0;
959 u8 *mc_dmac;
960 u8 *mv_dmac;
961
962 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
963 if (!spec)
964 return -ENOMEM;
965
966 mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
967 outer_headers.dmac_47_16);
968 mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
969 outer_headers.dmac_47_16);
970
971 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
972 dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
973
974 switch (type) {
975 case MLX5E_FULLMATCH:
976 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
977 eth_broadcast_addr(mc_dmac);
978 ether_addr_copy(mv_dmac, ai->addr);
979 break;
980
981 case MLX5E_ALLMULTI:
982 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
983 mc_dmac[0] = 0x01;
984 mv_dmac[0] = 0x01;
985 break;
986 }
987
988 ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
989 if (IS_ERR(ai->rule)) {
990 fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac);
991 err = PTR_ERR(ai->rule);
992 ai->rule = NULL;
993 }
994
995 kvfree(spec);
996
997 return err;
998 }
999
#define MLX5E_NUM_L2_GROUPS	 3
#define MLX5E_L2_GROUP1_SIZE	 BIT(15)
#define MLX5E_L2_GROUP2_SIZE	 BIT(0)
#define MLX5E_L2_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE	 (MLX5E_L2_GROUP1_SIZE +\
				  MLX5E_L2_GROUP2_SIZE +\
				  MLX5E_L2_GROUP_TRAP_SIZE)
/*
 * Create the flow groups of the L2 table, in order:
 *   group 0: full 48-bit DMAC match (unicast/broadcast entries)
 *   group 1: single entry matching only the multicast bit (allmulti)
 *   group 2: single match-all entry, reserved for L2 traps (must be last)
 * Returns 0 on success or a negative errno; on failure all groups
 * created so far are destroyed and ft->g is freed.
 */
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti.
	 * Deliberately no memset(in) here: match_criteria_enable from the
	 * previous group is reused; only the DMAC mask is narrowed down to
	 * the multicast bit.
	 */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for l2 traps: cleared criteria => matches everything */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);
	kfree(ft->g);

	return err;
}
1073
mlx5e_destroy_l2_table(struct mlx5e_flow_steering * fs)1074 static void mlx5e_destroy_l2_table(struct mlx5e_flow_steering *fs)
1075 {
1076 mlx5e_destroy_flow_table(&fs->l2.ft);
1077 }
1078
mlx5e_create_l2_table(struct mlx5e_flow_steering * fs)1079 static int mlx5e_create_l2_table(struct mlx5e_flow_steering *fs)
1080 {
1081 struct mlx5e_l2_table *l2_table = &fs->l2;
1082 struct mlx5e_flow_table *ft = &l2_table->ft;
1083 struct mlx5_flow_table_attr ft_attr = {};
1084 int err;
1085
1086 ft->num_groups = 0;
1087
1088 ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
1089 ft_attr.level = MLX5E_L2_FT_LEVEL;
1090 ft_attr.prio = MLX5E_NIC_PRIO;
1091
1092 ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
1093 if (IS_ERR(ft->t)) {
1094 err = PTR_ERR(ft->t);
1095 ft->t = NULL;
1096 return err;
1097 }
1098
1099 err = mlx5e_create_l2_table_groups(l2_table);
1100 if (err)
1101 goto err_destroy_flow_table;
1102
1103 return 0;
1104
1105 err_destroy_flow_table:
1106 mlx5_destroy_flow_table(ft->t);
1107 ft->t = NULL;
1108
1109 return err;
1110 }
1111
#define MLX5E_NUM_VLAN_GROUPS	   5
#define MLX5E_VLAN_GROUP0_SIZE	   BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	   BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE	   BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE	   BIT(0)
#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE	   (MLX5E_VLAN_GROUP0_SIZE +\
				    MLX5E_VLAN_GROUP1_SIZE +\
				    MLX5E_VLAN_GROUP2_SIZE +\
				    MLX5E_VLAN_GROUP3_SIZE +\
				    MLX5E_VLAN_GROUP_TRAP_SIZE)

/*
 * Build the five VLAN table flow groups, in order:
 *   group 0: C-tag (cvlan) presence + first VID exact match
 *   group 1: S-tag (svlan) presence + first VID exact match
 *   group 2: C-tag presence only
 *   group 3: S-tag presence only
 *   group 4: match-all, reserved for VLAN traps (must be last)
 * @in is a caller-provided scratch create_flow_group_in buffer of
 * @inlen bytes; it is re-zeroed before each group is programmed.
 * On failure, groups created so far are destroyed.
 */
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Group 0: cvlan tag + first VID */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 1: svlan tag + first VID */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 2: cvlan tag presence only (no VID) */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 3: svlan tag presence only (no VID) */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Group 4: cleared criteria => match-all, for VLAN traps */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
1195
mlx5e_create_vlan_table_groups(struct mlx5e_flow_table * ft)1196 static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
1197 {
1198 u32 *in;
1199 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1200 int err;
1201
1202 in = kvzalloc(inlen, GFP_KERNEL);
1203 if (!in)
1204 return -ENOMEM;
1205
1206 err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
1207
1208 kvfree(in);
1209 return err;
1210 }
1211
mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering * fs)1212 static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
1213 {
1214 struct mlx5_flow_table_attr ft_attr = {};
1215 struct mlx5e_flow_table *ft;
1216 int err;
1217
1218 ft = &fs->vlan->ft;
1219 ft->num_groups = 0;
1220
1221 ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
1222 ft_attr.level = MLX5E_VLAN_FT_LEVEL;
1223 ft_attr.prio = MLX5E_NIC_PRIO;
1224
1225 ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
1226 if (IS_ERR(ft->t))
1227 return PTR_ERR(ft->t);
1228
1229 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1230 if (!ft->g) {
1231 err = -ENOMEM;
1232 goto err_destroy_vlan_table;
1233 }
1234
1235 err = mlx5e_create_vlan_table_groups(ft);
1236 if (err)
1237 goto err_free_g;
1238
1239 mlx5e_fs_add_vlan_rules(fs);
1240
1241 return 0;
1242
1243 err_free_g:
1244 kfree(ft->g);
1245 err_destroy_vlan_table:
1246 mlx5_destroy_flow_table(ft->t);
1247
1248 return err;
1249 }
1250
mlx5e_destroy_vlan_table(struct mlx5e_flow_steering * fs)1251 static void mlx5e_destroy_vlan_table(struct mlx5e_flow_steering *fs)
1252 {
1253 mlx5e_del_vlan_rules(fs);
1254 mlx5e_destroy_flow_table(&fs->vlan->ft);
1255 }
1256
mlx5e_destroy_inner_ttc_table(struct mlx5e_flow_steering * fs)1257 static void mlx5e_destroy_inner_ttc_table(struct mlx5e_flow_steering *fs)
1258 {
1259 if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
1260 return;
1261 mlx5_destroy_ttc_table(fs->inner_ttc);
1262 }
1263
/* Destroy the outer traffic-type classification (TTC) table. */
void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs)
{
	mlx5_destroy_ttc_table(fs->ttc);
}
1268
mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering * fs,struct mlx5e_rx_res * rx_res)1269 static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
1270 struct mlx5e_rx_res *rx_res)
1271 {
1272 struct ttc_params ttc_params = {};
1273
1274 if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
1275 return 0;
1276
1277 mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
1278 fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
1279 &ttc_params);
1280 if (IS_ERR(fs->inner_ttc))
1281 return PTR_ERR(fs->inner_ttc);
1282 return 0;
1283 }
1284
mlx5e_create_ttc_table(struct mlx5e_flow_steering * fs,struct mlx5e_rx_res * rx_res)1285 int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
1286 struct mlx5e_rx_res *rx_res)
1287 {
1288 struct ttc_params ttc_params = {};
1289
1290 mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
1291 fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
1292 if (IS_ERR(fs->ttc))
1293 return PTR_ERR(fs->ttc);
1294 return 0;
1295 }
1296
mlx5e_create_flow_steering(struct mlx5e_flow_steering * fs,struct mlx5e_rx_res * rx_res,const struct mlx5e_profile * profile,struct net_device * netdev)1297 int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
1298 struct mlx5e_rx_res *rx_res,
1299 const struct mlx5e_profile *profile,
1300 struct net_device *netdev)
1301 {
1302 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(fs->mdev,
1303 MLX5_FLOW_NAMESPACE_KERNEL);
1304 int err;
1305
1306 if (!ns)
1307 return -EOPNOTSUPP;
1308
1309 mlx5e_fs_set_ns(fs, ns, false);
1310 err = mlx5e_arfs_create_tables(fs, rx_res,
1311 !!(netdev->hw_features & NETIF_F_NTUPLE));
1312 if (err) {
1313 fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
1314 netdev->hw_features &= ~NETIF_F_NTUPLE;
1315 }
1316
1317 err = mlx5e_create_inner_ttc_table(fs, rx_res);
1318 if (err) {
1319 fs_err(fs, "Failed to create inner ttc table, err=%d\n", err);
1320 goto err_destroy_arfs_tables;
1321 }
1322
1323 err = mlx5e_create_ttc_table(fs, rx_res);
1324 if (err) {
1325 fs_err(fs, "Failed to create ttc table, err=%d\n", err);
1326 goto err_destroy_inner_ttc_table;
1327 }
1328
1329 err = mlx5e_create_l2_table(fs);
1330 if (err) {
1331 fs_err(fs, "Failed to create l2 table, err=%d\n", err);
1332 goto err_destroy_ttc_table;
1333 }
1334
1335 err = mlx5e_fs_create_vlan_table(fs);
1336 if (err) {
1337 fs_err(fs, "Failed to create vlan table, err=%d\n", err);
1338 goto err_destroy_l2_table;
1339 }
1340
1341 err = mlx5e_ptp_alloc_rx_fs(fs, profile);
1342 if (err)
1343 goto err_destory_vlan_table;
1344
1345 mlx5e_ethtool_init_steering(fs);
1346
1347 return 0;
1348
1349 err_destory_vlan_table:
1350 mlx5e_destroy_vlan_table(fs);
1351 err_destroy_l2_table:
1352 mlx5e_destroy_l2_table(fs);
1353 err_destroy_ttc_table:
1354 mlx5e_destroy_ttc_table(fs);
1355 err_destroy_inner_ttc_table:
1356 mlx5e_destroy_inner_ttc_table(fs);
1357 err_destroy_arfs_tables:
1358 mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
1359
1360 return err;
1361 }
1362
/*
 * Tear down the RX steering pipeline in the reverse order of
 * mlx5e_create_flow_steering().  @ntuple must mirror the value the
 * aRFS tables were (not) created with.
 */
void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
				 const struct mlx5e_profile *profile)
{
	mlx5e_ptp_free_rx_fs(fs, profile);
	mlx5e_destroy_vlan_table(fs);
	mlx5e_destroy_l2_table(fs);
	mlx5e_destroy_ttc_table(fs);
	mlx5e_destroy_inner_ttc_table(fs);
	mlx5e_arfs_destroy_tables(fs, ntuple);
	mlx5e_ethtool_cleanup_steering(fs);
}
1374
mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering * fs)1375 static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
1376 {
1377 fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
1378 if (!fs->vlan)
1379 return -ENOMEM;
1380 return 0;
1381 }
1382
/* Free the VLAN table state; kvfree(NULL) is a no-op, so this is safe
 * even when the profile never allocated it.
 */
static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
{
	kvfree(fs->vlan);
}

/* Accessor: VLAN table state (NULL when the profile lacks FS_VLAN). */
struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs)
{
	return fs->vlan;
}
1392
mlx5e_fs_tc_alloc(struct mlx5e_flow_steering * fs)1393 static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
1394 {
1395 fs->tc = mlx5e_tc_table_alloc();
1396 if (IS_ERR(fs->tc))
1397 return -ENOMEM;
1398 return 0;
1399 }
1400
/* Release the TC table state allocated by mlx5e_fs_tc_alloc().
 * NOTE(review): also reached on init error paths where fs->tc may be
 * NULL — assumes mlx5e_tc_table_free() tolerates that; confirm in
 * en_tc.c.
 */
static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
{
	mlx5e_tc_table_free(fs->tc);
}

/* Accessor: TC table state. */
struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs)
{
	return fs->tc;
}
1410
#ifdef CONFIG_MLX5_EN_RXNFC
/* Allocate the ethtool steering state (rx-nfc flow classification). */
static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
{
	return mlx5e_ethtool_alloc(&fs->ethtool);
}

/* Free the ethtool steering state. */
static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs)
{
	mlx5e_ethtool_free(fs->ethtool);
}

/* Accessor: ethtool steering state. */
struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs)
{
	return fs->ethtool;
}
#else
/* Stubs when CONFIG_MLX5_EN_RXNFC is disabled: alloc trivially succeeds. */
static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
{ return 0; }
static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
#endif
1431
/*
 * Allocate and initialize the flow-steering context for a profile.
 * The VLAN and TC sub-objects are only allocated when the profile
 * declares the corresponding feature capability.
 *
 * Returns the new context, or NULL on any failure (the specific errno
 * is not propagated to the caller).
 */
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
					  struct mlx5_core_dev *mdev,
					  bool state_destroy)
{
	struct mlx5e_flow_steering *fs;
	int err;

	fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		goto err;

	fs->mdev = mdev;
	fs->state_destroy = state_destroy;
	if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
		err = mlx5e_fs_vlan_alloc(fs);
		if (err)
			goto err_free_fs;
	}

	if (mlx5e_profile_feature_cap(profile, FS_TC)) {
		err = mlx5e_fs_tc_alloc(fs);
		if (err)
			goto err_free_vlan;
	}

	err = mlx5e_fs_ethtool_alloc(fs);
	if (err)
		goto err_free_tc;

	return fs;
err_free_tc:
	/* NOTE(review): reached even when FS_TC was never allocated —
	 * assumes mlx5e_tc_table_free() handles a NULL table; confirm.
	 */
	mlx5e_fs_tc_free(fs);
err_free_vlan:
	/* kvfree(NULL) is a no-op, so safe when FS_VLAN was absent. */
	mlx5e_fs_vlan_free(fs);
err_free_fs:
	kvfree(fs);
err:
	return NULL;
}
1471
/* Free all sub-objects of the flow-steering context, then the context
 * itself.  Counterpart of mlx5e_fs_init().
 */
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
	mlx5e_fs_ethtool_free(fs);
	mlx5e_fs_tc_free(fs);
	mlx5e_fs_vlan_free(fs);
	kvfree(fs);
}
1479
/* Accessor: embedded L2 table state (always valid, never NULL). */
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs)
{
	return &fs->l2;
}
1484
/* Accessor: the egress or ingress (kernel RX) flow namespace. */
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress)
{
	if (egress)
		return fs->egress_ns;

	return fs->ns;
}
1489
/* Store the egress or ingress flow namespace on the context. */
void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress)
{
	struct mlx5_flow_namespace **slot = egress ? &fs->egress_ns : &fs->ns;

	*slot = ns;
}
1497
/* Accessor: the inner or outer TTC table. */
struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner)
{
	if (inner)
		return fs->inner_ttc;

	return fs->ttc;
}
1502
/* Store the inner or outer TTC table on the context. */
void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner)
{
	struct mlx5_ttc_table **slot = inner ? &fs->inner_ttc : &fs->ttc;

	*slot = ttc;
}
1510
#ifdef CONFIG_MLX5_EN_ARFS
/* Accessor: accelerated RFS tables (aRFS). */
struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs)
{
	return fs->arfs;
}

/* Store the aRFS tables on the context. */
void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs)
{
	fs->arfs = arfs;
}
#endif
1522
/* Accessor: PTP RX steering state. */
struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs)
{
	return fs->ptp_fs;
}

/* Store the PTP RX steering state on the context. */
void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs)
{
	fs->ptp_fs = ptp_fs;
}

/* Accessor: "any" protocol steering tables. */
struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs)
{
	return fs->any;
}

/* Store the "any" protocol steering tables on the context. */
void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any)
{
	fs->any = any;
}
1542
#ifdef CONFIG_MLX5_EN_TLS
/* Accessor: accelerated (TLS offload) TCP steering state. */
struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
{
	return fs->accel_tcp;
}

/* Store the accelerated TCP steering state on the context. */
void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp)
{
	fs->accel_tcp = accel_tcp;
}
#endif
1554
/* Record whether the context is in its teardown phase. */
void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy)
{
	fs->state_destroy = state_destroy;
}

/* Record the VLAN-strip-disable setting used when rebuilding rules. */
void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs,
				     bool vlan_strip_disable)
{
	fs->vlan_strip_disable = vlan_strip_disable;
}

/* Accessor: UDP steering tables. */
struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs)
{
	return fs->udp;
}

/* Store the UDP steering tables on the context. */
void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp)
{
	fs->udp = udp;
}

/* Accessor: owning mlx5 core device. */
struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs)
{
	return fs->mdev;
}
1580