// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "lgcy.h"

esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport * vport)9 static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport)
10 {
11 if (vport->ingress.legacy.drop_rule) {
12 mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
13 vport->ingress.legacy.drop_rule = NULL;
14 }
15 esw_acl_ingress_allow_rule_destroy(vport);
16 }
17
/* Create the four flow groups backing the legacy ingress ACL table of
 * @vport, one group per flow-table index:
 *   0: criteria cvlan_tag + smac  (untagged traffic from the vport MAC)
 *   1: criteria cvlan_tag         (untagged traffic)
 *   2: criteria smac              (traffic from the vport MAC)
 *   3: no match criteria          (catch-all, used by the drop rule)
 *
 * Returns 0 on success or a negative errno. On failure, groups created
 * so far are destroyed again in reverse order and their pointers are
 * reset to NULL so a later cleanup does not double-free.
 */
static int esw_acl_ingress_lgcy_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* match_criteria points INTO flow_group_in, so the memset between
	 * groups below also clears the criteria and they are re-set each time.
	 */
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	/* Group 0: untagged + spoofchk (cvlan_tag and smac in criteria) */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto spoof_err;
	}
	vport->ingress.legacy.allow_untagged_spoofchk_grp = g;

	/* Group 1: untagged only (cvlan_tag in criteria) */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto untagged_err;
	}
	vport->ingress.legacy.allow_untagged_only_grp = g;

	/* Group 2: spoofchk only (smac in criteria) */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto allow_spoof_err;
	}
	vport->ingress.legacy.allow_spoofchk_only_grp = g;

	/* Group 3: no criteria - holds the catch-all drop rule */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
			 vport->vport, err);
		goto drop_err;
	}
	vport->ingress.legacy.drop_grp = g;
	kvfree(flow_group_in);
	return 0;

	/* Unwind in reverse creation order; clear pointers so that
	 * esw_acl_ingress_lgcy_groups_destroy() will not free them again.
	 */
drop_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
		vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
	}
allow_spoof_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
		vport->ingress.legacy.allow_untagged_only_grp = NULL;
	}
untagged_err:
	if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
		mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
		vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
	}
spoof_err:
	kvfree(flow_group_in);
	return err;
}
118
esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport * vport)119 static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
120 {
121 if (vport->ingress.legacy.allow_spoofchk_only_grp) {
122 mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
123 vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
124 }
125 if (vport->ingress.legacy.allow_untagged_only_grp) {
126 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
127 vport->ingress.legacy.allow_untagged_only_grp = NULL;
128 }
129 if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
130 mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
131 vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
132 }
133 if (vport->ingress.legacy.drop_grp) {
134 mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
135 vport->ingress.legacy.drop_grp = NULL;
136 }
137 }
138
/* Set up (or refresh) the legacy-mode ingress ACL of @vport, enforcing
 * VST vlan push and/or MAC spoof-check. Existing rules are removed first
 * so the function may be called again after a configuration change; if
 * neither vlan, qos nor spoofchk is configured the whole ACL is torn down.
 * Returns 0 on success or a negative errno.
 */
int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
	struct mlx5_flow_destination drop_ctr_dst = {};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec = NULL;
	struct mlx5_fc *counter = NULL;
	bool vst_check_cvlan = false;
	bool vst_push_cvlan = false;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 * 1 allow rule from one of the first 3 groups.
	 * 1 drop rule from the last group):
	 * 1)Allow untagged traffic with smac=original mac.
	 * 2)Allow untagged traffic.
	 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;
	int dest_num = 0;
	int err = 0;
	u8 *smac_v;

	/* Start from a clean slate; the rules are rebuilt below. */
	esw_acl_ingress_lgcy_rules_destroy(vport);

	/* Reuse an existing drop counter, or allocate one if the device
	 * supports flow counters on ingress ACLs. Counter allocation
	 * failure is not fatal - the drop rule is installed without it.
	 */
	if (vport->ingress.legacy.drop_counter) {
		counter = vport->ingress.legacy.drop_counter;
	} else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
		counter = mlx5_fc_create(esw->dev, false);
		if (IS_ERR(counter)) {
			esw_warn(esw->dev,
				 "vport[%d] configure ingress drop rule counter failed\n",
				 vport->vport);
			counter = NULL;
		}
		vport->ingress.legacy.drop_counter = counter;
	}

	/* Nothing to enforce: remove the ACL entirely (the counter is
	 * released by the cleanup as well).
	 */
	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_acl_ingress_lgcy_cleanup(esw, vport);
		return 0;
	}

	if (!vport->ingress.acl) {
		vport->ingress.acl = esw_acl_table_create(esw, vport,
							  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
							  table_size);
		if (IS_ERR(vport->ingress.acl)) {
			err = PTR_ERR(vport->ingress.acl);
			vport->ingress.acl = NULL;
			return err;
		}

		err = esw_acl_ingress_lgcy_groups_create(esw, vport);
		if (err)
			goto out;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* VST handling: in steering mode the allow rule pushes the cvlan
	 * itself; otherwise match on cvlan_tag (the match value stays 0
	 * from kvzalloc, i.e. only untagged packets are allowed) unless
	 * HW inserts the cvlan regardless (vport_cvlan_insert_always).
	 */
	if ((vport->info.vlan || vport->info.qos)) {
		if (vst_mode_steering)
			vst_push_cvlan = true;
		else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always))
			vst_check_cvlan = true;
	}

	if (vst_check_cvlan || vport->info.spoofchk)
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Create ingress allow rule */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	if (vst_push_cvlan) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		flow_act.vlan[0].prio = vport->info.qos;
		flow_act.vlan[0].vid = vport->info.vlan;
		flow_act.vlan[0].ethtype = ETH_P_8021Q;
	}

	if (vst_check_cvlan)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);

	/* Spoof check: only allow packets whose source MAC equals the
	 * MAC administratively assigned to the vport.
	 */
	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
							&flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	/* With no match criteria the allow rule matches everything, so a
	 * drop rule would never hit - skip it (err is still 0 here).
	 */
	if (!vst_check_cvlan && !vport->info.spoofchk)
		goto out;

	/* Catch-all drop rule (no spec), last group in the table. */
	memset(&flow_act, 0, sizeof(flow_act));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	/* Attach drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->ingress.legacy.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, NULL,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->ingress.legacy.drop_rule)) {
		err = PTR_ERR(vport->ingress.legacy.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.legacy.drop_rule = NULL;
		goto out;
	}
	kvfree(spec);
	return 0;

out:
	/* err == 0 means the early-exit success path above: keep the ACL. */
	if (err)
		esw_acl_ingress_lgcy_cleanup(esw, vport);
	kvfree(spec);
	return err;
}
287
esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch * esw,struct mlx5_vport * vport)288 void esw_acl_ingress_lgcy_cleanup(struct mlx5_eswitch *esw,
289 struct mlx5_vport *vport)
290 {
291 if (IS_ERR_OR_NULL(vport->ingress.acl))
292 goto clean_drop_counter;
293
294 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
295
296 esw_acl_ingress_lgcy_rules_destroy(vport);
297 esw_acl_ingress_lgcy_groups_destroy(vport);
298 esw_acl_ingress_table_destroy(vport);
299
300 clean_drop_counter:
301 if (vport->ingress.legacy.drop_counter) {
302 mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
303 vport->ingress.legacy.drop_counter = NULL;
304 }
305 }
306