// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "ofld.h"

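/* Prio-tag mode: untagged packets from VFs must have a zero-VID 802.1Q
 * header pushed on them, carrying only the PCP (priority) bits. It applies
 * only when the device sets the prio_tag_required capability, and only to
 * VF vports.
 */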
static bool
esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
				 const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

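/* Install the single prio-tag FTE: match untagged (no C-VLAN) packets, push
 * a prio-tag VLAN (VID 0), chain the metadata modify header if one was
 * created, and allow. Tagged traffic is not matched and passes by default.
 */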
static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, then allow.
	 * Unmatched traffic is allowed by default.
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
							&flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}

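/* Allocate a modify-header action that writes this vport's source-port
 * metadata into reg_c_0, then install a match-all FTE in the metadata
 * all-match group that applies it and allows the packet.
 */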
static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
	vport->ingress.offloads.modify_metadata_rule =
				mlx5_add_flow_rules(vport->ingress.acl,
						    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}

static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
						 struct mlx5_vport *vport)
{
	if (!vport->ingress.offloads.modify_metadata_rule)
		return;

	mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
	mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
	vport->ingress.offloads.modify_metadata_rule = NULL;
}

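/* Install a match-all drop FTE in the drop group, blocking all ingress
 * traffic on the vport (needed, e.g., while LAG is enabled).
 */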
static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	int err = 0;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_act.fg = vport->ingress.offloads.drop_grp;
	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}

	vport->ingress.offloads.drop_rule = flow_rule;
out:
	return err;
}

static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport)
{
	if (!vport->ingress.offloads.drop_rule)
		return;

	mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
	vport->ingress.offloads.drop_rule = NULL;
}

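/* Create the per-vport ingress rules: the metadata set rule when source-port
 * matching on metadata is enabled, and the prio-tag push rule when prio-tag
 * mode applies. Order matters: the metadata modify header must exist before
 * the prio-tag rule is created so the latter can chain it.
 */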
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	int err;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_acl_ingress_mod_metadata_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) create ingress modify metadata, err(%d)\n",
				 vport->vport, err);
			return err;
		}
	}

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_acl_ingress_prio_tag_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) create ingress prio tag rule, err(%d)\n",
				 vport->vport, err);
			goto prio_tag_err;
		}
	}

	return 0;

prio_tag_err:
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
	return err;
}

static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	esw_acl_ingress_allow_rule_destroy(vport);
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
	esw_acl_ingress_src_port_drop_destroy(esw, vport);
}

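/* Lay out the ingress ACL flow groups in match priority order:
 * flow index 0: drop group (uplink vport only);
 * next index:   prio-tag group, matching on the C-VLAN tag bit;
 * last index:   metadata all-match group, with no match criteria.
 * Each group spans exactly one flow index, i.e. holds a single FTE.
 */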
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (vport->vport == MLX5_VPORT_UPLINK) {
		/* This group can hold an FTE to drop all traffic.
		 * Needed in case LAG is enabled.
		 */
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto drop_err;
		}
		vport->ingress.offloads.drop_grp = g;
		flow_index++;
	}

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		/* This group holds the FTE matching untagged packets when
		 * prio_tag is enabled.
		 */
		memset(flow_group_in, 0, inlen);
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no match criteria, to add
		 * metadata for tagged packets if prio-tag is enabled, or for
		 * all traffic in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create metadata flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
		vport->ingress.offloads.drop_grp = NULL;
	}
drop_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}

	if (vport->ingress.offloads.drop_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
		vport->ingress.offloads.drop_grp = NULL;
	}
}

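/* Set up the vport's ingress ACL: size the table by the number of FTEs the
 * enabled features need (metadata set, uplink drop, prio tag), then create
 * the flow groups and the rules. A no-op unless metadata matching or
 * prio-tag mode is enabled.
 */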
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
		return 0;

	esw_acl_ingress_allow_rule_destroy(vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (vport->vport == MLX5_VPORT_UPLINK)
		num_ftes++;
	if (esw_acl_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	vport->ingress.acl = esw_acl_table_create(esw, vport,
						  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						  num_ftes);
	if (IS_ERR(vport->ingress.acl)) {
		err = PTR_ERR(vport->ingress.acl);
		vport->ingress.acl = NULL;
		return err;
	}

	err = esw_acl_ingress_ofld_groups_create(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	err = esw_acl_ingress_ofld_rules_create(esw, vport);
	if (err)
		goto rules_err;

	return 0;

rules_err:
	esw_acl_ingress_ofld_groups_destroy(vport);
group_err:
	esw_acl_ingress_table_destroy(vport);
	return err;
}

void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	esw_acl_ingress_ofld_rules_destroy(esw, vport);
	esw_acl_ingress_ofld_groups_destroy(vport);
	esw_acl_ingress_table_destroy(vport);
}

/* Caller must hold rtnl_lock */
int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
					   u32 metadata)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int err;

	if (WARN_ON_ONCE(IS_ERR(vport))) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		/* Return directly: the out label below would dereference an
		 * ERR_PTR-encoded vport.
		 */
		return PTR_ERR(vport);
	}

	esw_acl_ingress_ofld_rules_destroy(esw, vport);

	vport->metadata = metadata ? metadata : vport->default_metadata;

	/* Recreate ingress acl rules with vport->metadata */
	err = esw_acl_ingress_ofld_rules_create(esw, vport);
	if (err)
		goto out;

	return 0;

out:
	vport->metadata = vport->default_metadata;
	return err;
}

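/* Public wrappers to install/remove the catch-all drop rule on a vport's
 * ingress ACL, e.g. to block traffic on an uplink while LAG is enabled.
 */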
int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (IS_ERR(vport)) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return PTR_ERR(vport);
	}

	return esw_acl_ingress_src_port_drop_create(esw, vport);
}

void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport))) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return;
	}

	esw_acl_ingress_src_port_drop_destroy(esw, vport);
}