// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "ipsec.h"
#include "fs_core.h"

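/* Maximum number of flow table entries in the RX/TX SA tables (2^15) */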
#define NUM_IPSEC_FTE BIT(15)

enum accel_fs_esp_type {
	ACCEL_FS_ESP4,
	ACCEL_FS_ESP6,
	ACCEL_FS_ESP_NUM_TYPES,
};

struct mlx5e_ipsec_rx_err {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_modify_hdr *copy_modify_hdr;
};

struct mlx5e_accel_fs_esp_prot {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_destination default_dest;
	struct mlx5e_ipsec_rx_err rx_err;
	u32 refcnt;
	struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
};

struct mlx5e_accel_fs_esp {
	struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};

struct mlx5e_ipsec_tx {
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mutex mutex; /* Protect IPsec TX steering */
	u32 refcnt;
};

/* IPsec RX flow steering */
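/* Map an ESP steering type to its TTC traffic type. */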
static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
	if (i == ACCEL_FS_ESP4)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}

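/* Add the single rule of the RX error flow table: copy the 7-bit
 * ipsec_syndrome into metadata register B and forward the packet to
 * the default TTC destination.
 */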
static int rx_err_add_rule(struct mlx5e_priv *priv,
			   struct mlx5e_accel_fs_esp_prot *fs_prot,
			   struct mlx5e_ipsec_rx_err *rx_err)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Action to copy the 7-bit ipsec_syndrome to regB[24:30] */
	MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
	MLX5_SET(copy_action_in, action, src_offset, 0);
	MLX5_SET(copy_action_in, action, length, 7);
	MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(copy_action_in, action, dst_offset, 24);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);

	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "fail to alloc ipsec copy modify_header_id err=%d\n", err);
		goto out_spec;
	}

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_act.modify_hdr = modify_hdr;
	fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
				  &fs_prot->default_dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
		goto out;
	}

	kvfree(spec);
	rx_err->rule = fte;
	rx_err->copy_modify_hdr = modify_hdr;
	return 0;

out:
	mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
	kvfree(spec);
	return err;
}

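/* Populate the RX SA flow table with a miss group and miss rule that
 * occupy the last flow table entry, so traffic matching no SA rule is
 * forwarded to the default TTC destination.
 */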
static int rx_fs_create(struct mlx5e_priv *priv,
			struct mlx5e_accel_fs_esp_prot *fs_prot)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = fs_prot->ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
		goto out;
	}
	fs_prot->miss_group = miss_group;

	/* Create miss rule */
	miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
	if (IS_ERR(miss_rule)) {
		mlx5_destroy_flow_group(fs_prot->miss_group);
		err = PTR_ERR(miss_rule);
		netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
		goto out;
	}
	fs_prot->miss_rule = miss_rule;
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

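/* Tear down the RX steering objects of one ESP type: the SA table with
 * its miss group/rule, and the error table with its copy rule.
 */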
static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;

	/* The netdev unreg already happened, so all offloaded rules are already removed */
	fs_prot = &accel_esp->fs_prot[type];

	mlx5_del_flow_rules(fs_prot->miss_rule);
	mlx5_destroy_flow_group(fs_prot->miss_group);
	mlx5_destroy_flow_table(fs_prot->ft);

	mlx5_del_flow_rules(fs_prot->rx_err.rule);
	mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
	mlx5_destroy_flow_table(fs_prot->rx_err.ft);
}

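/* Create the RX steering objects of one ESP type: the error flow table
 * with its syndrome-copy rule first, then the SA flow table that uses
 * the error table as its destination.
 */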
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_table *ft;
	int err;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	fs_prot->default_dest =
		mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	fs_prot->rx_err.ft = ft;
	err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
	if (err)
		goto err_add;

	/* Create FT */
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;
	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	fs_prot->ft = ft;

	err = rx_fs_create(priv, fs_prot);
	if (err)
		goto err_fs;

	return 0;

err_fs:
	mlx5_destroy_flow_table(fs_prot->ft);
err_fs_ft:
	mlx5_del_flow_rules(fs_prot->rx_err.rule);
	mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
err_add:
	mlx5_destroy_flow_table(fs_prot->rx_err.ft);
	return err;
}

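/* Take a reference on the RX flow tables of one ESP type, creating them
 * and connecting them to the TTC table on first use.
 */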
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	if (fs_prot->refcnt)
		goto skip;

	/* create FT */
	err = rx_create(priv, type);
	if (err)
		goto out;

	/* connect */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs_prot->ft;
	mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);

skip:
	fs_prot->refcnt++;
out:
	mutex_unlock(&fs_prot->prot_mutex);
	return err;
}

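/* Drop a reference on the RX flow tables of one ESP type, disconnecting
 * and destroying them when the last reference goes away.
 */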
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;

	accel_esp = priv->ipsec->rx_fs;
	fs_prot = &accel_esp->fs_prot[type];
	mutex_lock(&fs_prot->prot_mutex);
	fs_prot->refcnt--;
	if (fs_prot->refcnt)
		goto out;

	/* disconnect */
	mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));

	/* remove FT */
	rx_destroy(priv, type);

out:
	mutex_unlock(&fs_prot->prot_mutex);
}

/* IPsec TX flow steering */
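/* Create the TX flow table in the egress IPsec namespace. */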
static int tx_create(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_table *ft;
	int err;

	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.autogroup.max_num_groups = 1;
	ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
		return err;
	}
	ipsec->tx_fs->ft = ft;
	return 0;
}

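/* Take a reference on the TX flow table, creating it on first use. */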
static int tx_ft_get(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
	int err = 0;

	mutex_lock(&tx_fs->mutex);
	if (tx_fs->refcnt)
		goto skip;

	err = tx_create(priv);
	if (err)
		goto out;
skip:
	tx_fs->refcnt++;
out:
	mutex_unlock(&tx_fs->mutex);
	return err;
}

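/* Drop a reference on the TX flow table, destroying it on last put. */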
static void tx_ft_put(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;

	mutex_lock(&tx_fs->mutex);
	tx_fs->refcnt--;
	if (tx_fs->refcnt)
		goto out;

	mlx5_destroy_flow_table(tx_fs->ft);
out:
	mutex_unlock(&tx_fs->mutex);
}

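/* Fill the match spec (IP version, non-fragmented, ESP protocol, SPI,
 * source/destination addresses) and the crypto action shared by the RX
 * and TX SA rules.
 */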
static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     u32 ipsec_obj_id,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_act *flow_act)
{
	u8 ip_version = attrs->is_ipv6 ? 6 : 4;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;

	/* ip_version */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);

	/* Non-fragmented */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);

	/* ESP header */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);

	/* SPI number */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters.outer_esp_spi, attrs->spi);

	if (ip_version == 4) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &attrs->saddr.a4, 4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &attrs->daddr.a4, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &attrs->saddr.a6, 16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &attrs->daddr.a6, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
	}

	flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act->crypto.obj_id = ipsec_obj_id;
	flow_act->flags |= FLOW_ACT_NO_APPEND;
}

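/* Add the RX rule of one SA: match its ESP/SPI/address tuple, decrypt,
 * mark the packet in metadata register B and forward it to the error
 * flow table.
 */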
static int rx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5e_ipsec_sa_entry *sa_entry)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
	struct mlx5_modify_hdr *modify_hdr = NULL;
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5_flow_destination dest = {};
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	enum accel_fs_esp_type type;
	struct mlx5_flow_spec *spec;
	int err = 0;

	accel_esp = priv->ipsec->rx_fs;
	type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
	fs_prot = &accel_esp->fs_prot[type];

	err = rx_ft_get(priv, type);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_err;
	}

	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

	/* Set bit[31] ipsec marker and bits[23:0] ipsec_obj_id */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 32);

	modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		netdev_err(priv->netdev,
			   "fail to alloc ipsec set modify_header_id err=%d\n", err);
		modify_hdr = NULL;
		goto out_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	flow_act.modify_hdr = modify_hdr;
	dest.ft = fs_prot->rx_err.ft;
	rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
			   attrs->action, err);
		goto out_err;
	}

	ipsec_rule->rule = rule;
	ipsec_rule->set_modify_hdr = modify_hdr;
	goto out;

out_err:
	if (modify_hdr)
		mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
	rx_ft_put(priv, type);

out:
	kvfree(spec);
	return err;
}

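/* Add the TX rule of one SA: match its ESP/SPI/address tuple plus the
 * IPsec indicator in metadata register A, then encrypt and allow.
 */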
static int tx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	err = tx_ft_get(priv);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
			 &flow_act);

	/* Add IPsec indicator in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
	rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
			   sa_entry->attrs.action, err);
		goto out;
	}

	sa_entry->ipsec_rule.rule = rule;

out:
	kvfree(spec);
	if (err)
		tx_ft_put(priv);
	return err;
}

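/* Add the steering rule of an SA: TX for encrypting (outbound) SAs,
 * RX otherwise.
 */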
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5e_ipsec_sa_entry *sa_entry)
{
	if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
		return tx_add_rule(priv, sa_entry);

	return rx_add_rule(priv, sa_entry);
}

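/* Delete the steering rule of an SA and release the flow tables it
 * pinned.
 */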
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
		tx_ft_put(priv);
		return;
	}

	mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
	rx_ft_put(priv,
		  sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}

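/* Free the RX/TX steering context allocated by
 * mlx5e_accel_ipsec_fs_init().
 */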
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	enum accel_fs_esp_type i;

	if (!ipsec->rx_fs)
		return;

	mutex_destroy(&ipsec->tx_fs->mutex);
	WARN_ON(ipsec->tx_fs->refcnt);
	kfree(ipsec->tx_fs);

	accel_esp = ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_destroy(&fs_prot->prot_mutex);
		WARN_ON(fs_prot->refcnt);
	}
	kfree(ipsec->rx_fs);
}

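/* Allocate the RX/TX steering context. The flow tables themselves are
 * created lazily on first rule addition.
 */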
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
	struct mlx5e_accel_fs_esp *accel_esp;
	struct mlx5_flow_namespace *ns;
	enum accel_fs_esp_type i;
	int err = -ENOMEM;

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
	if (!ipsec->tx_fs)
		return -ENOMEM;

	ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
	if (!ipsec->rx_fs)
		goto err_rx;

	mutex_init(&ipsec->tx_fs->mutex);
	ipsec->tx_fs->ns = ns;

	accel_esp = ipsec->rx_fs;
	for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
		fs_prot = &accel_esp->fs_prot[i];
		mutex_init(&fs_prot->prot_mutex);
	}

	return 0;

err_rx:
	kfree(ipsec->tx_fs);
	return err;
}