1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
3
4 #include <linux/netdevice.h>
5 #include "en.h"
6 #include "en/fs.h"
7 #include "eswitch.h"
8 #include "ipsec.h"
9 #include "fs_core.h"
10 #include "lib/ipsec_fs_roce.h"
11 #include "lib/fs_chains.h"
12 #include "esw/ipsec_fs.h"
13 #include "en_rep.h"
14
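/*
 * Editor's sketch of the constants below: NUM_IPSEC_FTE caps each IPsec
 * flow table at 2^15 (32768) entries, and IPSEC_TUNNEL_DEFAULT_TTL
 * (0x40 == 64) is the conventional default IP TTL/hop-limit used when
 * building tunnel-mode outer headers later in this file.
 */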
15 #define NUM_IPSEC_FTE BIT(15)
16 #define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
17 #define IPSEC_TUNNEL_DEFAULT_TTL 0x40
18
19 struct mlx5e_ipsec_fc {
20 struct mlx5_fc *cnt;
21 struct mlx5_fc *drop;
22 };
23
24 struct mlx5e_ipsec_tx {
25 struct mlx5e_ipsec_ft ft;
26 struct mlx5e_ipsec_miss pol;
27 struct mlx5e_ipsec_miss sa;
28 struct mlx5e_ipsec_rule status;
29 struct mlx5_flow_namespace *ns;
30 struct mlx5e_ipsec_fc *fc;
31 struct mlx5_fs_chains *chains;
32 u8 allow_tunnel_mode : 1;
33 };
34
35 /* IPsec RX flow steering */
36 static enum mlx5_traffic_types family2tt(u32 family)
37 {
38 if (family == AF_INET)
39 return MLX5_TT_IPV4_IPSEC_ESP;
40 return MLX5_TT_IPV6_IPSEC_ESP;
41 }
42
43 static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
44 {
45 if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
46 return ipsec->rx_esw;
47
48 if (family == AF_INET)
49 return ipsec->rx_ipv4;
50
51 return ipsec->rx_ipv6;
52 }
53
54 static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
55 {
56 if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
57 return ipsec->tx_esw;
58
59 return ipsec->tx;
60 }
61
62 static struct mlx5_fs_chains *
63 ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
64 enum mlx5_flow_namespace_type ns, int base_prio,
65 int base_level, struct mlx5_flow_table **root_ft)
66 {
67 struct mlx5_chains_attr attr = {};
68 struct mlx5_fs_chains *chains;
69 struct mlx5_flow_table *ft;
70 int err;
71
72 attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
73 MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
74 attr.max_grp_num = 2;
75 attr.default_ft = miss_ft;
76 attr.ns = ns;
77 attr.fs_base_prio = base_prio;
78 attr.fs_base_level = base_level;
79 chains = mlx5_chains_create(mdev, &attr);
80 if (IS_ERR(chains))
81 return chains;
82
83 /* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
84 ft = mlx5_chains_get_table(chains, 0, 1, 0);
85 if (IS_ERR(ft)) {
86 err = PTR_ERR(ft);
87 goto err_chains_get;
88 }
89
90 *root_ft = ft;
91 return chains;
92
93 err_chains_get:
94 mlx5_chains_destroy(chains);
95 return ERR_PTR(err);
96 }
97
98 static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
99 {
100 mlx5_chains_put_table(chains, 0, 1, 0);
101 mlx5_chains_destroy(chains);
102 }
103
104 static struct mlx5_flow_table *
105 ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
106 {
107 return mlx5_chains_get_table(chains, 0, prio + 1, 0);
108 }
109
110 static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
111 {
112 mlx5_chains_put_table(chains, 0, prio + 1, 0);
113 }
114
115 static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
116 int level, int prio,
117 int max_num_groups, u32 flags)
118 {
119 struct mlx5_flow_table_attr ft_attr = {};
120
121 ft_attr.autogroup.num_reserved_entries = 1;
122 ft_attr.autogroup.max_num_groups = max_num_groups;
123 ft_attr.max_fte = NUM_IPSEC_FTE;
124 ft_attr.level = level;
125 ft_attr.prio = prio;
126 ft_attr.flags = flags;
127
128 return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
129 }
130
131 static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
132 struct mlx5e_ipsec_rx *rx)
133 {
134 mlx5_del_flow_rules(rx->status_drop.rule);
135 mlx5_destroy_flow_group(rx->status_drop.group);
136 mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
137 }
138
139 static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
140 struct mlx5e_ipsec_rx *rx)
141 {
142 mlx5_del_flow_rules(rx->status.rule);
143
144 if (rx != ipsec->rx_esw)
145 return;
146
147 #ifdef CONFIG_MLX5_ESWITCH
148 mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
149 #endif
150 }
151
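/*
 * Catch-all drop rule for the RX status table: it occupies the table's
 * reserved last FTE (max_fte - 1), so any packet not taken by the
 * syndrome == 0 "pass" rule below is counted and dropped.
 */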
152 static int ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
153 struct mlx5e_ipsec_rx *rx)
154 {
155 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
156 struct mlx5_flow_table *ft = rx->ft.status;
157 struct mlx5_core_dev *mdev = ipsec->mdev;
158 struct mlx5_flow_destination dest = {};
159 struct mlx5_flow_act flow_act = {};
160 struct mlx5_flow_handle *rule;
161 struct mlx5_fc *flow_counter;
162 struct mlx5_flow_spec *spec;
163 struct mlx5_flow_group *g;
164 u32 *flow_group_in;
165 int err = 0;
166
167 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
168 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
169 if (!flow_group_in || !spec) {
170 err = -ENOMEM;
171 goto err_out;
172 }
173
174 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
175 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
176 g = mlx5_create_flow_group(ft, flow_group_in);
177 if (IS_ERR(g)) {
178 err = PTR_ERR(g);
179 mlx5_core_err(mdev,
180 "Failed to add ipsec rx status drop flow group, err=%d\n", err);
181 goto err_out;
182 }
183
184 flow_counter = mlx5_fc_create(mdev, false);
185 if (IS_ERR(flow_counter)) {
186 err = PTR_ERR(flow_counter);
187 mlx5_core_err(mdev,
188 "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
189 goto err_cnt;
190 }
191
192 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
193 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
194 dest.counter_id = mlx5_fc_id(flow_counter);
195 if (rx == ipsec->rx_esw)
196 spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
197 rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
198 if (IS_ERR(rule)) {
199 err = PTR_ERR(rule);
200 mlx5_core_err(mdev,
201 "Failed to add ipsec rx status drop rule, err=%d\n", err);
202 goto err_rule;
203 }
204
205 rx->status_drop.group = g;
206 rx->status_drop.rule = rule;
207 rx->status_drop_cnt = flow_counter;
208
209 kvfree(flow_group_in);
210 kvfree(spec);
211 return 0;
212
213 err_rule:
214 mlx5_fc_destroy(mdev, flow_counter);
215 err_cnt:
216 mlx5_destroy_flow_group(g);
217 err_out:
218 kvfree(flow_group_in);
219 kvfree(spec);
220 return err;
221 }
222
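/*
 * Pass rule for the RX status table: packets whose ipsec_syndrome is 0
 * (decrypted successfully) are counted and forwarded to @dest.
 */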
223 static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
224 struct mlx5e_ipsec_rx *rx,
225 struct mlx5_flow_destination *dest)
226 {
227 struct mlx5_flow_act flow_act = {};
228 struct mlx5_flow_handle *rule;
229 struct mlx5_flow_spec *spec;
230 int err;
231
232 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
233 if (!spec)
234 return -ENOMEM;
235
236 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
237 misc_parameters_2.ipsec_syndrome);
238 MLX5_SET(fte_match_param, spec->match_value,
239 misc_parameters_2.ipsec_syndrome, 0);
240 if (rx == ipsec->rx_esw)
241 spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
242 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
243 flow_act.flags = FLOW_ACT_NO_APPEND;
244 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
245 MLX5_FLOW_CONTEXT_ACTION_COUNT;
246 rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
247 if (IS_ERR(rule)) {
248 err = PTR_ERR(rule);
249 mlx5_core_warn(ipsec->mdev,
250 "Failed to add ipsec rx status pass rule, err=%d\n", err);
251 goto err_rule;
252 }
253
254 rx->status.rule = rule;
255 kvfree(spec);
256 return 0;
257
258 err_rule:
259 kvfree(spec);
260 return err;
261 }
262
263 static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
264 struct mlx5e_ipsec_rx *rx)
265 {
266 ipsec_rx_status_pass_destroy(ipsec, rx);
267 ipsec_rx_status_drop_destroy(ipsec, rx);
268 }
269
270 static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
271 struct mlx5e_ipsec_rx *rx,
272 struct mlx5_flow_destination *dest)
273 {
274 int err;
275
276 err = ipsec_rx_status_drop_create(ipsec, rx);
277 if (err)
278 return err;
279
280 err = ipsec_rx_status_pass_create(ipsec, rx, dest);
281 if (err)
282 goto err_pass_create;
283
284 return 0;
285
286 err_pass_create:
287 ipsec_rx_status_drop_destroy(ipsec, rx);
288 return err;
289 }
290
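/*
 * Install a catch-all miss rule in the table's reserved last FTE so
 * traffic that matches no SA/policy entry is steered to @dest.
 */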
291 static int ipsec_miss_create(struct mlx5_core_dev *mdev,
292 struct mlx5_flow_table *ft,
293 struct mlx5e_ipsec_miss *miss,
294 struct mlx5_flow_destination *dest)
295 {
296 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
297 MLX5_DECLARE_FLOW_ACT(flow_act);
298 struct mlx5_flow_spec *spec;
299 u32 *flow_group_in;
300 int err = 0;
301
302 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
303 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
304 if (!flow_group_in || !spec) {
305 err = -ENOMEM;
306 goto out;
307 }
308
309 /* Create miss_group */
310 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
311 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
312 miss->group = mlx5_create_flow_group(ft, flow_group_in);
313 if (IS_ERR(miss->group)) {
314 err = PTR_ERR(miss->group);
315 mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
316 err);
317 goto out;
318 }
319
320 /* Create miss rule */
321 miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
322 if (IS_ERR(miss->rule)) {
323 mlx5_destroy_flow_group(miss->group);
324 err = PTR_ERR(miss->rule);
325 mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
326 err);
327 goto out;
328 }
329 out:
330 kvfree(flow_group_in);
331 kvfree(spec);
332 return err;
333 }
334
335 static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
336 {
337 struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
338
339 mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
340 }
341
342 static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
343 struct mlx5e_ipsec_rx *rx, u32 family)
344 {
345 /* disconnect */
346 if (rx != ipsec->rx_esw)
347 ipsec_rx_ft_disconnect(ipsec, family);
348
349 if (rx->chains) {
350 ipsec_chains_destroy(rx->chains);
351 } else {
352 mlx5_del_flow_rules(rx->pol.rule);
353 mlx5_destroy_flow_group(rx->pol.group);
354 mlx5_destroy_flow_table(rx->ft.pol);
355 }
356
357 mlx5_del_flow_rules(rx->sa.rule);
358 mlx5_destroy_flow_group(rx->sa.group);
359 mlx5_destroy_flow_table(rx->ft.sa);
360 if (rx->allow_tunnel_mode)
361 mlx5_eswitch_unblock_encap(mdev);
362 mlx5_ipsec_rx_status_destroy(ipsec, rx);
363 mlx5_destroy_flow_table(rx->ft.status);
364
365 mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
366 }
367
368 static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
369 struct mlx5e_ipsec_rx *rx,
370 u32 family,
371 struct mlx5e_ipsec_rx_create_attr *attr)
372 {
373 if (rx == ipsec->rx_esw) {
374 /* For packet offload in switchdev mode, RX & TX use FDB namespace */
375 attr->ns = ipsec->tx_esw->ns;
376 mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
377 return;
378 }
379
380 attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
381 attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
382 attr->family = family;
383 attr->prio = MLX5E_NIC_PRIO;
384 attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
385 attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
386 attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
387 attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
388 }
389
390 static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
391 struct mlx5e_ipsec_rx *rx,
392 struct mlx5e_ipsec_rx_create_attr *attr,
393 struct mlx5_flow_destination *dest)
394 {
395 struct mlx5_flow_table *ft;
396 int err;
397
398 if (rx == ipsec->rx_esw)
399 return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
400
401 *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
402 err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
403 attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
404 attr->prio);
405 if (err)
406 return err;
407
408 ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
409 if (ft) {
410 dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
411 dest->ft = ft;
412 }
413
414 return 0;
415 }
416
417 static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
418 struct mlx5e_ipsec_rx *rx,
419 struct mlx5e_ipsec_rx_create_attr *attr)
420 {
421 struct mlx5_flow_destination dest = {};
422
423 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
424 dest.ft = rx->ft.pol;
425 mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
426 }
427
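/*
 * Build the RX pipeline for one address family: TTC ESP entry ->
 * policy table (or prio chains) -> SA table -> status table, whose
 * pass rule finally forwards to the RoCE table or the TTC default
 * destination. Tables are created in reverse order (status first) so
 * each level can point at the next.
 */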
428 static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
429 struct mlx5e_ipsec_rx *rx, u32 family)
430 {
431 struct mlx5e_ipsec_rx_create_attr attr;
432 struct mlx5_flow_destination dest[2];
433 struct mlx5_flow_table *ft;
434 u32 flags = 0;
435 int err;
436
437 ipsec_rx_create_attr_set(ipsec, rx, family, &attr);
438
439 err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
440 if (err)
441 return err;
442
443 ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
444 if (IS_ERR(ft)) {
445 err = PTR_ERR(ft);
446 goto err_fs_ft_status;
447 }
448 rx->ft.status = ft;
449
450 dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
451 dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
452 err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
453 if (err)
454 goto err_add;
455
456 /* Create FT */
457 if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
458 rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
459 if (rx->allow_tunnel_mode)
460 flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
461 ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
462 if (IS_ERR(ft)) {
463 err = PTR_ERR(ft);
464 goto err_fs_ft;
465 }
466 rx->ft.sa = ft;
467
468 err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
469 if (err)
470 goto err_fs;
471
472 if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
473 rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
474 attr.chains_ns,
475 attr.prio,
476 attr.pol_level,
477 &rx->ft.pol);
478 if (IS_ERR(rx->chains)) {
479 err = PTR_ERR(rx->chains);
480 goto err_pol_ft;
481 }
482
483 goto connect;
484 }
485
486 ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
487 if (IS_ERR(ft)) {
488 err = PTR_ERR(ft);
489 goto err_pol_ft;
490 }
491 rx->ft.pol = ft;
492 memset(dest, 0x00, 2 * sizeof(*dest));
493 dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
494 dest[0].ft = rx->ft.sa;
495 err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
496 if (err)
497 goto err_pol_miss;
498
499 connect:
500 /* connect */
501 if (rx != ipsec->rx_esw)
502 ipsec_rx_ft_connect(ipsec, rx, &attr);
503 return 0;
504
505 err_pol_miss:
506 mlx5_destroy_flow_table(rx->ft.pol);
507 err_pol_ft:
508 mlx5_del_flow_rules(rx->sa.rule);
509 mlx5_destroy_flow_group(rx->sa.group);
510 err_fs:
511 mlx5_destroy_flow_table(rx->ft.sa);
512 err_fs_ft:
513 if (rx->allow_tunnel_mode)
514 mlx5_eswitch_unblock_encap(mdev);
515 mlx5_del_flow_rules(rx->status.rule);
516 mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
517 err_add:
518 mlx5_destroy_flow_table(rx->ft.status);
519 err_fs_ft_status:
520 mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
521 return err;
522 }
523
524 static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
525 struct mlx5e_ipsec_rx *rx, u32 family)
526 {
527 int err;
528
529 if (rx->ft.refcnt)
530 goto skip;
531
532 err = mlx5_eswitch_block_mode(mdev);
533 if (err)
534 return err;
535
536 err = rx_create(mdev, ipsec, rx, family);
537 if (err) {
538 mlx5_eswitch_unblock_mode(mdev);
539 return err;
540 }
541
542 skip:
543 rx->ft.refcnt++;
544 return 0;
545 }
546
547 static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
548 u32 family)
549 {
550 if (--rx->ft.refcnt)
551 return;
552
553 rx_destroy(ipsec->mdev, ipsec, rx, family);
554 mlx5_eswitch_unblock_mode(ipsec->mdev);
555 }
556
557 static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
558 struct mlx5e_ipsec *ipsec, u32 family,
559 int type)
560 {
561 struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
562 int err;
563
564 mutex_lock(&rx->ft.mutex);
565 err = rx_get(mdev, ipsec, rx, family);
566 mutex_unlock(&rx->ft.mutex);
567 if (err)
568 return ERR_PTR(err);
569
570 return rx;
571 }
572
573 static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
574 struct mlx5e_ipsec *ipsec,
575 u32 family, u32 prio, int type)
576 {
577 struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
578 struct mlx5_flow_table *ft;
579 int err;
580
581 mutex_lock(&rx->ft.mutex);
582 err = rx_get(mdev, ipsec, rx, family);
583 if (err)
584 goto err_get;
585
586 ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
587 if (IS_ERR(ft)) {
588 err = PTR_ERR(ft);
589 goto err_get_ft;
590 }
591
592 mutex_unlock(&rx->ft.mutex);
593 return ft;
594
595 err_get_ft:
596 rx_put(ipsec, rx, family);
597 err_get:
598 mutex_unlock(&rx->ft.mutex);
599 return ERR_PTR(err);
600 }
601
602 static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
603 {
604 struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
605
606 mutex_lock(&rx->ft.mutex);
607 rx_put(ipsec, rx, family);
608 mutex_unlock(&rx->ft.mutex);
609 }
610
611 static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
612 {
613 struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
614
615 mutex_lock(&rx->ft.mutex);
616 if (rx->chains)
617 ipsec_chains_put_table(rx->chains, prio);
618
619 rx_put(ipsec, rx, family);
620 mutex_unlock(&rx->ft.mutex);
621 }
622
623 static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
624 {
625 struct mlx5_flow_destination dest = {};
626 struct mlx5_flow_act flow_act = {};
627 struct mlx5_flow_handle *fte;
628 struct mlx5_flow_spec *spec;
629 int err;
630
631 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
632 if (!spec)
633 return -ENOMEM;
634
635 /* create fte */
636 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
637 MLX5_FLOW_CONTEXT_ACTION_COUNT;
638 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
639 dest.counter_id = mlx5_fc_id(tx->fc->cnt);
640 fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
641 if (IS_ERR(fte)) {
642 err = PTR_ERR(fte);
643 mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
644 goto err_rule;
645 }
646
647 kvfree(spec);
648 tx->status.rule = fte;
649 return 0;
650
651 err_rule:
652 kvfree(spec);
653 return err;
654 }
655
656 /* IPsec TX flow steering */
657 static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
658 struct mlx5_ipsec_fs *roce)
659 {
660 mlx5_ipsec_fs_roce_tx_destroy(roce);
661 if (tx->chains) {
662 ipsec_chains_destroy(tx->chains);
663 } else {
664 mlx5_del_flow_rules(tx->pol.rule);
665 mlx5_destroy_flow_group(tx->pol.group);
666 mlx5_destroy_flow_table(tx->ft.pol);
667 }
668
669 if (tx == ipsec->tx_esw) {
670 mlx5_del_flow_rules(tx->sa.rule);
671 mlx5_destroy_flow_group(tx->sa.group);
672 }
673 mlx5_destroy_flow_table(tx->ft.sa);
674 if (tx->allow_tunnel_mode)
675 mlx5_eswitch_unblock_encap(ipsec->mdev);
676 mlx5_del_flow_rules(tx->status.rule);
677 mlx5_destroy_flow_table(tx->ft.status);
678 }
679
680 static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
681 struct mlx5e_ipsec_tx *tx,
682 struct mlx5e_ipsec_tx_create_attr *attr)
683 {
684 if (tx == ipsec->tx_esw) {
685 mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
686 return;
687 }
688
689 attr->prio = 0;
690 attr->pol_level = 0;
691 attr->sa_level = 1;
692 attr->cnt_level = 2;
693 attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
694 }
695
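/*
 * Build the TX pipeline: policy table (or prio chains) -> SA table ->
 * status table, which only counts and allows. In switchdev mode the SA
 * table additionally gets a miss rule forwarding unmatched traffic to
 * the uplink vport. Created in reverse order, like the RX side.
 */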
696 static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
697 struct mlx5_ipsec_fs *roce)
698 {
699 struct mlx5_core_dev *mdev = ipsec->mdev;
700 struct mlx5e_ipsec_tx_create_attr attr;
701 struct mlx5_flow_destination dest = {};
702 struct mlx5_flow_table *ft;
703 u32 flags = 0;
704 int err;
705
706 ipsec_tx_create_attr_set(ipsec, tx, &attr);
707 ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
708 if (IS_ERR(ft))
709 return PTR_ERR(ft);
710 tx->ft.status = ft;
711
712 err = ipsec_counter_rule_tx(mdev, tx);
713 if (err)
714 goto err_status_rule;
715
716 if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
717 tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
718 if (tx->allow_tunnel_mode)
719 flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
720 ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
721 if (IS_ERR(ft)) {
722 err = PTR_ERR(ft);
723 goto err_sa_ft;
724 }
725 tx->ft.sa = ft;
726
727 if (tx == ipsec->tx_esw) {
728 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
729 dest.vport.num = MLX5_VPORT_UPLINK;
730 err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
731 if (err)
732 goto err_sa_miss;
733 memset(&dest, 0, sizeof(dest));
734 }
735
736 if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
737 tx->chains = ipsec_chains_create(
738 mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
739 &tx->ft.pol);
740 if (IS_ERR(tx->chains)) {
741 err = PTR_ERR(tx->chains);
742 goto err_pol_ft;
743 }
744
745 goto connect_roce;
746 }
747
748 ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
749 if (IS_ERR(ft)) {
750 err = PTR_ERR(ft);
751 goto err_pol_ft;
752 }
753 tx->ft.pol = ft;
754 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
755 dest.ft = tx->ft.sa;
756 err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
757 if (err) {
758 mlx5_destroy_flow_table(tx->ft.pol);
759 goto err_pol_ft;
760 }
761
762 connect_roce:
763 err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
764 if (err)
765 goto err_roce;
766 return 0;
767
768 err_roce:
769 if (tx->chains) {
770 ipsec_chains_destroy(tx->chains);
771 } else {
772 mlx5_del_flow_rules(tx->pol.rule);
773 mlx5_destroy_flow_group(tx->pol.group);
774 mlx5_destroy_flow_table(tx->ft.pol);
775 }
776 err_pol_ft:
777 if (tx == ipsec->tx_esw) {
778 mlx5_del_flow_rules(tx->sa.rule);
779 mlx5_destroy_flow_group(tx->sa.group);
780 }
781 err_sa_miss:
782 mlx5_destroy_flow_table(tx->ft.sa);
783 err_sa_ft:
784 if (tx->allow_tunnel_mode)
785 mlx5_eswitch_unblock_encap(mdev);
786 mlx5_del_flow_rules(tx->status.rule);
787 err_status_rule:
788 mlx5_destroy_flow_table(tx->ft.status);
789 return err;
790 }
791
792 static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
793 struct mlx5_flow_table *ft)
794 {
795 #ifdef CONFIG_MLX5_ESWITCH
796 struct mlx5_eswitch *esw = mdev->priv.eswitch;
797 struct mlx5e_rep_priv *uplink_rpriv;
798 struct mlx5e_priv *priv;
799
800 esw->offloads.ft_ipsec_tx_pol = ft;
801 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
802 priv = netdev_priv(uplink_rpriv->netdev);
803 if (!priv->channels.num)
804 return;
805
806 mlx5e_rep_deactivate_channels(priv);
807 mlx5e_rep_activate_channels(priv);
808 #endif
809 }
810
811 static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
812 struct mlx5e_ipsec_tx *tx)
813 {
814 int err;
815
816 if (tx->ft.refcnt)
817 goto skip;
818
819 err = mlx5_eswitch_block_mode(mdev);
820 if (err)
821 return err;
822
823 err = tx_create(ipsec, tx, ipsec->roce);
824 if (err) {
825 mlx5_eswitch_unblock_mode(mdev);
826 return err;
827 }
828
829 if (tx == ipsec->tx_esw)
830 ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
831
832 skip:
833 tx->ft.refcnt++;
834 return 0;
835 }
836
837 static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
838 {
839 if (--tx->ft.refcnt)
840 return;
841
842 if (tx == ipsec->tx_esw) {
843 mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
844 ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
845 }
846
847 tx_destroy(ipsec, tx, ipsec->roce);
848 mlx5_eswitch_unblock_mode(ipsec->mdev);
849 }
850
851 static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
852 struct mlx5e_ipsec *ipsec,
853 u32 prio, int type)
854 {
855 struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
856 struct mlx5_flow_table *ft;
857 int err;
858
859 mutex_lock(&tx->ft.mutex);
860 err = tx_get(mdev, ipsec, tx);
861 if (err)
862 goto err_get;
863
864 ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
865 if (IS_ERR(ft)) {
866 err = PTR_ERR(ft);
867 goto err_get_ft;
868 }
869
870 mutex_unlock(&tx->ft.mutex);
871 return ft;
872
873 err_get_ft:
874 tx_put(ipsec, tx);
875 err_get:
876 mutex_unlock(&tx->ft.mutex);
877 return ERR_PTR(err);
878 }
879
880 static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
881 struct mlx5e_ipsec *ipsec, int type)
882 {
883 struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
884 int err;
885
886 mutex_lock(&tx->ft.mutex);
887 err = tx_get(mdev, ipsec, tx);
888 mutex_unlock(&tx->ft.mutex);
889 if (err)
890 return ERR_PTR(err);
891
892 return tx;
893 }
894
895 static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
896 {
897 struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
898
899 mutex_lock(&tx->ft.mutex);
900 tx_put(ipsec, tx);
901 mutex_unlock(&tx->ft.mutex);
902 }
903
904 static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
905 {
906 struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
907
908 mutex_lock(&tx->ft.mutex);
909 if (tx->chains)
910 ipsec_chains_put_table(tx->chains, prio);
911
912 tx_put(ipsec, tx);
913 mutex_unlock(&tx->ft.mutex);
914 }
915
916 static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
917 __be32 *daddr)
918 {
919 if (!*saddr && !*daddr)
920 return;
921
922 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
923
924 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
925 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
926
927 if (*saddr) {
928 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
929 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
930 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
931 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
932 }
933
934 if (*daddr) {
935 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
936 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
937 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
938 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
939 }
940 }
941
942 static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
943 __be32 *daddr)
944 {
945 if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
946 return;
947
948 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
949
950 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
951 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
952
953 if (!addr6_all_zero(saddr)) {
954 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
955 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
956 memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
957 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
958 }
959
960 if (!addr6_all_zero(daddr)) {
961 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
962 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
963 memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
964 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
965 }
966 }
967
968 static void setup_fte_esp(struct mlx5_flow_spec *spec)
969 {
970 /* ESP header */
971 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
972
973 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
974 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
975 }
976
977 static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
978 {
979 /* SPI number */
980 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
981
982 if (encap) {
983 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
984 misc_parameters.inner_esp_spi);
985 MLX5_SET(fte_match_param, spec->match_value,
986 misc_parameters.inner_esp_spi, spi);
987 } else {
988 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
989 misc_parameters.outer_esp_spi);
990 MLX5_SET(fte_match_param, spec->match_value,
991 misc_parameters.outer_esp_spi, spi);
992 }
993 }
994
995 static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
996 {
997 /* Non fragmented */
998 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
999
1000 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
1001 MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
1002 }
1003
1004 static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
1005 {
1006 /* Add IPsec indicator in metadata_reg_a */
1007 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1008
1009 MLX5_SET(fte_match_param, spec->match_criteria,
1010 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1011 MLX5_SET(fte_match_param, spec->match_value,
1012 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
1013 }
1014
1015 static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
1016 {
1017 /* Pass policy check before choosing this SA */
1018 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
1019
1020 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1021 misc_parameters_2.metadata_reg_c_4);
1022 MLX5_SET(fte_match_param, spec->match_value,
1023 misc_parameters_2.metadata_reg_c_4, reqid);
1024 }
1025
1026 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
1027 {
1028 switch (upspec->proto) {
1029 case IPPROTO_UDP:
1030 if (upspec->dport) {
1031 MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1032 udp_dport, upspec->dport_mask);
1033 MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1034 udp_dport, upspec->dport);
1035 }
1036 if (upspec->sport) {
1037 MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1038 udp_sport, upspec->sport_mask);
1039 MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1040 udp_sport, upspec->sport);
1041 }
1042 break;
1043 case IPPROTO_TCP:
1044 if (upspec->dport) {
1045 MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1046 tcp_dport, upspec->dport_mask);
1047 MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1048 tcp_dport, upspec->dport);
1049 }
1050 if (upspec->sport) {
1051 MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
1052 tcp_sport, upspec->sport_mask);
1053 MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
1054 tcp_sport, upspec->sport);
1055 }
1056 break;
1057 default:
1058 return;
1059 }
1060
1061 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1062 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
1063 MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
1064 }
1065
1066 static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
1067 int type, u8 dir)
1068 {
1069 if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
1070 return MLX5_FLOW_NAMESPACE_FDB;
1071
1072 if (dir == XFRM_DEV_OFFLOAD_IN)
1073 return MLX5_FLOW_NAMESPACE_KERNEL;
1074
1075 return MLX5_FLOW_NAMESPACE_EGRESS;
1076 }
1077
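/*
 * Stamp @val into a metadata register via a modify-header action:
 * REG_B for RX, REG_C_4 for TX. The TX value is matched later by
 * setup_fte_reg_c4() to tie an SA rule to the policy that selected it.
 */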
1078 static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
1079 struct mlx5_flow_act *flow_act)
1080 {
1081 enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
1082 u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
1083 struct mlx5_core_dev *mdev = ipsec->mdev;
1084 struct mlx5_modify_hdr *modify_hdr;
1085
1086 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
1087 switch (dir) {
1088 case XFRM_DEV_OFFLOAD_IN:
1089 MLX5_SET(set_action_in, action, field,
1090 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
1091 break;
1092 case XFRM_DEV_OFFLOAD_OUT:
1093 MLX5_SET(set_action_in, action, field,
1094 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
1095 break;
1096 default:
1097 return -EINVAL;
1098 }
1099
1100 MLX5_SET(set_action_in, action, data, val);
1101 MLX5_SET(set_action_in, action, offset, 0);
1102 MLX5_SET(set_action_in, action, length, 32);
1103
1104 modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
1105 if (IS_ERR(modify_hdr)) {
1106 mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
1107 PTR_ERR(modify_hdr));
1108 return PTR_ERR(modify_hdr);
1109 }
1110
1111 flow_act->modify_hdr = modify_hdr;
1112 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1113 return 0;
1114 }
1115
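/*
 * Build the packet-reformat blob for tunnel mode. Decap (RX) needs
 * only the replacement L2 header; encap (TX) appends the outer
 * IPv4/IPv6 header plus the ESP header and 8 additional bytes
 * (likely reserved for the fields following the SPI).
 */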
1116 static int
1117 setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
1118 struct mlx5_accel_esp_xfrm_attrs *attrs,
1119 struct mlx5_pkt_reformat_params *reformat_params)
1120 {
1121 struct ip_esp_hdr *esp_hdr;
1122 struct ipv6hdr *ipv6hdr;
1123 struct ethhdr *eth_hdr;
1124 struct iphdr *iphdr;
1125 char *reformatbf;
1126 size_t bfflen;
1127 void *hdr;
1128
1129 bfflen = sizeof(*eth_hdr);
1130
1131 if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
1132 bfflen += sizeof(*esp_hdr) + 8;
1133
1134 switch (attrs->family) {
1135 case AF_INET:
1136 bfflen += sizeof(*iphdr);
1137 break;
1138 case AF_INET6:
1139 bfflen += sizeof(*ipv6hdr);
1140 break;
1141 default:
1142 return -EINVAL;
1143 }
1144 }
1145
1146 reformatbf = kzalloc(bfflen, GFP_KERNEL);
1147 if (!reformatbf)
1148 return -ENOMEM;
1149
1150 eth_hdr = (struct ethhdr *)reformatbf;
1151 switch (attrs->family) {
1152 case AF_INET:
1153 eth_hdr->h_proto = htons(ETH_P_IP);
1154 break;
1155 case AF_INET6:
1156 eth_hdr->h_proto = htons(ETH_P_IPV6);
1157 break;
1158 default:
1159 goto free_reformatbf;
1160 }
1161
1162 ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
1163 ether_addr_copy(eth_hdr->h_source, attrs->smac);
1164
1165 switch (attrs->dir) {
1166 case XFRM_DEV_OFFLOAD_IN:
1167 reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
1168 break;
1169 case XFRM_DEV_OFFLOAD_OUT:
1170 reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
1171 reformat_params->param_0 = attrs->authsize;
1172
1173 hdr = reformatbf + sizeof(*eth_hdr);
1174 switch (attrs->family) {
1175 case AF_INET:
1176 iphdr = (struct iphdr *)hdr;
1177 memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
1178 memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
1179 iphdr->version = 4;
1180 iphdr->ihl = 5;
1181 iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
1182 iphdr->protocol = IPPROTO_ESP;
1183 hdr += sizeof(*iphdr);
1184 break;
1185 case AF_INET6:
1186 ipv6hdr = (struct ipv6hdr *)hdr;
1187 memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
1188 memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
1189 ipv6hdr->nexthdr = IPPROTO_ESP;
1190 ipv6hdr->version = 6;
1191 ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
1192 hdr += sizeof(*ipv6hdr);
1193 break;
1194 default:
1195 goto free_reformatbf;
1196 }
1197
1198 esp_hdr = (struct ip_esp_hdr *)hdr;
1199 esp_hdr->spi = htonl(attrs->spi);
1200 break;
1201 default:
1202 goto free_reformatbf;
1203 }
1204
1205 reformat_params->size = bfflen;
1206 reformat_params->data = reformatbf;
1207 return 0;
1208
1209 free_reformatbf:
1210 kfree(reformatbf);
1211 return -EINVAL;
1212 }
1213
1214 static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
1215 {
1216 switch (attrs->dir) {
1217 case XFRM_DEV_OFFLOAD_IN:
1218 if (attrs->encap)
1219 return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
1220 return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
1221 case XFRM_DEV_OFFLOAD_OUT:
1222 if (attrs->family == AF_INET) {
1223 if (attrs->encap)
1224 return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
1225 return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
1226 }
1227
1228 if (attrs->encap)
1229 return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
1230 return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
1231 default:
1232 WARN_ON(true);
1233 }
1234
1235 return -EINVAL;
1236 }
1237
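/*
 * Build the packet-reformat blob for transport mode. RX decap carries
 * no data; TX encap fills a fixed 16-byte ESP template (preceded by a
 * UDP header when NAT-T encapsulation is in use), of which only the
 * SPI is written here - the remaining bytes are left zeroed.
 */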
1238 static int
1239 setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
1240 struct mlx5_pkt_reformat_params *reformat_params)
1241 {
1242 struct udphdr *udphdr;
1243 char *reformatbf;
1244 size_t bfflen;
1245 __be32 spi;
1246 void *hdr;
1247
1248 reformat_params->type = get_reformat_type(attrs);
1249 if (reformat_params->type < 0)
1250 return reformat_params->type;
1251
1252 switch (attrs->dir) {
1253 case XFRM_DEV_OFFLOAD_IN:
1254 break;
1255 case XFRM_DEV_OFFLOAD_OUT:
1256 bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
1257 if (attrs->encap)
1258 bfflen += sizeof(*udphdr);
1259
1260 reformatbf = kzalloc(bfflen, GFP_KERNEL);
1261 if (!reformatbf)
1262 return -ENOMEM;
1263
1264 hdr = reformatbf;
1265 if (attrs->encap) {
1266 udphdr = (struct udphdr *)reformatbf;
1267 udphdr->source = attrs->sport;
1268 udphdr->dest = attrs->dport;
1269 hdr += sizeof(*udphdr);
1270 }
1271
1272 /* convert to network format */
1273 spi = htonl(attrs->spi);
1274 memcpy(hdr, &spi, sizeof(spi));
1275
1276 reformat_params->param_0 = attrs->authsize;
1277 reformat_params->size = bfflen;
1278 reformat_params->data = reformatbf;
1279 break;
1280 default:
1281 return -EINVAL;
1282 }
1283
1284 return 0;
1285 }
1286
1287 static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
1288 struct mlx5_accel_esp_xfrm_attrs *attrs,
1289 struct mlx5_flow_act *flow_act)
1290 {
1291 enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
1292 attrs->dir);
1293 struct mlx5_pkt_reformat_params reformat_params = {};
1294 struct mlx5_core_dev *mdev = ipsec->mdev;
1295 struct mlx5_pkt_reformat *pkt_reformat;
1296 int ret;
1297
1298 switch (attrs->mode) {
1299 case XFRM_MODE_TRANSPORT:
1300 ret = setup_pkt_transport_reformat(attrs, &reformat_params);
1301 break;
1302 case XFRM_MODE_TUNNEL:
1303 ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
1304 break;
1305 default:
1306 ret = -EINVAL;
1307 }
1308
1309 if (ret)
1310 return ret;
1311
1312 pkt_reformat =
1313 mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
1314 kfree(reformat_params.data);
1315 if (IS_ERR(pkt_reformat))
1316 return PTR_ERR(pkt_reformat);
1317
1318 flow_act->pkt_reformat = pkt_reformat;
1319 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
1320 return 0;
1321 }
1322
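/*
 * Per-SA RX rule: match outer addresses and SPI (plus ESP when not
 * UDP-encapsulated), decrypt with the SA's IPsec object, stamp the
 * metadata register, count, and forward to the status table (or drop
 * when attrs->drop is set).
 */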
1323 static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
1324 {
1325 struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
1326 struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
1327 struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
1328 struct mlx5_flow_destination dest[2];
1329 struct mlx5_flow_act flow_act = {};
1330 struct mlx5_flow_handle *rule;
1331 struct mlx5_flow_spec *spec;
1332 struct mlx5e_ipsec_rx *rx;
1333 struct mlx5_fc *counter;
1334 int err = 0;
1335
1336 rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
1337 if (IS_ERR(rx))
1338 return PTR_ERR(rx);
1339
1340 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1341 if (!spec) {
1342 err = -ENOMEM;
1343 goto err_alloc;
1344 }
1345
1346 if (attrs->family == AF_INET)
1347 setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1348 else
1349 setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1350
1351 setup_fte_spi(spec, attrs->spi, attrs->encap);
1352 if (!attrs->encap)
1353 setup_fte_esp(spec);
1354 setup_fte_no_frags(spec);
1355 setup_fte_upper_proto_match(spec, &attrs->upspec);
1356
1357 if (rx != ipsec->rx_esw)
1358 err = setup_modify_header(ipsec, attrs->type,
1359 sa_entry->ipsec_obj_id | BIT(31),
1360 XFRM_DEV_OFFLOAD_IN, &flow_act);
1361 else
1362 err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
1363
1364 if (err)
1365 goto err_mod_header;
1366
1367 switch (attrs->type) {
1368 case XFRM_DEV_OFFLOAD_PACKET:
1369 err = setup_pkt_reformat(ipsec, attrs, &flow_act);
1370 if (err)
1371 goto err_pkt_reformat;
1372 break;
1373 default:
1374 break;
1375 }
1376
1377 counter = mlx5_fc_create(mdev, true);
1378 if (IS_ERR(counter)) {
1379 err = PTR_ERR(counter);
1380 goto err_add_cnt;
1381 }
1382 flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
1383 flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
1384 flow_act.flags |= FLOW_ACT_NO_APPEND;
1385 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
1386 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1387 if (attrs->drop)
1388 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1389 else
1390 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1391 dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1392 dest[0].ft = rx->ft.status;
1393 dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1394 dest[1].counter_id = mlx5_fc_id(counter);
1395 rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
1396 if (IS_ERR(rule)) {
1397 err = PTR_ERR(rule);
1398 mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
1399 goto err_add_flow;
1400 }
1401 kvfree(spec);
1402
1403 sa_entry->ipsec_rule.rule = rule;
1404 sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
1405 sa_entry->ipsec_rule.fc = counter;
1406 sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
1407 return 0;
1408
1409 err_add_flow:
1410 mlx5_fc_destroy(mdev, counter);
1411 err_add_cnt:
1412 if (flow_act.pkt_reformat)
1413 mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
1414 err_pkt_reformat:
1415 mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
1416 err_mod_header:
1417 kvfree(spec);
1418 err_alloc:
1419 rx_ft_put(ipsec, attrs->family, attrs->type);
1420 return err;
1421 }
1422
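/*
 * Per-SA TX rule: crypto offload matches SPI/ESP plus the reg_a IPsec
 * marker set by the data path; packet offload matches reg_c_4 against
 * the policy's reqid and adds the encap reformat. Either way the
 * packet is encrypted, counted and forwarded to the status table.
 */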
1423 static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
1424 {
1425 struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
1426 struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
1427 struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
1428 struct mlx5_flow_destination dest[2];
1429 struct mlx5_flow_act flow_act = {};
1430 struct mlx5_flow_handle *rule;
1431 struct mlx5_flow_spec *spec;
1432 struct mlx5e_ipsec_tx *tx;
1433 struct mlx5_fc *counter;
1434 int err;
1435
1436 tx = tx_ft_get(mdev, ipsec, attrs->type);
1437 if (IS_ERR(tx))
1438 return PTR_ERR(tx);
1439
1440 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1441 if (!spec) {
1442 err = -ENOMEM;
1443 goto err_alloc;
1444 }
1445
1446 if (attrs->family == AF_INET)
1447 setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1448 else
1449 setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1450
1451 setup_fte_no_frags(spec);
1452 setup_fte_upper_proto_match(spec, &attrs->upspec);
1453
1454 switch (attrs->type) {
1455 case XFRM_DEV_OFFLOAD_CRYPTO:
1456 setup_fte_spi(spec, attrs->spi, false);
1457 setup_fte_esp(spec);
1458 setup_fte_reg_a(spec);
1459 break;
1460 case XFRM_DEV_OFFLOAD_PACKET:
1461 if (attrs->reqid)
1462 setup_fte_reg_c4(spec, attrs->reqid);
1463 err = setup_pkt_reformat(ipsec, attrs, &flow_act);
1464 if (err)
1465 goto err_pkt_reformat;
1466 break;
1467 default:
1468 break;
1469 }
1470
1471 counter = mlx5_fc_create(mdev, true);
1472 if (IS_ERR(counter)) {
1473 err = PTR_ERR(counter);
1474 goto err_add_cnt;
1475 }
1476
1477 flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
1478 flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
1479 flow_act.flags |= FLOW_ACT_NO_APPEND;
1480 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
1481 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1482 if (attrs->drop)
1483 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1484 else
1485 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1486
1487 dest[0].ft = tx->ft.status;
1488 dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1489 dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1490 dest[1].counter_id = mlx5_fc_id(counter);
1491 rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
1492 if (IS_ERR(rule)) {
1493 err = PTR_ERR(rule);
1494 mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
1495 goto err_add_flow;
1496 }
1497
1498 kvfree(spec);
1499 sa_entry->ipsec_rule.rule = rule;
1500 sa_entry->ipsec_rule.fc = counter;
1501 sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
1502 return 0;
1503
1504 err_add_flow:
1505 mlx5_fc_destroy(mdev, counter);
1506 err_add_cnt:
1507 if (flow_act.pkt_reformat)
1508 mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
1509 err_pkt_reformat:
1510 kvfree(spec);
1511 err_alloc:
1512 tx_ft_put(ipsec, attrs->type);
1513 return err;
1514 }
1515
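/*
 * TX policy rule: ALLOW forwards to the SA table (writing reqid into
 * reg_c_4 so the SA lookup can verify the policy match); BLOCK drops
 * and counts against the TX drop counter.
 */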
1516 static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
1517 {
1518 struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
1519 struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
1520 struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
1521 struct mlx5_flow_destination dest[2] = {};
1522 struct mlx5_flow_act flow_act = {};
1523 struct mlx5_flow_handle *rule;
1524 struct mlx5_flow_spec *spec;
1525 struct mlx5_flow_table *ft;
1526 struct mlx5e_ipsec_tx *tx;
1527 int err, dstn = 0;
1528
1529 ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
1530 if (IS_ERR(ft))
1531 return PTR_ERR(ft);
1532
1533 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1534 if (!spec) {
1535 err = -ENOMEM;
1536 goto err_alloc;
1537 }
1538
1539 tx = ipsec_tx(ipsec, attrs->type);
1540 if (attrs->family == AF_INET)
1541 setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1542 else
1543 setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1544
1545 setup_fte_no_frags(spec);
1546 setup_fte_upper_proto_match(spec, &attrs->upspec);
1547
1548 switch (attrs->action) {
1549 case XFRM_POLICY_ALLOW:
1550 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1551 if (!attrs->reqid)
1552 break;
1553
1554 err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
1555 XFRM_DEV_OFFLOAD_OUT, &flow_act);
1556 if (err)
1557 goto err_mod_header;
1558 break;
1559 case XFRM_POLICY_BLOCK:
1560 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
1561 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1562 dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1563 dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
1564 dstn++;
1565 break;
1566 default:
1567 WARN_ON(true);
1568 err = -EINVAL;
1569 goto err_mod_header;
1570 }
1571
1572 flow_act.flags |= FLOW_ACT_NO_APPEND;
1573 if (tx == ipsec->tx_esw && tx->chains)
1574 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
1575 dest[dstn].ft = tx->ft.sa;
1576 dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1577 dstn++;
1578 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
1579 if (IS_ERR(rule)) {
1580 err = PTR_ERR(rule);
1581 mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
1582 goto err_action;
1583 }
1584
1585 kvfree(spec);
1586 pol_entry->ipsec_rule.rule = rule;
1587 pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
1588 return 0;
1589
1590 err_action:
1591 if (flow_act.modify_hdr)
1592 mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
1593 err_mod_header:
1594 kvfree(spec);
1595 err_alloc:
1596 tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
1597 return err;
1598 }
1599
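/*
 * RX policy rule: ALLOW forwards to the SA table for decryption,
 * BLOCK drops and counts against the RX drop counter.
 */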
1600 static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
1601 {
1602 struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
1603 struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
1604 struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
1605 struct mlx5_flow_destination dest[2];
1606 struct mlx5_flow_act flow_act = {};
1607 struct mlx5_flow_handle *rule;
1608 struct mlx5_flow_spec *spec;
1609 struct mlx5_flow_table *ft;
1610 struct mlx5e_ipsec_rx *rx;
1611 int err, dstn = 0;
1612
1613 ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
1614 attrs->type);
1615 if (IS_ERR(ft))
1616 return PTR_ERR(ft);
1617
1618 rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
1619
1620 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1621 if (!spec) {
1622 err = -ENOMEM;
1623 goto err_alloc;
1624 }
1625
1626 if (attrs->family == AF_INET)
1627 setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
1628 else
1629 setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
1630
1631 setup_fte_no_frags(spec);
1632 setup_fte_upper_proto_match(spec, &attrs->upspec);
1633
1634 switch (attrs->action) {
1635 case XFRM_POLICY_ALLOW:
1636 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1637 break;
1638 case XFRM_POLICY_BLOCK:
1639 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
1640 dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1641 dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
1642 dstn++;
1643 break;
1644 default:
1645 WARN_ON(true);
1646 err = -EINVAL;
1647 goto err_action;
1648 }
1649
1650 flow_act.flags |= FLOW_ACT_NO_APPEND;
1651 if (rx == ipsec->rx_esw && rx->chains)
1652 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
1653 dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1654 dest[dstn].ft = rx->ft.sa;
1655 dstn++;
1656 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
1657 if (IS_ERR(rule)) {
1658 err = PTR_ERR(rule);
1659 mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
1660 goto err_action;
1661 }
1662
1663 kvfree(spec);
1664 pol_entry->ipsec_rule.rule = rule;
1665 return 0;
1666
1667 err_action:
1668 kvfree(spec);
1669 err_alloc:
1670 rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
1671 return err;
1672 }
1673
1674 static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
1675 struct mlx5e_ipsec_fc *fc)
1676 {
1677 mlx5_fc_destroy(mdev, fc->drop);
1678 mlx5_fc_destroy(mdev, fc->cnt);
1679 kfree(fc);
1680 }
1681
1682 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
1683 {
1684 struct mlx5_core_dev *mdev = ipsec->mdev;
1685
1686 ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
1687 ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
1688 if (ipsec->is_uplink_rep) {
1689 ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
1690 ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
1691 }
1692 }
1693
1694 static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
1695 {
1696 struct mlx5e_ipsec_fc *fc;
1697 struct mlx5_fc *counter;
1698 int err;
1699
1700 fc = kzalloc(sizeof(*fc), GFP_KERNEL);
1701 if (!fc)
1702 return ERR_PTR(-ENOMEM);
1703
1704 counter = mlx5_fc_create(mdev, false);
1705 if (IS_ERR(counter)) {
1706 err = PTR_ERR(counter);
1707 goto err_cnt;
1708 }
1709 fc->cnt = counter;
1710
1711 counter = mlx5_fc_create(mdev, false);
1712 if (IS_ERR(counter)) {
1713 err = PTR_ERR(counter);
1714 goto err_drop;
1715 }
1716 fc->drop = counter;
1717
1718 return fc;
1719
1720 err_drop:
1721 mlx5_fc_destroy(mdev, fc->cnt);
1722 err_cnt:
1723 kfree(fc);
1724 return ERR_PTR(err);
1725 }
1726
1727 static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
1728 {
1729 struct mlx5_core_dev *mdev = ipsec->mdev;
1730 struct mlx5e_ipsec_fc *fc;
1731 int err;
1732
1733 fc = ipsec_fs_init_single_counter(mdev);
1734 if (IS_ERR(fc)) {
1735 err = PTR_ERR(fc);
1736 goto err_rx_cnt;
1737 }
1738 ipsec->rx_ipv4->fc = fc;
1739
1740 fc = ipsec_fs_init_single_counter(mdev);
1741 if (IS_ERR(fc)) {
1742 err = PTR_ERR(fc);
1743 goto err_tx_cnt;
1744 }
1745 ipsec->tx->fc = fc;
1746
1747 if (ipsec->is_uplink_rep) {
1748 fc = ipsec_fs_init_single_counter(mdev);
1749 if (IS_ERR(fc)) {
1750 err = PTR_ERR(fc);
1751 goto err_rx_esw_cnt;
1752 }
1753 ipsec->rx_esw->fc = fc;
1754
1755 fc = ipsec_fs_init_single_counter(mdev);
1756 if (IS_ERR(fc)) {
1757 err = PTR_ERR(fc);
1758 goto err_tx_esw_cnt;
1759 }
1760 ipsec->tx_esw->fc = fc;
1761 }
1762
1763 /* Both IPv4 and IPv6 point to same flow counters struct. */
1764 ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
1765 return 0;
1766
1767 err_tx_esw_cnt:
1768 ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
1769 err_rx_esw_cnt:
1770 ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
1771 err_tx_cnt:
1772 ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
1773 err_rx_cnt:
1774 return err;
1775 }
1776
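/*
 * Aggregate the HW flow counters into the driver stats: the NIC RX/TX
 * counters always, plus the eswitch counters when acting as uplink
 * representor. IPv6 shares the IPv4 counter pair, so rx_ipv4 alone
 * covers both address families.
 */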
1777 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
1778 {
1779 struct mlx5_core_dev *mdev = priv->mdev;
1780 struct mlx5e_ipsec *ipsec = priv->ipsec;
1781 struct mlx5e_ipsec_hw_stats *stats;
1782 struct mlx5e_ipsec_fc *fc;
1783 u64 packets, bytes;
1784
1785 stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
1786
1787 stats->ipsec_rx_pkts = 0;
1788 stats->ipsec_rx_bytes = 0;
1789 stats->ipsec_rx_drop_pkts = 0;
1790 stats->ipsec_rx_drop_bytes = 0;
1791 stats->ipsec_tx_pkts = 0;
1792 stats->ipsec_tx_bytes = 0;
1793 stats->ipsec_tx_drop_pkts = 0;
1794 stats->ipsec_tx_drop_bytes = 0;
1795
1796 fc = ipsec->rx_ipv4->fc;
1797 mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
1798 mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
1799 &stats->ipsec_rx_drop_bytes);
1800
1801 fc = ipsec->tx->fc;
1802 mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
1803 mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
1804 &stats->ipsec_tx_drop_bytes);
1805
1806 if (ipsec->is_uplink_rep) {
1807 fc = ipsec->rx_esw->fc;
1808 if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
1809 stats->ipsec_rx_pkts += packets;
1810 stats->ipsec_rx_bytes += bytes;
1811 }
1812
1813 if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
1814 stats->ipsec_rx_drop_pkts += packets;
1815 stats->ipsec_rx_drop_bytes += bytes;
1816 }
1817
1818 fc = ipsec->tx_esw->fc;
1819 if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
1820 stats->ipsec_tx_pkts += packets;
1821 stats->ipsec_tx_bytes += bytes;
1822 }
1823
1824 if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
1825 stats->ipsec_tx_drop_pkts += packets;
1826 stats->ipsec_tx_drop_bytes += bytes;
1827 }
1828 }
1829 }
1830
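/* TC offload and packet-offload IPsec cannot be active on the same
 * device at once. The num_block_tc/num_block_ipsec counters implement
 * the mutual exclusion: IPsec takes a num_block_tc reference only if
 * the TC side has not already blocked IPsec via num_block_ipsec. With
 * an eswitch present, the check runs under the eswitch mode lock so it
 * cannot race with a switchdev mode change.
 */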
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int err = 0;

	if (esw) {
		err = mlx5_esw_lock(esw);
		if (err)
			return err;
	}

	if (mdev->num_block_ipsec) {
		err = -EBUSY;
		goto unlock;
	}

	mdev->num_block_tc++;

unlock:
	if (esw)
		mlx5_esw_unlock(esw);

	return err;
}
#else
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	if (mdev->num_block_ipsec)
		return -EBUSY;

	mdev->num_block_tc++;
	return 0;
}
#endif

static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
{
	mdev->num_block_tc--;
}

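/* Entry point for offloading an SA. Only packet offload takes the TC
 * exclusion reference; crypto offload coexists with TC and skips it.
 */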
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	int err;

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
		if (err)
			return err;
	}

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_rule(sa_entry);
	else
		err = rx_add_rule(sa_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
	return err;
}

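/* Tear down an SA rule in reverse order of creation. TX rules return
 * early; RX rules additionally release their modify header and any
 * eswitch RX id mapping before dropping the table reference.
 */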
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);
	mlx5_fc_destroy(mdev, ipsec_rule->fc);
	if (ipsec_rule->pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(mdev);

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
		return;
	}

	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}

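/* Unlike SA rules, policies take the TC exclusion reference
 * unconditionally: policy offload only exists in packet offload mode.
 */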
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	int err;

	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
	if (err)
		return err;

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_policy(pol_entry);
	else
		err = rx_add_policy(pol_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
	return err;
}

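/* Mirror of mlx5e_accel_ipsec_fs_add_pol(). Only egress policies may
 * own a modify header, hence the dealloc on the TX-only path.
 */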
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
				 pol_entry->attrs.prio, pol_entry->attrs.type);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
}

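/* Undo mlx5e_accel_ipsec_fs_init(). A NULL ipsec->tx means init never
 * completed, so there is nothing to free. The refcnt WARNs catch flow
 * tables leaked through unbalanced get/put calls.
 */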
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	if (!ipsec->tx)
		return;

	if (ipsec->roce)
		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);

	ipsec_fs_destroy_counters(ipsec);
	mutex_destroy(&ipsec->tx->ft.mutex);
	WARN_ON(ipsec->tx->ft.refcnt);
	kfree(ipsec->tx);

	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
	kfree(ipsec->rx_ipv4);

	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
	kfree(ipsec->rx_ipv6);

	if (ipsec->is_uplink_rep) {
		xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);

		mutex_destroy(&ipsec->tx_esw->ft.mutex);
		WARN_ON(ipsec->tx_esw->ft.refcnt);
		kfree(ipsec->tx_esw);

		mutex_destroy(&ipsec->rx_esw->ft.mutex);
		WARN_ON(ipsec->rx_esw->ft.refcnt);
		kfree(ipsec->rx_esw);
	}
}

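/* Resolve flow namespaces and allocate the per-direction steering
 * contexts. The flow tables themselves are created lazily, on the
 * first refcounted get, under the per-context ft.mutex initialized
 * here. RoCE-over-IPsec steering is only set up for non-representor
 * devices that advertise MLX5_IPSEC_CAP_ROCE.
 */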
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_namespace *ns, *ns_esw;
	int err = -ENOMEM;

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	if (ipsec->is_uplink_rep) {
		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
		if (!ns_esw)
			return -EOPNOTSUPP;

		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
		if (!ipsec->tx_esw)
			return -ENOMEM;

		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
		if (!ipsec->rx_esw)
			goto err_rx_esw;
	}

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		goto err_tx;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_rx_ipv4;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv6;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_counters;

	mutex_init(&ipsec->tx->ft.mutex);
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);
	ipsec->tx->ns = ns;

	if (ipsec->is_uplink_rep) {
		mutex_init(&ipsec->tx_esw->ft.mutex);
		mutex_init(&ipsec->rx_esw->ft.mutex);
		ipsec->tx_esw->ns = ns_esw;
		xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
	}

	return 0;

err_counters:
	kfree(ipsec->rx_ipv6);
err_rx_ipv6:
	kfree(ipsec->rx_ipv4);
err_rx_ipv4:
	kfree(ipsec->tx);
err_tx:
	kfree(ipsec->rx_esw);
err_rx_esw:
	kfree(ipsec->tx_esw);
	return err;
}

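/* Make-before-break update of an SA rule: install the new rule through
 * a shadow entry with a zeroed ipsec_rule, and only on success delete
 * the old rule and copy the shadow back. A failed add leaves the
 * original rule untouched.
 */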
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
	int err;

	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
	if (err)
		return;

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}

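/* Report whether the RX or TX table hierarchy this SA resolves to was
 * created with tunnel mode enabled, per offload direction.
 */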
bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5e_ipsec_tx *tx;

	rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		return tx->allow_tunnel_mode;

	return rx->allow_tunnel_mode;
}
