// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"
#include "fs_core.h"
#include "eswitch.h"

enum {
	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
	MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};
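/* These values are used as the data_offset_condition_operand of the ASO
 * control segment when the driver rewrites the remove_flow packet counter
 * and soft lifetime fields of the IPsec ASO context; see
 * mlx5e_ipsec_aso_update_hard() and mlx5e_ipsec_aso_update_soft() below.
 */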

u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	      MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
		caps |= MLX5_IPSEC_CAP_CRYPTO;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
	    (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
	     (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
	      is_mdev_legacy_mode(mdev)))) {
		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

		if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
		     MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
		    MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
			caps |= MLX5_IPSEC_CAP_PRIO;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_l2_to_l3_esp_tunnel) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_l3_esp_tunnel_to_l2))
			caps |= MLX5_IPSEC_CAP_TUNNEL;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_transport_over_udp) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_transport_over_udp))
			caps |= MLX5_IPSEC_CAP_ESPINUDP;
	}

	if (mlx5_get_roce_state(mdev) &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
		caps |= MLX5_IPSEC_CAP_ROCE;

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	/* We can accommodate up to 2^24 different IPsec objects
	 * because we use up to 24 bits of flow table metadata
	 * to hold the IPsec object's unique handle.
	 */
	WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
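
/* Illustrative usage sketch only (the real callers live in the IPsec offload
 * init paths): consumers of mlx5_ipsec_device_caps() are expected to test the
 * returned bitmask before enabling a given offload mode, e.g.
 *
 *	u32 caps = mlx5_ipsec_device_caps(mdev);
 *
 *	if (caps & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
 *		// full packet offload (incl. ASO-based lifetime/ESN handling)
 *	else if (caps & MLX5_IPSEC_CAP_CRYPTO)
 *		// crypto-only offload
 */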

static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_esn.replay_window);
			MLX5_SET(ipsec_aso, aso_ctx, mode,
				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
		}
		MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
			 attrs->replay_esn.esn);
	}

	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
	 * in flow steering to perform matching against. Please be
	 * aware that this register was chosen arbitrarily and can't
	 * be used in other places as long as IPsec packet offload is
	 * active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);

	if (attrs->lft.hard_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
			 attrs->lft.hard_packet_limit);
		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
			 attrs->lft.soft_packet_limit);

		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
	}
}

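/* Build and execute the CREATE_GENERAL_OBJECT command for an IPSEC object:
 * salt and implicit IV come from the AES-GCM keymat, ESN fields are set when
 * anti-replay/ESN is triggered, and the DEK handle created earlier is
 * referenced. For packet offload the ASO context is filled in as well. On
 * success the firmware-assigned object id is stored in the SA entry.
 */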
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	struct mlx5e_hw_objs *res;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
	/* esn */
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
	}

	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	res = &mdev->mlx5e_res.hw_objs;
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

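/* mlx5_ipsec_create_sa_ctx() and mlx5_ipsec_free_sa_ctx() are a pair:
 * creation first installs the AES-GCM key as a DEK object and then creates
 * the IPSEC general object that references it; teardown destroys them in
 * the reverse order.
 */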
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
					 aes_gcm->key_len / BITS_PER_BYTE,
					 MLX5_ACCEL_OBJ_IPSEC_KEY,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
	return err;
}

void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

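/* Modify follows a query-then-modify pattern: the object is first queried to
 * read modify_field_select, the ESN MSB/overlap bits are verified to be
 * modifiable, and only then is the MODIFY_GENERAL_OBJECT command issued with
 * the new ESN state.
 */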
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_wqe_aso_ctrl_seg *data)
{
	data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
				      MLX5_ASO_ALWAYS_TRUE << 4;

	mlx5e_ipsec_aso_query(sa_entry, data);
}

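/* Interpretation of the ESN event, as reflected by the logic below: when the
 * reported mode_parameter (the last sequence number seen by HW) has dropped
 * below the mid-scope threshold, the low 32 bits have wrapped, so the ESN MSB
 * is incremented and the overlap bit is cleared; otherwise only the overlap
 * bit is set. The new state is pushed to the IPSEC object, and the ASO write
 * of BIT_ULL(54) is assumed to re-arm the ESN event (hedged: inferred from
 * the bit layout used here, not from a spec).
 */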
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
					 u32 mode_param)
{
	struct mlx5_accel_esp_xfrm_attrs attrs = {};
	struct mlx5_wqe_aso_ctrl_seg data = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn_msb++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);

	/* It is safe to execute the modify below unlocked since the only
	 * flows that could affect this HW object are create, destroy and
	 * this work.
	 *
	 * The creation flow can't co-exist with this modify work, the
	 * destruction flow would cancel this work, and this work is a
	 * single entity that can't conflict with itself.
	 */
	spin_unlock_bh(&sa_entry->x->lock);
	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
	spin_lock_bh(&sa_entry->x->lock);

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_update(sa_entry, &data);
}

static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
	data.data_mask = data.bitwise_data;
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
					u32 val)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
	data.bitwise_data = cpu_to_be64(val);
	data.data_mask = cpu_to_be64(U32_MAX);
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

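/* Rough summary of the lifetime handling below (a reading of this driver
 * logic, not an authoritative HW description): the ASO tracks packet and soft
 * lifetime values that are narrower than the 64-bit XFRM limits, so large
 * limits are split into "rounds" of at most 2^31 packets. Each hard-limit
 * event re-arms the counter for another round, and xfrm_state_check_expire()
 * is only called once limits.round reaches numb_rounds_soft/numb_rounds_hard
 * (or the counter actually drains to zero).
 */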
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	bool soft_arm, hard_arm;
	u64 hard_cnt;

	lockdep_assert_held(&sa_entry->x->lock);

	soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
	hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
	if (!soft_arm && !hard_arm)
		/* It is not a lifetime event */
		return;

	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
	if (!hard_cnt || hard_arm) {
		/* It is possible to see the packet counter equal to zero
		 * without the hard limit event armed. Such a situation can
		 * occur if the counter decreased while we handled the soft
		 * limit event.
		 *
		 * However, it would be a HW/FW bug if the hard limit event
		 * is raised and the packet counter is not zero.
		 */
		WARN_ON_ONCE(hard_arm && hard_cnt);

		/* Notify about hard limit */
		xfrm_state_check_expire(sa_entry->x);
		return;
	}

	/* We are in a soft limit event. */
	if (!sa_entry->limits.soft_limit_hit &&
	    sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
		sa_entry->limits.soft_limit_hit = true;
		/* Notify about soft limit */
		xfrm_state_check_expire(sa_entry->x);

		if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
			goto hard;

		if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
			/* We cannot avoid a soft_value that might have the
			 * high bit set. For instance, soft_value = 2^31 + 1
			 * cannot be adjusted to the low-bit-clear version of
			 * soft_value = 1 because it is too close to 0.
			 *
			 * Thus we have this corner case where we can hit the
			 * soft limit with the high bit set, but cannot adjust
			 * the counter. Thus we set a temporary interrupt_value
			 * at least 2^30 away from here and do the adjustment
			 * then.
			 */
			mlx5e_ipsec_aso_update_soft(sa_entry,
						    BIT_ULL(31) - BIT_ULL(30));
			sa_entry->limits.fix_limit = true;
			return;
		}

		sa_entry->limits.fix_limit = true;
	}

hard:
	if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
		mlx5e_ipsec_aso_update_soft(sa_entry, 0);
		attrs->lft.soft_packet_limit = XFRM_INF;
		return;
	}

	mlx5e_ipsec_aso_update_hard(sa_entry);
	sa_entry->limits.round++;
	if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
		mlx5e_ipsec_aso_update_soft(sa_entry,
					    attrs->lft.soft_packet_limit);
	if (sa_entry->limits.fix_limit) {
		sa_entry->limits.fix_limit = false;
		mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
	}
}

static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_aso *aso;
	int ret;

	aso = sa_entry->ipsec->aso;
	attrs = &sa_entry->attrs;

	spin_lock_bh(&sa_entry->x->lock);
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->replay_esn.trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF)
		mlx5e_ipsec_handle_limits(sa_entry);

unlock:
	spin_unlock_bh(&sa_entry->x->lock);
	kfree(work);
}

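/* The notifier below runs in atomic context, which is why the work item is
 * allocated with GFP_ATOMIC and the actual ASO query is deferred to the
 * IPsec workqueue via mlx5e_ipsec_handle_event() above.
 */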
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	struct mlx5_eqe *eqe = data;
	u16 type;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return NOTIFY_DONE;

	sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
	if (!sa_entry)
		return NOTIFY_DONE;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->data = sa_entry;

	queue_work(ipsec->wq, &work->work);
	return NOTIFY_OK;
}

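/* ASO setup: allocate the ASO context scratch buffer, DMA-map it
 * bidirectionally (the same buffer is used both to read the context back and
 * to push updates), create the ASO work queue via mlx5_aso_create() and
 * register the OBJECT_CHANGE event notifier. mlx5e_ipsec_aso_cleanup()
 * undoes these steps in reverse order.
 */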
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_hw_objs *res;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	res = &mdev->mlx5e_res.hw_objs;

	pdev = mlx5_core_dma_dev(mdev);
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
				       DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, res->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = mlx5_core_dma_dev(mdev);

	mlx5_notifier_unregister(mdev, &ipsec->nb);
	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(aso);
	ipsec->aso = NULL;
}

static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

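/* mlx5e_ipsec_aso_query() posts a single ASO WQE that reads the object's ASO
 * context back into aso->ctx over DMA and, when @data is supplied, also
 * applies the caller's bitwise update. The CQ is then polled for up to
 * roughly 10ms with 10us delays, since callers hold a BH spinlock.
 */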
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
			  struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	unsigned long expires;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l =
		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(res->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		ret = mlx5_aso_poll_cq(aso->aso, false);
		if (ret)
			/* We are in atomic context */
			udelay(10);
	} while (ret && time_is_after_jiffies(expires));
	spin_unlock_bh(&aso->lock);
	return ret;
}