// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}

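/* Reserve room in the SQ for the worst case of the kTLS TX flow: static and
 * progress params WQEs, the DUMP WQEs of a full record resync, and a
 * trailing fence NOP.
 */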
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u16 num_dumps, stop_room = 0;

	if (!mlx5e_is_ktls_tx(mdev))
		return 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
	stop_room += 1; /* fence nop */

	return stop_room;
}

static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
{
	MLX5_SET(tisc, tisc, tls_en, 1);
	MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
}

static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));

	return mlx5_core_create_tis(mdev, in, tisn);
}

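/* Callback-based variants of TIS create/destroy, used by the TX pool works
 * so that a bulk of firmware commands can be issued without blocking on
 * each one.
 */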
static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
				    struct mlx5_async_ctx *async_ctx,
				    u32 *out, int outlen,
				    mlx5_async_cbk_t callback,
				    struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
				     struct mlx5_async_ctx *async_ctx,
				     u32 *out, int outlen,
				     mlx5_async_cbk_t callback,
				     struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

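/* Per-connection TX offload context. The fast-path fields are accessed on
 * every transmitted SKB; the rest are used on connection setup, teardown
 * and resync.
 */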
struct mlx5e_ktls_offload_context_tx {
	/* fast path */
	u32 expected_seq;
	u32 tisn;
	bool ctx_post_pending;
	/* control / resync */
	struct list_head list_node; /* member of the pool */
	union mlx5e_crypto_info crypto_info;
	struct tls_offload_context_tx *tx_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	u32 key_id;
	u8 create_err : 1;
};

static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}

/* struct for callback API management */
struct mlx5e_async_ctx {
	struct mlx5_async_work context;
	struct mlx5_async_ctx async_ctx;
	struct work_struct work;
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct completion complete;
	int err;
	union {
		u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
		u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
	};
};

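/* Allocate and initialize n async command contexts, one per in-flight TIS
 * command of a bulk operation.
 */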
static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
	struct mlx5e_async_ctx *bulk_async;
	int i;

	bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
	if (!bulk_async)
		return NULL;

	for (i = 0; i < n; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
		init_completion(&async->complete);
	}

	return bulk_async;
}

static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
	}
	kvfree(bulk_async);
}

static void create_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	if (status) {
		async->err = status;
		priv_tx->create_err = 1;
		goto out;
	}

	priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
out:
	complete(&async->complete);
}

static void destroy_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	complete(&async->complete);
	kfree(priv_tx);
}

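/* Allocate an offload context and create its TIS. With a NULL async
 * argument the TIS is created synchronously; otherwise the command is
 * issued through the async context and completes in create_tis_callback.
 */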
static struct mlx5e_ktls_offload_context_tx *
mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
		       struct mlx5e_async_ctx *async)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	int err;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return ERR_PTR(-ENOMEM);

	priv_tx->mdev = mdev;
	priv_tx->sw_stats = sw_stats;

	if (!async) {
		err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
		if (err)
			goto err_out;
	} else {
		async->priv_tx = priv_tx;
		err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
					       async->out_create, sizeof(async->out_create),
					       create_tis_callback, &async->context);
		if (err)
			goto err_out;
	}

	return priv_tx;

err_out:
	kfree(priv_tx);
	return ERR_PTR(err);
}

static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
				      struct mlx5e_async_ctx *async)
{
	if (priv_tx->create_err) {
		complete(&async->complete);
		kfree(priv_tx);
		return;
	}
	async->priv_tx = priv_tx;
	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
				  &async->async_ctx,
				  async->out_destroy, sizeof(async->out_destroy),
				  destroy_tis_callback, &async->context);
}

static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
					   struct list_head *list, int size)
{
	struct mlx5e_ktls_offload_context_tx *obj, *n;
	struct mlx5e_async_ctx *bulk_async;
	int i;

	bulk_async = mlx5e_bulk_async_init(mdev, size);
	if (!bulk_async)
		return;

	i = 0;
	list_for_each_entry_safe(obj, n, list, list_node) {
		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
		i++;
	}

	for (i = 0; i < size; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		wait_for_completion(&async->complete);
	}
	mlx5e_bulk_async_cleanup(bulk_async, size);
}

/* Recycling pool API */

#define MLX5E_TLS_TX_POOL_BULK (16)
#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)

struct mlx5e_tls_tx_pool {
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct mutex lock; /* Protects access to the pool */
	struct list_head list;
	size_t size;

	struct workqueue_struct *wq;
	struct work_struct create_work;
	struct work_struct destroy_work;
};

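/* Pool refill work: allocate MLX5E_TLS_TX_POOL_BULK offload contexts with
 * asynchronously created TISes and splice them into the pool. Re-queues
 * itself while the pool is still at or below the LOW watermark.
 */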
static void create_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, create_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	struct mlx5e_async_ctx *bulk_async;
	LIST_HEAD(local_list);
	int i, j, err = 0;

	bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
	if (!bulk_async)
		return;

	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}
		list_add(&obj->list_node, &local_list);
	}

	for (j = 0; j < i; j++) {
		struct mlx5e_async_ctx *async = &bulk_async[j];

		wait_for_completion(&async->complete);
		if (!err && async->err)
			err = async->err;
	}
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
	mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
	if (err)
		goto err_out;

	mutex_lock(&pool->lock);
	if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		goto err_out;
	}
	list_splice(&local_list, &pool->list);
	pool->size += MLX5E_TLS_TX_POOL_BULK;
	if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);
	return;

err_out:
	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
}

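/* Pool trim work: detach MLX5E_TLS_TX_POOL_BULK offload contexts from the
 * pool and destroy them. Re-queues itself while the pool is still at or
 * above the HIGH watermark.
 */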
static void destroy_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	LIST_HEAD(local_list);
	int i = 0;

	mutex_lock(&pool->lock);
	if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		return;
	}

	list_for_each_entry(obj, &pool->list, list_node)
		if (++i == MLX5E_TLS_TX_POOL_BULK)
			break;

	list_cut_position(&local_list, &pool->list, &obj->list_node);
	pool->size -= MLX5E_TLS_TX_POOL_BULK;
	if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);

	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
	atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
}

static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
							 struct mlx5e_tls_sw_stats *sw_stats)
{
	struct mlx5e_tls_tx_pool *pool;

	BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
	if (!pool->wq)
		goto err_free;

	INIT_LIST_HEAD(&pool->list);
	mutex_init(&pool->lock);

	INIT_WORK(&pool->create_work, create_work);
	INIT_WORK(&pool->destroy_work, destroy_work);

	pool->mdev = mdev;
	pool->sw_stats = sw_stats;

	return pool;

err_free:
	kvfree(pool);
	return NULL;
}

static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
		struct mlx5e_ktls_offload_context_tx *obj;
		LIST_HEAD(local_list);
		int i = 0;

		list_for_each_entry(obj, &pool->list, list_node)
			if (++i == MLX5E_TLS_TX_POOL_BULK)
				break;

		list_cut_position(&local_list, &pool->list, &obj->list_node);
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
		atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
		pool->size -= MLX5E_TLS_TX_POOL_BULK;
	}
	if (pool->size) {
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
		atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
	}
}

static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	mlx5e_tls_tx_pool_list_cleanup(pool);
	destroy_workqueue(pool->wq);
	kvfree(pool);
}

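/* Return an offload context to the pool. Reaching the HIGH watermark
 * triggers destroy_work to shrink the pool back down.
 */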
static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
{
	mutex_lock(&pool->lock);
	list_add(&obj->list_node, &pool->list);
	if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, &pool->destroy_work);
	mutex_unlock(&pool->lock);
}

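/* Take an offload context from the pool, falling back to a synchronous
 * allocation when the pool is empty. Hitting the LOW watermark (or finding
 * the pool empty) triggers create_work to repopulate it.
 */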
static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
{
	struct mlx5e_ktls_offload_context_tx *obj;

	mutex_lock(&pool->lock);
	if (unlikely(pool->size == 0)) {
		/* pool is empty:
		 * - trigger the populating work, and
		 * - serve the current context via the regular blocking api.
		 */
		queue_work(pool->wq, &pool->create_work);
		mutex_unlock(&pool->lock);
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
		if (!IS_ERR(obj))
			atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
		return obj;
	}

	obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
			       list_node);
	list_del(&obj->list_node);
	if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, &pool->create_work);
	mutex_unlock(&pool->lock);
	return obj;
}

/* End of pool API */

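/* TX side of the tls_dev_add netdev op: take an offload context from the
 * pool, program the crypto key in HW and attach the context to the TLS
 * socket so the datapath can find it.
 */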
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct tls_context *tls_ctx;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	priv_tx = pool_pop(pool);
	if (IS_ERR(priv_tx))
		return PTR_ERR(priv_tx);

	err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
	if (err)
		goto err_create_key;

	priv_tx->expected_seq = start_offload_tcp_sn;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		priv_tx->crypto_info.crypto_info_128 =
			*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	case TLS_CIPHER_AES_GCM_256:
		priv_tx->crypto_info.crypto_info_256 =
			*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		break;
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->cipher_type);
		return -EOPNOTSUPP;
	}
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	priv_tx->ctx_post_pending = true;
	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);

	return 0;

err_create_key:
	pool_push(pool, priv_tx);
	return err;
}

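/* TX side of the tls_dev_del netdev op: destroy the HW key and recycle the
 * offload context back into the pool.
 */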
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
	mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
	pool_push(pool, priv_tx);
}

static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

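/* Post a static params WQE on the connection's TIS, carrying the crypto
 * info and key id programmed at connection setup or resync.
 */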
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn, priv_tx->key_id, 0, fence,
				       TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

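/* Post the context WQEs for a connection: static params (unless the caller
 * asks to skip them), progress params, and a fenced NOP so that following
 * data WQEs are not processed before the context is installed.
 */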
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
	tx_post_fence_nop(sq);
}

struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

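/* Look up the TLS record containing tcp_seq and collect the frags that must
 * be replayed (DUMPed) to HW so it can resynchronize its crypto state ahead
 * of the out-of-order packet.
 */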
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	switch (priv_tx->crypto_info.crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;

		rec_seq = info->rec_seq;
		rec_seq_sz = sizeof(info->rec_seq);
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;

		rec_seq = info->rec_seq;
		rec_seq_sz = sizeof(info->rec_seq);
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  priv_tx->crypto_info.crypto_info.cipher_type);
		return;
	}

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

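/* Post a single DUMP WQE carrying one frag of previously-sent record data
 * on the connection's TIS, as part of the TX resync flow.
 */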
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num = cpu_to_be32(tisn << 8);

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr = cpu_to_be64(dma_addr);
	dseg->lkey = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

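/* Handle a packet whose TCP sequence number does not match the expected
 * one (retransmission or out-of-order): re-post the params WQEs with the
 * record number of the containing record, then DUMP the record bytes that
 * precede this packet, splitting each frag to the SQ's HW MTU.
 */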
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
		/* We might get here with ret == FAIL if a retransmission
		 * reaches the driver after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		return ret;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

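/* Main TX datapath hook for kTLS SKBs: post the param WQEs on the first
 * packet of a connection, resynchronize HW state on unexpected sequence
 * numbers, and hand the TIS number to the caller for the data WQE.
 */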
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct net_device *tls_netdev;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return true;

	mlx5e_tx_mpwqe_ensure_complete(sq);

	tls_ctx = tls_get_ctx(skb->sk);
	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
	/* Don't WARN on NULL: if tls_device_down is running in parallel,
	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
	 * true. Rather continue processing this packet.
	 */
	if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		stats->tls_ooo++;

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			stats->tls_skip_no_sync_data++;
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			goto err_out;
		case MLX5E_KTLS_SYNC_FAIL:
			stats->tls_drop_no_sync_data++;
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}

int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return 0;

	priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
	if (!priv->tls->tx_pool)
		return -ENOMEM;

	return 0;
}

void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return;

	mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
	priv->tls->tx_pool = NULL;
}