1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2019 Mellanox Technologies.
3
4 #include "en.h"
5 #include "lib/mlx5.h"
6 #include "en_accel/ktls.h"
7 #include "en_accel/ktls_utils.h"
8 #include "en_accel/fs_tcp.h"
9
/* Create a device encryption-key object for a kTLS session.
 *
 * Extracts the raw key material from @crypto_info (AES-GCM-128 and
 * AES-GCM-256 are the only ciphers accepted) and registers it with the
 * device as a TLS key object.
 *
 * Return: 0 on success with the key object id written to @p_key_id,
 * -EINVAL on an unsupported cipher, or the error from key creation.
 */
int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
			 struct tls_crypto_info *crypto_info,
			 u32 *p_key_id)
{
	void *key;
	u32 sz_bytes;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *gcm128 =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;

		key = gcm128->key;
		sz_bytes = sizeof(gcm128->key);
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *gcm256 =
			(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;

		key = gcm256->key;
		sz_bytes = sizeof(gcm256->key);
		break;
	}
	default:
		return -EINVAL;
	}

	return mlx5_create_encryption_key(mdev, key, sz_bytes,
					  MLX5_ACCEL_OBJ_TLS_KEY, p_key_id);
}
42
/* Release a device key object previously created by mlx5_ktls_create_key(). */
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
{
	mlx5_destroy_encryption_key(mdev, key_id);
}
47
/* .tls_dev_add callback: install a kTLS offload context for @sk.
 *
 * Rejects cipher/device combinations the HW cannot offload, then
 * dispatches to the TX or RX setup path with the TCP sequence number
 * (@start_offload_tcp_sn) from which offload begins.
 *
 * Return: 0 on success or a negative errno.
 */
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
			  enum tls_offload_ctx_dir direction,
			  struct tls_crypto_info *crypto_info,
			  u32 start_offload_tcp_sn)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!mlx5e_ktls_type_check(priv->mdev, crypto_info))
		return -EOPNOTSUPP;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return mlx5e_ktls_add_tx(netdev, sk, crypto_info,
					 start_offload_tcp_sn);

	return mlx5e_ktls_add_rx(netdev, sk, crypto_info,
				 start_offload_tcp_sn);
}
67
mlx5e_ktls_del(struct net_device * netdev,struct tls_context * tls_ctx,enum tls_offload_ctx_dir direction)68 static void mlx5e_ktls_del(struct net_device *netdev,
69 struct tls_context *tls_ctx,
70 enum tls_offload_ctx_dir direction)
71 {
72 if (direction == TLS_OFFLOAD_CTX_DIR_TX)
73 mlx5e_ktls_del_tx(netdev, tls_ctx);
74 else
75 mlx5e_ktls_del_rx(netdev, tls_ctx);
76 }
77
/* .tls_dev_resync callback: resynchronize the device record state
 * at TCP sequence @seq with record sequence number @rcd_sn.
 * Only the RX direction supports resync; TX requests are rejected.
 */
static int mlx5e_ktls_resync(struct net_device *netdev,
			     struct sock *sk, u32 seq, u8 *rcd_sn,
			     enum tls_offload_ctx_dir direction)
{
	if (likely(direction == TLS_OFFLOAD_CTX_DIR_RX)) {
		mlx5e_ktls_rx_resync(netdev, sk, seq, rcd_sn);
		return 0;
	}

	return -EOPNOTSUPP;
}
88
/* kTLS offload callbacks registered with the stack via netdev->tlsdev_ops. */
static const struct tlsdev_ops mlx5e_ktls_ops = {
	.tls_dev_add = mlx5e_ktls_add,
	.tls_dev_del = mlx5e_ktls_del,
	.tls_dev_resync = mlx5e_ktls_resync,
};
94
mlx5e_is_ktls_rx(struct mlx5_core_dev * mdev)95 bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
96 {
97 u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
98
99 if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
100 return false;
101
102 /* Check the possibility to post the required ICOSQ WQEs. */
103 if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS))
104 return false;
105 if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS))
106 return false;
107 if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_KTLS_GET_PROGRESS_WQEBBS))
108 return false;
109
110 return true;
111 }
112
mlx5e_ktls_build_netdev(struct mlx5e_priv * priv)113 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
114 {
115 struct net_device *netdev = priv->netdev;
116 struct mlx5_core_dev *mdev = priv->mdev;
117
118 if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev))
119 return;
120
121 if (mlx5e_is_ktls_tx(mdev)) {
122 netdev->hw_features |= NETIF_F_HW_TLS_TX;
123 netdev->features |= NETIF_F_HW_TLS_TX;
124 }
125
126 if (mlx5e_is_ktls_rx(mdev))
127 netdev->hw_features |= NETIF_F_HW_TLS_RX;
128
129 netdev->tlsdev_ops = &mlx5e_ktls_ops;
130 }
131
/* Toggle the NETIF_F_HW_TLS_RX feature: create the accel TCP steering
 * tables on enable, destroy them on disable. Serialized against other
 * state changes by priv->state_lock.
 *
 * Return: 0 on success or the table-creation error.
 */
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int ret = 0;

	mutex_lock(&priv->state_lock);
	if (enable)
		ret = mlx5e_accel_fs_tcp_create(priv->fs);
	else
		mlx5e_accel_fs_tcp_destroy(priv->fs);
	mutex_unlock(&priv->state_lock);

	return ret;
}
146
mlx5e_ktls_init_rx(struct mlx5e_priv * priv)147 int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
148 {
149 int err;
150
151 if (!mlx5e_is_ktls_rx(priv->mdev))
152 return 0;
153
154 priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
155 if (!priv->tls->rx_wq)
156 return -ENOMEM;
157
158 if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
159 err = mlx5e_accel_fs_tcp_create(priv->fs);
160 if (err) {
161 destroy_workqueue(priv->tls->rx_wq);
162 return err;
163 }
164 }
165
166 return 0;
167 }
168
mlx5e_ktls_cleanup_rx(struct mlx5e_priv * priv)169 void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
170 {
171 if (!mlx5e_is_ktls_rx(priv->mdev))
172 return;
173
174 if (priv->netdev->features & NETIF_F_HW_TLS_RX)
175 mlx5e_accel_fs_tcp_destroy(priv->fs);
176
177 destroy_workqueue(priv->tls->rx_wq);
178 }
179
mlx5e_ktls_init(struct mlx5e_priv * priv)180 int mlx5e_ktls_init(struct mlx5e_priv *priv)
181 {
182 struct mlx5e_tls *tls;
183
184 if (!mlx5e_is_ktls_device(priv->mdev))
185 return 0;
186
187 tls = kzalloc(sizeof(*tls), GFP_KERNEL);
188 if (!tls)
189 return -ENOMEM;
190
191 priv->tls = tls;
192 return 0;
193 }
194
/* Free the TLS context allocated by mlx5e_ktls_init(). Clearing the
 * pointer prevents a stale reference after cleanup; kfree(NULL) is a
 * no-op, so this is safe even when init skipped the allocation.
 */
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
{
	kfree(priv->tls);
	priv->tls = NULL;
}
200