/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en.h"

enum {
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

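/* Strip the ESP trailer (padding, pad length/next header and the ICV) from
 * the skb and shrink the IPv4 total length / IPv6 payload length to match.
 */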
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
	unsigned int alen = crypto_aead_authsize(x->data);
	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
	struct iphdr *ipv4hdr = ip_hdr(skb);
	unsigned int trailer_len;
	u8 plen;
	int ret;

	ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
	if (unlikely(ret))
		return ret;

	trailer_len = alen + plen + 2;

	pskb_trim(skb, skb->len - trailer_len);
	if (skb->protocol == htons(ETH_P_IP)) {
		ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
		ip_send_check(ipv4hdr);
	} else {
		ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
					     trailer_len);
	}
	return 0;
}

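/* Program the software parser (SWP) header offsets and flags in the eth
 * segment for the ESP packet layouts described in the comment below.
 */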
static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg, u8 mode,
				struct xfrm_offload *xo)
{
	/* Tunnel Mode:
	 * SWP:      OutL3       InL3  InL4
	 * Pkt: MAC  IP     ESP  IP    L4
	 *
	 * Transport Mode:
	 * SWP:      OutL3       OutL4
	 * Pkt: MAC  IP     ESP  L4
	 *
	 * Tunnel(VXLAN TCP/UDP) over Transport Mode
	 * SWP:      OutL3                   InL3  InL4
	 * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
	 */

	/* Shared settings */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (skb->protocol == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

	/* Tunnel mode */
	if (mode == XFRM_MODE_TUNNEL) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (xo->proto == IPPROTO_IPV6)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;

		switch (xo->inner_ipproto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			/* IP | ESP | IP | [TCP | UDP] */
			eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
			break;
		default:
			break;
		}
		return;
	}

	/* Transport mode */
	if (mode != XFRM_MODE_TRANSPORT)
		return;

	if (!xo->inner_ipproto) {
		switch (xo->proto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			/* IP | ESP | TCP */
			eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
			break;
		default:
			break;
		}
	} else {
		/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
		switch (xo->inner_ipproto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
			eseg->swp_inner_l4_offset =
				(skb->csum_start + skb->head - skb->data) / 2;
			if (inner_ip_hdr(skb)->version == 6)
				eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
			break;
		default:
			break;
		}
	}
}

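/* Write the 64-bit extended sequence number (ESN) into the ESP IV field.
 * For a GSO skb whose segments straddle the low-32-bit rollover, the
 * previous seq_hi is used.
 */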
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
			    struct xfrm_offload *xo)
{
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	__u32 oseq = replay_esn->oseq;
	int iv_offset;
	__be64 seqno;
	u32 seq_hi;

	if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
		     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
		seq_hi = xo->seq.hi - 1;
	} else {
		seq_hi = xo->seq.hi;
	}

	/* Place the SN in the IV field */
	seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}

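/* Write the 64-bit sequence number into the ESP IV field. */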
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
			struct xfrm_offload *xo)
{
	int iv_offset;
	__be64 seqno;

	/* Place the SN in the IV field */
	seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}

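/* Fill the WQE inline segment with the ESP trailer (padding, pad length and
 * next header) for this packet.
 */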
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
			       struct mlx5_wqe_inline_seg *inlseg)
{
	inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
	esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
}

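/* Compute the ESP pad and trailer lengths for this skb and stash them in the
 * per-packet TX accel state.
 */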
static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
				 struct sk_buff *skb,
				 struct xfrm_state *x,
				 struct xfrm_offload *xo,
				 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	unsigned int blksize, clen, alen, plen;
	struct crypto_aead *aead;
	unsigned int tailen;

	ipsec_st->x = x;
	ipsec_st->xo = xo;
	aead = x->data;
	alen = crypto_aead_authsize(aead);
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2, blksize);
	plen = max_t(u32, clen - skb->len, 4);
	tailen = plen + alen;
	ipsec_st->plen = plen;
	ipsec_st->tailen = tailen;

	return 0;
}

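/* Build the eth segment of an IPsec offloaded TX packet: program the SWP
 * offsets, tag the flow-table metadata and request HW trailer insertion.
 */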
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
			       struct mlx5_wqe_eth_seg *eseg)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct xfrm_encap_tmpl *encap;
	struct xfrm_state *x;
	struct sec_path *sp;
	u8 l3_proto;

	sp = skb_sec_path(skb);
	if (unlikely(sp->len != 1))
		return;

	x = xfrm_input_state(skb);
	if (unlikely(!x))
		return;

	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6))))
		return;

	mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);

	l3_proto = (x->props.family == AF_INET) ?
		   ((struct iphdr *)skb_network_header(skb))->protocol :
		   ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;

	eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
	eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
	encap = x->encap;
	if (!encap) {
		eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
	} else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
		eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
	}
}

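/* TX offload entry point: validate the offload state, strip the ESP trailer,
 * set the IV and fill the per-packet TX accel state. Frees the skb and
 * returns false if the packet cannot be offloaded.
 */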
bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
			       struct sk_buff *skb,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *x;
	struct sec_path *sp;

	sp = skb_sec_path(skb);
	if (unlikely(sp->len != 1)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
		goto drop;
	}

	x = xfrm_input_state(skb);
	if (unlikely(!x)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
		goto drop;
	}

	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6)))) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
		goto drop;
	}

	if (!skb_is_gso(skb))
		if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
			atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
			goto drop;
		}

	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	sa_entry->set_iv_op(skb, x, xo);
	mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);

	return true;

drop:
	kfree_skb(skb);
	return false;
}

enum {
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};

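/* RX offload entry point: look up the SA from the CQE metadata, attach the
 * xfrm state to the skb's sec path and translate the HW syndrome into an
 * xfrm offload status.
 */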
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
				       struct sk_buff *skb,
				       struct mlx5_cqe64 *cqe)
{
	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
	struct mlx5e_priv *priv;
	struct xfrm_offload *xo;
	struct xfrm_state *xs;
	struct sec_path *sp;
	u32 sa_handle;

	sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
	priv = netdev_priv(netdev);
	sp = secpath_set(skb);
	if (unlikely(!sp)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
		return;
	}

	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
	if (unlikely(!xs)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
		return;
	}

	sp->xvec[sp->len++] = xs;
	sp->olen++;

	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;

	switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
		xo->status = CRYPTO_SUCCESS;
		break;
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
		break;
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
		xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
		break;
	default:
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
	}
}