/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef __MLX5E_IPSEC_RXTX_H__
#define __MLX5E_IPSEC_RXTX_H__

#include <linux/skbuff.h>
#include <net/xfrm.h>
#include "en.h"
#include "en/txrx.h"

/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
#define MLX5_IPSEC_METADATA_MARKER(metadata)  (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata)  ((metadata) & GENMASK(23, 0))
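/* Example decode (hypothetical metadata value 0x81000005):
 *   MLX5_IPSEC_METADATA_MARKER()  -> 1        (IPsec-offloaded packet)
 *   MLX5_IPSEC_METADATA_SYNDROM() -> 0x01     (device syndrome)
 *   MLX5_IPSEC_METADATA_HANDLE()  -> 0x000005 (IPsec object id)
 */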

struct mlx5e_accel_tx_ipsec_state {
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	u32 tailen;
	u32 plen;
};
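/* Note: the state above is filled per skb by mlx5e_ipsec_handle_tx_skb() and
 * later consumed by mlx5e_ipsec_handle_tx_wqe(); tailen/plen presumably
 * describe the inline ESP trailer the TX path writes into the WQE.
 */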

#ifdef CONFIG_MLX5_EN_IPSEC

void mlx5e_ipsec_inverse_table_init(void);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
			    struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
			struct xfrm_offload *xo);
bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
			       struct sk_buff *skb,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st);
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
			       struct mlx5_wqe_inline_seg *inlseg);
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
				       struct sk_buff *skb,
				       struct mlx5_cqe64 *cqe);
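/* Bytes of WQE inline data needed for this skb's ESP trailer (presumably 0
 * when no trailer is inlined).
 */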
static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	return ipsec_st->tailen;
}

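/* RX: the marker bit in the CQE flow-table metadata indicates a packet the
 * device handled as IPsec offload traffic.
 */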
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
	return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}

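/* TX: true when the eth segment already carries the IPsec flow-table
 * metadata flag (presumably set via mlx5e_ipsec_tx_build_eseg(), declared
 * below).
 */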
static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
{
	return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
}

void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
			       struct mlx5_wqe_eth_seg *eseg);

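/* ndo_features_check() helper (assumption: called from the driver's generic
 * features_check path): keep the requested features only for skbs that carry
 * a hardware-offloaded xfrm state this code can handle; otherwise clear
 * checksum and GSO so the stack falls back to software IPsec.
 */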
static inline netdev_features_t
mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp = skb_sec_path(skb);

	if (sp && sp->len && xo) {
		struct xfrm_state *x = sp->xvec[0];

		if (!x || !x->xso.offload_handle)
			goto out_disable;

		if (xo->inner_ipproto) {
			/* Tunnel packets over IPsec tunnel mode are not
			 * supported because we cannot offload checksums for
			 * three IP headers.
			 */
			if (x->props.mode == XFRM_MODE_TUNNEL)
				goto out_disable;

			/* Only UDP and TCP inner L4 checksums are supported */
			if (xo->inner_ipproto != IPPROTO_UDP &&
			    xo->inner_ipproto != IPPROTO_TCP)
				goto out_disable;
		}

		return features;
	}

	/* Disable CSUM and GSO for software IPsec */
out_disable:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

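/* Fill the eth-segment checksum flags for an IPsec TX skb. A true return
 * tells the caller the flags were already set here (IPsec metadata present);
 * presumably the generic TX checksum builder tries this first and skips its
 * own setup when it succeeds. False means this is not an IPsec skb.
 */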
static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5_wqe_eth_seg *eseg)
{
	u8 inner_ipproto;

	if (!mlx5e_ipsec_eseg_meta(eseg))
		return false;

	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
	inner_ipproto = xfrm_offload(skb)->inner_ipproto;
	if (inner_ipproto) {
		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
		if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
			eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial_inner++;
	}

	return true;
}
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
				       struct sk_buff *skb,
				       struct mlx5_cqe64 *cqe)
{}

static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
{
	return false;
}

static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
static inline netdev_features_t
mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }

static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5_wqe_eth_seg *eseg)
{
	return false;
}
#endif /* CONFIG_MLX5_EN_IPSEC */

#endif /* __MLX5E_IPSEC_RXTX_H__ */