1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/netdevice.h>
5 #include <linux/netlink.h>
6 #include <linux/random.h>
7 #include <net/vxlan.h>
8 
9 #include "reg.h"
10 #include "spectrum.h"
11 #include "spectrum_nve.h"
12 
13 #define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
14 						 VXLAN_F_LEARN)
15 #define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \
16 						 VXLAN_F_UDP_ZERO_CSUM6_TX | \
17 						 VXLAN_F_UDP_ZERO_CSUM6_RX)
18 
mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config * cfg,struct netlink_ext_ack * extack)19 static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg,
20 						struct netlink_ext_ack *extack)
21 {
22 	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
23 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
24 		return false;
25 	}
26 
27 	if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) {
28 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
29 		return false;
30 	}
31 
32 	return true;
33 }
34 
mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config * cfg,struct netlink_ext_ack * extack)35 static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg,
36 						struct netlink_ext_ack *extack)
37 {
38 	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) {
39 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
40 		return false;
41 	}
42 
43 	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) {
44 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX");
45 		return false;
46 	}
47 
48 	if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) {
49 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
50 		return false;
51 	}
52 
53 	return true;
54 }
55 
mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve * nve,const struct mlxsw_sp_nve_params * params,struct netlink_ext_ack * extack)56 static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
57 					   const struct mlxsw_sp_nve_params *params,
58 					   struct netlink_ext_ack *extack)
59 {
60 	struct vxlan_dev *vxlan = netdev_priv(params->dev);
61 	struct vxlan_config *cfg = &vxlan->cfg;
62 
63 	if (vxlan_addr_multicast(&cfg->remote_ip)) {
64 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
65 		return false;
66 	}
67 
68 	if (vxlan_addr_any(&cfg->saddr)) {
69 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified");
70 		return false;
71 	}
72 
73 	if (cfg->remote_ifindex) {
74 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported");
75 		return false;
76 	}
77 
78 	if (cfg->port_min || cfg->port_max) {
79 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported");
80 		return false;
81 	}
82 
83 	if (cfg->tos != 1) {
84 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit");
85 		return false;
86 	}
87 
88 	if (cfg->flags & VXLAN_F_TTL_INHERIT) {
89 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit");
90 		return false;
91 	}
92 
93 	switch (cfg->saddr.sa.sa_family) {
94 	case AF_INET:
95 		if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack))
96 			return false;
97 		break;
98 	case AF_INET6:
99 		if (!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack))
100 			return false;
101 		break;
102 	}
103 
104 	if (cfg->ttl == 0) {
105 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0");
106 		return false;
107 	}
108 
109 	if (cfg->label != 0) {
110 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0");
111 		return false;
112 	}
113 
114 	return true;
115 }
116 
mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve * nve,const struct mlxsw_sp_nve_params * params,struct netlink_ext_ack * extack)117 static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
118 					    const struct mlxsw_sp_nve_params *params,
119 					    struct netlink_ext_ack *extack)
120 {
121 	if (params->ethertype == ETH_P_8021AD) {
122 		NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN");
123 		return false;
124 	}
125 
126 	return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
127 }
128 
129 static void
mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config * cfg,struct mlxsw_sp_nve_config * config)130 mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg,
131 				       struct mlxsw_sp_nve_config *config)
132 {
133 	switch (cfg->saddr.sa.sa_family) {
134 	case AF_INET:
135 		config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
136 		config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
137 		break;
138 	case AF_INET6:
139 		config->ul_proto = MLXSW_SP_L3_PROTO_IPV6;
140 		config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr;
141 		break;
142 	}
143 }
144 
mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve * nve,const struct mlxsw_sp_nve_params * params,struct mlxsw_sp_nve_config * config)145 static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
146 				      const struct mlxsw_sp_nve_params *params,
147 				      struct mlxsw_sp_nve_config *config)
148 {
149 	struct vxlan_dev *vxlan = netdev_priv(params->dev);
150 	struct vxlan_config *cfg = &vxlan->cfg;
151 
152 	config->type = MLXSW_SP_NVE_TYPE_VXLAN;
153 	config->ttl = cfg->ttl;
154 	config->flowlabel = cfg->label;
155 	config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
156 	config->ul_tb_id = RT_TABLE_MAIN;
157 	mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config);
158 	config->udp_dport = cfg->dst_port;
159 }
160 
/* Pack the common (Spectrum-1 and Spectrum-2) fields of the TNGCR
 * (Tunneling NVE General Configuration) register: tunnel type, TTL,
 * UDP source port prefix and the underlay source IP.
 */
static void
mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
				  const struct mlxsw_sp_nve_config *config)
{
	struct in6_addr addr6;
	u8 udp_sport;

	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
			     config->ttl);
	/* VxLAN driver's default UDP source port range is 32768 (0x8000)
	 * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
	 * to a random number between 0x80 and 0xee
	 */
	get_random_bytes(&udp_sport, sizeof(udp_sport));
	udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
	mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);

	switch (config->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_tngcr_usipv4_set(tngcr_pl,
					   be32_to_cpu(config->ul_sip.addr4));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* Copy to a local first so a properly aligned, addressable
		 * buffer is handed to the memcpy helper.
		 */
		addr6 = config->ul_sip.addr6;
		mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl,
						 (const char *)&addr6);
		break;
	}
}
190 
191 static int
mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp * mlxsw_sp,const struct mlxsw_sp_nve_config * config)192 mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
193 			       const struct mlxsw_sp_nve_config *config)
194 {
195 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
196 	u16 ul_vr_id;
197 	int err;
198 
199 	err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
200 					  &ul_vr_id);
201 	if (err)
202 		return err;
203 
204 	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
205 	mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
206 	mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
207 
208 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
209 }
210 
mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp * mlxsw_sp)211 static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
212 {
213 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
214 
215 	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
216 
217 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
218 }
219 
mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp * mlxsw_sp,unsigned int tunnel_index)220 static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
221 					unsigned int tunnel_index)
222 {
223 	char rtdp_pl[MLXSW_REG_RTDP_LEN];
224 
225 	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
226 
227 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
228 }
229 
/* Bring up VxLAN offload on Spectrum-1: program the parser's VxLAN UDP
 * destination port, bump the parsing depth, write the tunnel config
 * (TNGCR), bind the tunnel decap pointer (RTDP) and finally promote the
 * underlay source IP to a decap route. On any failure, the steps already
 * taken are unwound in reverse order via the goto chain below.
 */
static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
	if (err)
		goto err_parsing_depth_inc;

	err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index);
	if (err)
		goto err_rtdp_set;

	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	/* RTDP needs no explicit undo; clearing TNGCR disables the tunnel. */
	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
err_parsing_depth_inc:
	/* Zero restores the parser's default VxLAN UDP destination port. */
	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
	return err;
}
270 
mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve * nve)271 static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
272 {
273 	struct mlxsw_sp_nve_config *config = &nve->config;
274 	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
275 
276 	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
277 					 config->ul_proto, &config->ul_sip);
278 	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
279 	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
280 	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
281 }
282 
283 static int
mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device * nve_dev,__be32 vni,struct netlink_ext_ack * extack)284 mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni,
285 			      struct netlink_ext_ack *extack)
286 {
287 	if (WARN_ON(!netif_is_vxlan(nve_dev)))
288 		return -EINVAL;
289 	return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier,
290 				extack);
291 }
292 
293 static void
mlxsw_sp_nve_vxlan_clear_offload(const struct net_device * nve_dev,__be32 vni)294 mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
295 {
296 	if (WARN_ON(!netif_is_vxlan(nve_dev)))
297 		return;
298 	vxlan_fdb_clear_offload(nve_dev, vni);
299 }
300 
/* VxLAN NVE operations for Spectrum-1 ASICs. */
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp1_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp1_nve_vxlan_init,
	.fini		= mlxsw_sp1_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
310 
/* Enable or disable FDB learning on the NVE tunnel port via the TNPC
 * register. Returns 0 on success or a negative errno.
 *
 * Fix: the function was declared to return bool while returning (and
 * being used by callers as) the errno-style int from mlxsw_reg_write().
 * Funnelling the result through bool truncated e.g. -EIO to true/1, so
 * callers propagated 1 instead of the real error code.
 */
static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
					    bool learning_en)
{
	char tnpc_pl[MLXSW_REG_TNPC_LEN];

	mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE,
			    learning_en);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
}
320 
321 static int
mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp * mlxsw_sp)322 mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp)
323 {
324 	char spvid_pl[MLXSW_REG_SPVID_LEN] = {};
325 
326 	mlxsw_reg_spvid_tport_set(spvid_pl, true);
327 	mlxsw_reg_spvid_local_port_set(spvid_pl,
328 				       MLXSW_REG_TUNNEL_PORT_NVE);
329 	mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true);
330 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
331 }
332 
/* Program VxLAN tunnel configuration on Spectrum-2: acquire an underlay
 * RIF for the underlay table, set tunnel-port learning, write TNGCR,
 * force VLAN push on decapsulated packets (SPVTR) and set the decap
 * EtherType. On failure, the completed steps are unwound in reverse
 * order via the goto chain.
 */
static int
mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
			       const struct mlxsw_sp_nve_config *config)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
	u16 ul_rif_index;
	int err;

	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
					 &ul_rif_index);
	if (err)
		return err;
	/* Remembered so config_clear() can release the same RIF later. */
	mlxsw_sp->nve->ul_rif_index = ul_rif_index;

	err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
	if (err)
		goto err_vxlan_learning_set;

	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
	mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	if (err)
		goto err_tngcr_write;

	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
			     MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
	if (err)
		goto err_spvtr_write;

	err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp);
	if (err)
		goto err_decap_ethertype_set;

	return 0;

err_decap_ethertype_set:
	/* Restore the default (IEEE compliant PVID) ingress VLAN mode. */
	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
			     MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
err_spvtr_write:
	/* Invalidate the tunnel configuration. */
	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
err_tngcr_write:
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
err_vxlan_learning_set:
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
	return err;
}
384 
mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp * mlxsw_sp)385 static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
386 {
387 	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
388 	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
389 
390 	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
391 			     MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
392 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
393 	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
394 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
395 	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
396 	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
397 }
398 
/* Bind the router tunnel decap pointer (RTDP) to the NVE tunnel index
 * and, unlike Spectrum-1, also set the egress underlay RIF.
 */
static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
					unsigned int tunnel_index,
					u16 ul_rif_index)
{
	char pl[MLXSW_REG_RTDP_LEN];

	mlxsw_reg_rtdp_pack(pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
	mlxsw_reg_rtdp_egress_router_interface_set(pl, ul_rif_index);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), pl);
}
410 
/* Bring up VxLAN offload on Spectrum-2: program the parser's VxLAN UDP
 * destination port, bump the parsing depth, write the tunnel
 * configuration, bind the decap pointer (with the underlay RIF acquired
 * by config_set) and promote the underlay source IP to a decap route.
 * On any failure, completed steps are unwound in reverse order.
 */
static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
	if (err)
		goto err_parsing_depth_inc;

	err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
					   nve->ul_rif_index);
	if (err)
		goto err_rtdp_set;

	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	/* RTDP needs no explicit undo; config_clear() disables the tunnel
	 * and releases the underlay RIF.
	 */
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
err_parsing_depth_inc:
	/* Zero restores the parser's default VxLAN UDP destination port. */
	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
	return err;
}
452 
mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve * nve)453 static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
454 {
455 	struct mlxsw_sp_nve_config *config = &nve->config;
456 	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
457 
458 	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
459 					 config->ul_proto, &config->ul_sip);
460 	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
461 	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
462 	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
463 }
464 
/* VxLAN NVE operations for Spectrum-2 and later ASICs. */
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp2_nve_vxlan_init,
	.fini		= mlxsw_sp2_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
474