// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u8 mac_profile_id;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Reserved for Spectrum-2. */
	u16 ul_rif_id;	/* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

struct mlxsw_sp_rif_mac_profile {
	unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};

struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

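/* Accessors for the per-direction RIF counter fields. Counters are allocated
 * lazily, so each direction also carries a validity flag.
 */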
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

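/* Bind or unbind a flow counter to a router interface. RITR is a
 * read-modify-write register, so the current interface configuration is
 * queried before the counter fields are packed on top of it.
 */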
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

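/* Read the current value of a RIF counter without clearing it (RICNT NOP
 * opcode). Fails if no counter was allocated for the requested direction.
 */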
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

struct mlxsw_sp_rif_counter_set_basic {
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};

static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (!set)
		return 0;

#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME)				\
		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))

	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
	MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
	MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);

#undef MLXSW_SP_RIF_COUNTER_EXTRACT

	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

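/* Allocate a counter from the RIF sub-pool, clear it and bind it to the
 * given direction of the RIF. Calling this when a counter is already
 * allocated for the direction is a no-op.
 */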
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

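/* One bit for each possible prefix length: 0 through 128 for IPv6, hence
 * the "+ 1".
 */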
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

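/* Optional private area attached to a FIB entry on behalf of the low-level
 * ops. It is reference counted: one reference is held by the FIB entry
 * itself and one by every operation context whose queued work still refers
 * to it.
 */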
static struct mlxsw_sp_fib_entry_priv *
mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	struct mlxsw_sp_fib_entry_priv *priv;

	if (!ll_ops->fib_entry_priv_size)
		/* No need to have priv */
		return NULL;

	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	refcount_set(&priv->refcnt, 1);
	return priv;
}

static void
mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
{
	kfree(priv);
}

static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
{
	refcount_inc(&priv->refcnt);
}

static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv || !refcount_dec_and_test(&priv->refcnt))
		return;
	mlxsw_sp_fib_entry_priv_destroy(priv);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv)
		return;
	mlxsw_sp_fib_entry_priv_hold(priv);
	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
{
	struct mlxsw_sp_fib_entry_priv *priv, *tmp;

	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
		mlxsw_sp_fib_entry_priv_put(priv);
	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
}

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
	struct mlxsw_sp_fib_entry_priv *priv;
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	dscp_t dscp;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
	const struct mlxsw_sp_router_ll_ops *ll_ops;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
					 enum mlxsw_sp_l3proto proto)
{
	return 0;
}

static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
}

static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
}

static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

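/* Create a FIB for the given virtual router and protocol and bind it to the
 * current default LPM tree of that protocol, taking a reference on the tree.
 */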
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	err = ll_ops->init(mlxsw_sp, vr->id, proto);
	if (err)
		return ERR_PTR(err);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	fib->ll_ops = ll_ops;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, true,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, false,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

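/* Program the tree's left-child structure: the root bin is the longest
 * prefix length in use and each bin's left child is the next shorter in-use
 * prefix length, so a lookup walks from the most to the least specific
 * prefix.
 */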
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_router_ll_ops *ll_ops,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralst_pl[MLXSW_REG_XRALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
					  MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_router_ll_ops *ll_ops,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_router_ll_ops *ll_ops,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}

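/* Look up an LPM tree with the same protocol and prefix usage and take a
 * reference on it. If none exists, create a new one from an unused tree
 * slot.
 */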
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops =
		mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];

	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

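/* Bind the virtual router of the given FIB to an LPM tree via RALTB. */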
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
			      tree_id);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	/* Bind to tree 0, which is the default. */
	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purposes, squash the main, default and local tables into
	 * one.
	 */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

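/* Replace the default LPM tree of a protocol: rebind every virtual router
 * that uses the old tree to the new one, rolling back already rebound
 * routers on failure.
 */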
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		/* Re-resolve the VR for each index; "vr" still points at the
		 * router for which the replacement failed.
		 */
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnels require increasing the default parsing depth
	 * (96 bytes).
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}

static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}

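/* Set up the FIB entry as the decap entry of the IPIP entry: reserve an
 * adjacency KVDL entry to serve as the tunnel index and, if the tunnel type
 * requires it, increase the parsing depth.
 */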
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	/* Free the local tunnel_index; fib_entry->decap.tunnel_index has not
	 * been assigned yet at this point.
	 */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   tunnel_index);
	return err;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

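/* A matching host route is "promoted" from a trap entry to a decap entry
 * when an IPIP entry can offload it, and "demoted" back to a trap entry
 * when it no longer can.
 */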
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addrp = &addr->addr6;
		addr_len = 16;
		addr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddrp = &saddr.addr6;
		saddr_len = 16;
		saddr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();

	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}

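/* Determine whether a netdevice is a tunnel of a supported IPIP type by
 * matching its device type against the registered IPIP ops. Optionally
 * return the matched type via p_type.
 */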
mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp * mlxsw_sp,const struct net_device * dev,enum mlxsw_sp_ipip_type * p_type)1514 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1515 const struct net_device *dev,
1516 enum mlxsw_sp_ipip_type *p_type)
1517 {
1518 struct mlxsw_sp_router *router = mlxsw_sp->router;
1519 const struct mlxsw_sp_ipip_ops *ipip_ops;
1520 enum mlxsw_sp_ipip_type ipipt;
1521
1522 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1523 ipip_ops = router->ipip_ops_arr[ipipt];
1524 if (dev->type == ipip_ops->dev_type) {
1525 if (p_type)
1526 *p_type = ipipt;
1527 return true;
1528 }
1529 }
1530 return false;
1531 }
1532
mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)1533 static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1534 const struct net_device *dev)
1535 {
1536 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1537 }
1538
1539 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp * mlxsw_sp,const struct net_device * ol_dev)1540 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1541 const struct net_device *ol_dev)
1542 {
1543 struct mlxsw_sp_ipip_entry *ipip_entry;
1544
1545 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1546 ipip_list_node)
1547 if (ipip_entry->ol_dev == ol_dev)
1548 return ipip_entry;
1549
1550 return NULL;
1551 }
1552
1553 static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp * mlxsw_sp,const struct net_device * ul_dev,struct mlxsw_sp_ipip_entry * start)1554 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1555 const struct net_device *ul_dev,
1556 struct mlxsw_sp_ipip_entry *start)
1557 {
1558 struct mlxsw_sp_ipip_entry *ipip_entry;
1559
1560 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1561 ipip_list_node);
1562 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1563 ipip_list_node) {
1564 struct net_device *ol_dev = ipip_entry->ol_dev;
1565 struct net_device *ipip_ul_dev;
1566
1567 rcu_read_lock();
1568 ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1569 rcu_read_unlock();
1570
1571 if (ipip_ul_dev == ul_dev)
1572 return ipip_entry;
1573 }
1574
1575 return NULL;
1576 }
1577
mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)1578 static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1579 const struct net_device *dev)
1580 {
1581 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1582 }
1583
mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp * mlxsw_sp,const struct net_device * ol_dev,enum mlxsw_sp_ipip_type ipipt)1584 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1585 const struct net_device *ol_dev,
1586 enum mlxsw_sp_ipip_type ipipt)
1587 {
1588 const struct mlxsw_sp_ipip_ops *ops
1589 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1590
1591 return ops->can_offload(mlxsw_sp, ol_dev);
1592 }
1593
mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp * mlxsw_sp,struct net_device * ol_dev)1594 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1595 struct net_device *ol_dev)
1596 {
1597 enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1598 struct mlxsw_sp_ipip_entry *ipip_entry;
1599 enum mlxsw_sp_l3proto ul_proto;
1600 union mlxsw_sp_l3addr saddr;
1601 u32 ul_tb_id;
1602
1603 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1604 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1605 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1606 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1607 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1608 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1609 saddr, ul_tb_id,
1610 NULL)) {
1611 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1612 ol_dev);
1613 if (IS_ERR(ipip_entry))
1614 return PTR_ERR(ipip_entry);
1615 }
1616 }
1617
1618 return 0;
1619 }
1620
mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp * mlxsw_sp,struct net_device * ol_dev)1621 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1622 struct net_device *ol_dev)
1623 {
1624 struct mlxsw_sp_ipip_entry *ipip_entry;
1625
1626 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1627 if (ipip_entry)
1628 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1629 }
1630
1631 static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_ipip_entry * ipip_entry)1632 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1633 struct mlxsw_sp_ipip_entry *ipip_entry)
1634 {
1635 struct mlxsw_sp_fib_entry *decap_fib_entry;
1636
1637 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1638 if (decap_fib_entry)
1639 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1640 decap_fib_entry);
1641 }
1642
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	struct in6_addr *saddr6;
	u32 saddr4;

	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr4,
						   lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		saddr6 = &lb_cf.saddr.addr6;
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr6,
						   lb_cf.okey);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * Return: Non-zero value on failure.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}

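/* The tunnel netdevice was enslaved to (or released from) an L3 master
 * device. Since the loopback RIF is bound to a VR, recreate it so that it
 * reflects the overlay device's new VR.
 */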
static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving underlay to a different VRF might cause local address
	 * conflict, and the conflicting tunnels need to be demoted.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

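/* Demotion stops offloading the tunnel: its decap route is demoted first
 * (if the overlay device is up) and the IPIP entry is then destroyed, so
 * the tunnel's traffic falls back to the kernel's slow path.
 */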
void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}

static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();
		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}

static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *ol_dev,
					    unsigned long event,
					    struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;
	int err = 0;

	switch (event) {
	case NETDEV_REGISTER:
		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								   ol_dev,
								   extack);
		break;
	case NETDEV_CHANGE:
		extack = info->extack;
		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							      ol_dev, extack);
		break;
	case NETDEV_CHANGEMTU:
		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
		break;
	}
	return err;
}

static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   bool *demote_this,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}

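/* Dispatch an event on an underlay device to every tunnel whose underlay
 * it is. A failed update demotes all such tunnels. A tunnel flagged for
 * demotion by the handler is demoted here, and iteration resumes from its
 * predecessor, which must be looked up anew since the list has changed.
 */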
static int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err;

	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		struct mlxsw_sp_ipip_entry *prev;
		bool demote_this = false;

		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			return err;
		}

		if (demote_this) {
			if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This can't be cached from previous iteration,
				 * because that entry could be gone now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			ipip_entry = prev;
		}
	}

	return 0;
}

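/* Record the NVE decap configuration and, if a local route matching the
 * underlay source IP already traps to the CPU, convert it to an NVE decap
 * entry. The route may also appear only later, in which case it is
 * promoted when it is created.
 */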
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip,
				      u32 tunnel_index)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
		err = -EINVAL;
		goto out;
	}

	router->nve_decap_config.ul_tb_id = ul_tb_id;
	router->nve_decap_config.tunnel_index = tunnel_index;
	router->nve_decap_config.ul_proto = ul_proto;
	router->nve_decap_config.ul_sip = *ul_sip;
	router->nve_decap_config.valid = true;

	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->decap.tunnel_index = tunnel_index;
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	goto out;

err_fib_entry_update:
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
		goto out;

	router->nve_decap_config.valid = false;

	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
					 u32 ul_tb_id,
					 enum mlxsw_sp_l3proto ul_proto,
					 const union mlxsw_sp_l3addr *ul_sip)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	return router->nve_decap_config.valid &&
	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
	       router->nve_decap_config.ul_proto == ul_proto &&
	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
		       sizeof(*ul_sip));
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

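/* Iterate over the neighbours of a RIF: pass NULL to get the first entry,
 * or a previous return value to get the one after it; NULL marks the end.
 */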
struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
			struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}

int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}

unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}

u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return ntohl(*((__be32 *) n->primary_key));
}

struct in6_addr *
mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return (struct in6_addr *) &n->primary_key;
}

int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_neigh_entry *neigh_entry,
			       u64 *p_counter)
{
	if (!neigh_entry->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
					 p_counter, NULL);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static bool
mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct devlink *devlink;
	const char *table_name;

	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
	case AF_INET:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
		break;
	case AF_INET6:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	devlink = priv_to_devlink(mlxsw_sp->core);
	return devlink_dpipe_table_counter_enabled(devlink, table_name);
}

static void
mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
		return;

	neigh_entry->counter_valid = true;
}

static void
mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp,
				   neigh_entry->counter_index);
	neigh_entry->counter_valid = false;
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (WARN_ON_ONCE(rif >= max_rifs))
		return;
	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
#else
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}

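/* Check whether the RAUHTD response is completely full, in which case more
 * records may be pending and another dump iteration is needed. An IPv4
 * record is full only when all of its entries are used; an IPv6 record
 * holds a single entry.
 */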
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Ensure the RIF we read from the device does not change mid-dump. */
	mutex_lock(&mlxsw_sp->router->lock);
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_rauhtd_type type;
	char *rauhtd_pl;
	int err;

	if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
		return 0;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
	if (err)
		goto out;

	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
out:
	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	mutex_lock(&mlxsw_sp->router->lock);
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those that are unresolved and
	 * send ARP on them. This solves the chicken-and-egg problem where a
	 * nexthop would not get offloaded until its neighbour is resolved, but
	 * the neighbour would never get resolved while traffic is flowing in
	 * HW via a different nexthop.
	 */
	mutex_lock(&router->lock);
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&router->lock);

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing, bool dead);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static int
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static int
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n = neigh_entry->key.n;

	/* Packets with a link-local destination address are trapped
	 * after LPM lookup and never reach the neighbour table, so
	 * there is no need to program such neighbours to the device.
	 */
	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
	    IPV6_ADDR_LINKLOCAL)
		return true;
	return false;
}

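/* Program the neighbour entry to the device, or remove it, and reflect the
 * result to the kernel via the NTF_OFFLOADED flag.
 */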
static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
	int err;

	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
			return;
		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else {
		WARN_ON_ONCE(1);
		return;
	}

	if (adding)
		neigh_entry->key.n->flags |= NTF_OFFLOADED;
	else
		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
}

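/* Attach or detach an activity counter, then rewrite the entry so the
 * device starts (or stops) using the counter.
 */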
void
mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry,
				    bool adding)
{
	if (adding)
		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	else
		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}

struct mlxsw_sp_netevent_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = net_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	if (neigh_entry->connected && entry_connected &&
	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
		goto out;

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
				      dead);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	neigh_release(n);
	kfree(net_work);
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	mlxsw_sp_mp_hash_init(mlxsw_sp);
	kfree(net_work);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	__mlxsw_sp_router_init(mlxsw_sp);
	kfree(net_work);
}

static int mlxsw_sp_router_schedule_work(struct net *net,
					 struct notifier_block *nb,
					 void (*cb)(struct work_struct *))
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_router *router;

	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
		return NOTIFY_DONE;

	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
	if (!net_work)
		return NOTIFY_BAD;

	INIT_WORK(&net_work->work, cb);
	net_work->mlxsw_sp = router->mlxsw_sp;
	mlxsw_core_schedule_work(&net_work->work);
	return NOTIFY_DONE;
}

static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
		if (!net_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		net_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&net_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, nb,
				mlxsw_sp_router_mp_hash_event_work);

	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, nb,
				mlxsw_sp_router_update_priority_work);
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

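/* Called when a RIF goes away: unprogram all of its neighbours from the
 * device and destroy the corresponding entries.
 */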
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}

enum mlxsw_sp_nexthop_type {
	MLXSW_SP_NEXTHOP_TYPE_ETH,
	MLXSW_SP_NEXTHOP_TYPE_IPIP,
};

enum mlxsw_sp_nexthop_action {
	/* Nexthop forwards packets to an egress RIF */
	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
	/* Nexthop discards packets */
	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
	/* Nexthop traps packets */
	MLXSW_SP_NEXTHOP_ACTION_TRAP,
};

struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct list_head router_list_node;
	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
						   * this nexthop belongs to
						   */
	struct rhash_head ht_node;
	struct neigh_table *neigh_tbl;
	struct mlxsw_sp_nexthop_key key;
	unsigned char gw_addr[sizeof(struct in6_addr)];
	int ifindex;
	int nh_weight;
	int norm_nh_weight;
	int num_adj_entries;
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this nexthop should be written
			      * to the adjacency table.
			      */
	   offloaded:1, /* set indicates this nexthop was written to the
			 * adjacency table.
			 */
	   update:1; /* set indicates this nexthop should be updated in the
		      * adjacency table (f.e., its MAC changed).
		      */
	enum mlxsw_sp_nexthop_action action;
	enum mlxsw_sp_nexthop_type type;
	union {
		struct mlxsw_sp_neigh_entry *neigh_entry;
		struct mlxsw_sp_ipip_entry *ipip_entry;
	};
	unsigned int counter_index;
	bool counter_valid;
};

enum mlxsw_sp_nexthop_group_type {
	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
};

struct mlxsw_sp_nexthop_group_info {
	struct mlxsw_sp_nexthop_group *nh_grp;
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	int sum_norm_weight;
	u8 adj_index_valid:1,
	   gateway:1, /* routes using the group use a gateway */
	   is_resilient:1;
	struct list_head list; /* member in nh_res_grp_list */
	struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};

struct mlxsw_sp_nexthop_group_vr_key {
	u16 vr_id;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_nexthop_group_vr_entry {
	struct list_head list; /* member in vr_list */
	struct rhash_head ht_node; /* member in vr_ht */
	refcount_t ref_count;
	struct mlxsw_sp_nexthop_group_vr_key key;
};

struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	union {
		struct {
			struct fib_info *fi;
		} ipv4;
		struct {
			u32 id;
		} obj;
	};
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct list_head vr_list;
	struct rhashtable vr_ht;
	enum mlxsw_sp_nexthop_group_type type;
	bool can_destroy;
};

void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
		return;

	nh->counter_valid = true;
}

void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	if (!nh->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
	nh->counter_valid = false;
}

int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
{
	if (!nh->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
					 p_counter, NULL);
}

struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
					       struct mlxsw_sp_nexthop *nh)
{
	if (!nh) {
		if (list_empty(&router->nexthop_list))
			return NULL;
		else
			return list_first_entry(&router->nexthop_list,
						typeof(*nh), router_list_node);
	}
	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
		return NULL;
	return list_next_entry(nh, router_list_node);
}

bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
{
	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
}

unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
{
	if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
	    !mlxsw_sp_nexthop_is_forward(nh))
		return NULL;
	return nh->neigh_entry->ha;
}

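/* Report where a nexthop sits in the adjacency table: the group's base
 * index and size, and the nexthop's offset within the group, computed by
 * summing the adjacency entries of the offloaded nexthops before it.
 */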
mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop * nh,u32 * p_adj_index,u32 * p_adj_size,u32 * p_adj_hash_index)3139 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3140 u32 *p_adj_size, u32 *p_adj_hash_index)
3141 {
3142 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3143 u32 adj_hash_index = 0;
3144 int i;
3145
3146 if (!nh->offloaded || !nhgi->adj_index_valid)
3147 return -EINVAL;
3148
3149 *p_adj_index = nhgi->adj_index;
3150 *p_adj_size = nhgi->ecmp_size;
3151
3152 for (i = 0; i < nhgi->count; i++) {
3153 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3154
3155 if (nh_iter == nh)
3156 break;
3157 if (nh_iter->offloaded)
3158 adj_hash_index += nh_iter->num_adj_entries;
3159 }
3160
3161 *p_adj_hash_index = adj_hash_index;
3162 return 0;
3163 }
3164
mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop * nh)3165 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3166 {
3167 return nh->rif;
3168 }
3169
mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop * nh)3170 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3171 {
3172 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3173 int i;
3174
3175 for (i = 0; i < nhgi->count; i++) {
3176 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3177
3178 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3179 return true;
3180 }
3181 return false;
3182 }
3183
3184 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3185 .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3186 .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3187 .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3188 .automatic_shrinking = true,
3189 };
3190
3191 static struct mlxsw_sp_nexthop_group_vr_entry *
mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group * nh_grp,const struct mlxsw_sp_fib * fib)3192 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3193 const struct mlxsw_sp_fib *fib)
3194 {
3195 struct mlxsw_sp_nexthop_group_vr_key key;
3196
3197 memset(&key, 0, sizeof(key));
3198 key.vr_id = fib->vr->id;
3199 key.proto = fib->proto;
3200 return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3201 mlxsw_sp_nexthop_group_vr_ht_params);
3202 }
3203
3204 static int
mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group * nh_grp,const struct mlxsw_sp_fib * fib)3205 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3206 const struct mlxsw_sp_fib *fib)
3207 {
3208 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3209 int err;
3210
3211 vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3212 if (!vr_entry)
3213 return -ENOMEM;
3214
3215 vr_entry->key.vr_id = fib->vr->id;
3216 vr_entry->key.proto = fib->proto;
3217 refcount_set(&vr_entry->ref_count, 1);
3218
3219 err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3220 mlxsw_sp_nexthop_group_vr_ht_params);
3221 if (err)
3222 goto err_hashtable_insert;
3223
3224 list_add(&vr_entry->list, &nh_grp->vr_list);
3225
3226 return 0;
3227
3228 err_hashtable_insert:
3229 kfree(vr_entry);
3230 return err;
3231 }
3232
3233 static void
mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group * nh_grp,struct mlxsw_sp_nexthop_group_vr_entry * vr_entry)3234 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3235 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3236 {
3237 list_del(&vr_entry->list);
3238 rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3239 mlxsw_sp_nexthop_group_vr_ht_params);
3240 kfree(vr_entry);
3241 }
3242
3243 static int
mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group * nh_grp,const struct mlxsw_sp_fib * fib)3244 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3245 const struct mlxsw_sp_fib *fib)
3246 {
3247 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3248
3249 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3250 if (vr_entry) {
3251 refcount_inc(&vr_entry->ref_count);
3252 return 0;
3253 }
3254
3255 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3256 }
3257
3258 static void
3259 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3260 const struct mlxsw_sp_fib *fib)
3261 {
3262 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3263
3264 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3265 if (WARN_ON_ONCE(!vr_entry))
3266 return;
3267
3268 if (!refcount_dec_and_test(&vr_entry->ref_count))
3269 return;
3270
3271 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3272 }
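
/* Sketch of the intended pairing of the two helpers above, assuming a
 * hypothetical caller that binds the same group to the same FIB twice
 * (usage example only, not taken from this file):
 *
 *	mlxsw_sp_nexthop_group_vr_link(nh_grp, fib);   - creates entry, ref = 1
 *	mlxsw_sp_nexthop_group_vr_link(nh_grp, fib);   - same key, ref = 2
 *	mlxsw_sp_nexthop_group_vr_unlink(nh_grp, fib); - ref = 1
 *	mlxsw_sp_nexthop_group_vr_unlink(nh_grp, fib); - ref = 0, entry freed
 *
 * Entries are keyed by { vr_id, proto }, so two FIBs of the same protocol
 * in the same virtual router share a single entry.
 */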
3273
3274 struct mlxsw_sp_nexthop_group_cmp_arg {
3275 enum mlxsw_sp_nexthop_group_type type;
3276 union {
3277 struct fib_info *fi;
3278 struct mlxsw_sp_fib6_entry *fib6_entry;
3279 u32 id;
3280 };
3281 };
3282
3283 static bool
3284 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3285 const struct in6_addr *gw, int ifindex,
3286 int weight)
3287 {
3288 int i;
3289
3290 for (i = 0; i < nh_grp->nhgi->count; i++) {
3291 const struct mlxsw_sp_nexthop *nh;
3292
3293 nh = &nh_grp->nhgi->nexthops[i];
3294 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3295 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3296 return true;
3297 }
3298
3299 return false;
3300 }
3301
3302 static bool
3303 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3304 const struct mlxsw_sp_fib6_entry *fib6_entry)
3305 {
3306 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3307
3308 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3309 return false;
3310
3311 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3312 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3313 struct in6_addr *gw;
3314 int ifindex, weight;
3315
3316 ifindex = fib6_nh->fib_nh_dev->ifindex;
3317 weight = fib6_nh->fib_nh_weight;
3318 gw = &fib6_nh->fib_nh_gw6;
3319 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3320 weight))
3321 return false;
3322 }
3323
3324 return true;
3325 }
3326
3327 static int
3328 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3329 {
3330 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3331 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3332
3333 if (nh_grp->type != cmp_arg->type)
3334 return 1;
3335
3336 switch (cmp_arg->type) {
3337 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3338 return cmp_arg->fi != nh_grp->ipv4.fi;
3339 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3340 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3341 cmp_arg->fib6_entry);
3342 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3343 return cmp_arg->id != nh_grp->obj.id;
3344 default:
3345 WARN_ON(1);
3346 return 1;
3347 }
3348 }
3349
3350 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3351 {
3352 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3353 const struct mlxsw_sp_nexthop *nh;
3354 struct fib_info *fi;
3355 unsigned int val;
3356 int i;
3357
3358 switch (nh_grp->type) {
3359 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3360 fi = nh_grp->ipv4.fi;
3361 return jhash(&fi, sizeof(fi), seed);
3362 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3363 val = nh_grp->nhgi->count;
3364 for (i = 0; i < nh_grp->nhgi->count; i++) {
3365 nh = &nh_grp->nhgi->nexthops[i];
3366 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3367 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3368 }
3369 return jhash(&val, sizeof(val), seed);
3370 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3371 return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3372 default:
3373 WARN_ON(1);
3374 return 0;
3375 }
3376 }
3377
3378 static u32
3379 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3380 {
3381 unsigned int val = fib6_entry->nrt6;
3382 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3383
3384 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3385 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3386 struct net_device *dev = fib6_nh->fib_nh_dev;
3387 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3388
3389 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3390 val ^= jhash(gw, sizeof(*gw), seed);
3391 }
3392
3393 return jhash(&val, sizeof(val), seed);
3394 }
3395
3396 static u32
3397 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3398 {
3399 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3400
3401 switch (cmp_arg->type) {
3402 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3403 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3404 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3405 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3406 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3407 return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3408 default:
3409 WARN_ON(1);
3410 return 0;
3411 }
3412 }
3413
3414 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3415 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3416 .hashfn = mlxsw_sp_nexthop_group_hash,
3417 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3418 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3419 };
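
/* Note on the rhashtable contract used above: lookups take a struct
 * mlxsw_sp_nexthop_group_cmp_arg as the key, so .hashfn hashes the key
 * while .obj_hashfn hashes the stored group, and the two must agree for
 * matching objects. For IPv4, for instance, both sides jhash the
 * fib_info pointer value itself and .obj_cmpfn returns zero only when
 * the pointers are equal.
 */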
3420
3421 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3422 struct mlxsw_sp_nexthop_group *nh_grp)
3423 {
3424 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3425 !nh_grp->nhgi->gateway)
3426 return 0;
3427
3428 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3429 &nh_grp->ht_node,
3430 mlxsw_sp_nexthop_group_ht_params);
3431 }
3432
3433 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3434 struct mlxsw_sp_nexthop_group *nh_grp)
3435 {
3436 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3437 !nh_grp->nhgi->gateway)
3438 return;
3439
3440 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3441 &nh_grp->ht_node,
3442 mlxsw_sp_nexthop_group_ht_params);
3443 }
3444
3445 static struct mlxsw_sp_nexthop_group *
3446 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3447 struct fib_info *fi)
3448 {
3449 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3450
3451 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3452 cmp_arg.fi = fi;
3453 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3454 &cmp_arg,
3455 mlxsw_sp_nexthop_group_ht_params);
3456 }
3457
3458 static struct mlxsw_sp_nexthop_group *
3459 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3460 struct mlxsw_sp_fib6_entry *fib6_entry)
3461 {
3462 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3463
3464 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3465 cmp_arg.fib6_entry = fib6_entry;
3466 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3467 &cmp_arg,
3468 mlxsw_sp_nexthop_group_ht_params);
3469 }
3470
3471 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3472 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3473 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3474 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3475 };
3476
3477 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3478 struct mlxsw_sp_nexthop *nh)
3479 {
3480 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3481 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3482 }
3483
3484 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3485 struct mlxsw_sp_nexthop *nh)
3486 {
3487 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3488 mlxsw_sp_nexthop_ht_params);
3489 }
3490
3491 static struct mlxsw_sp_nexthop *
3492 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3493 struct mlxsw_sp_nexthop_key key)
3494 {
3495 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3496 mlxsw_sp_nexthop_ht_params);
3497 }
3498
3499 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3500 enum mlxsw_sp_l3proto proto,
3501 u16 vr_id,
3502 u32 adj_index, u16 ecmp_size,
3503 u32 new_adj_index,
3504 u16 new_ecmp_size)
3505 {
3506 char raleu_pl[MLXSW_REG_RALEU_LEN];
3507
3508 mlxsw_reg_raleu_pack(raleu_pl,
3509 (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3510 adj_index, ecmp_size, new_adj_index,
3511 new_ecmp_size);
3512 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3513 }
3514
3515 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3516 struct mlxsw_sp_nexthop_group *nh_grp,
3517 u32 old_adj_index, u16 old_ecmp_size)
3518 {
3519 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3520 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3521 int err;
3522
3523 list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3524 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3525 vr_entry->key.proto,
3526 vr_entry->key.vr_id,
3527 old_adj_index,
3528 old_ecmp_size,
3529 nhgi->adj_index,
3530 nhgi->ecmp_size);
3531 if (err)
3532 goto err_mass_update_vr;
3533 }
3534 return 0;
3535
3536 err_mass_update_vr:
3537 list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3538 mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3539 vr_entry->key.vr_id,
3540 nhgi->adj_index,
3541 nhgi->ecmp_size,
3542 old_adj_index, old_ecmp_size);
3543 return err;
3544 }
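
/* The error path above is the usual unwind pattern: if updating the
 * N-th VR entry fails, list_for_each_entry_continue_reverse() walks back
 * over the first N - 1 entries and rewrites them to the old
 * { adj_index, ecmp_size }, so every virtual router keeps pointing at
 * one consistent adjacency group. (Descriptive note, not extra logic.)
 */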
3545
3546 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3547 u32 adj_index,
3548 struct mlxsw_sp_nexthop *nh,
3549 bool force, char *ratr_pl)
3550 {
3551 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3552 enum mlxsw_reg_ratr_op op;
3553 u16 rif_index;
3554
3555 rif_index = nh->rif ? nh->rif->rif_index :
3556 mlxsw_sp->router->lb_rif_index;
3557 op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3558 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3559 mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3560 adj_index, rif_index);
3561 switch (nh->action) {
3562 case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3563 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3564 break;
3565 case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3566 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3567 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3568 break;
3569 case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3570 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3571 MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3572 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3573 break;
3574 default:
3575 WARN_ON_ONCE(1);
3576 return -EINVAL;
3577 }
3578 if (nh->counter_valid)
3579 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3580 else
3581 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3582
3583 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3584 }
3585
3586 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3587 struct mlxsw_sp_nexthop *nh, bool force,
3588 char *ratr_pl)
3589 {
3590 int i;
3591
3592 for (i = 0; i < nh->num_adj_entries; i++) {
3593 int err;
3594
3595 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3596 nh, force, ratr_pl);
3597 if (err)
3598 return err;
3599 }
3600
3601 return 0;
3602 }
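
/* Example of the fan-out above (illustrative numbers): a nexthop with
 * num_adj_entries == 3 placed at adj_index 100 is written as three
 * identical RATR entries at indexes 100, 101 and 102. Weighted ECMP is
 * thus expressed purely by how many consecutive adjacency entries a
 * nexthop occupies.
 */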
3603
3604 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3605 u32 adj_index,
3606 struct mlxsw_sp_nexthop *nh,
3607 bool force, char *ratr_pl)
3608 {
3609 const struct mlxsw_sp_ipip_ops *ipip_ops;
3610
3611 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3612 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3613 force, ratr_pl);
3614 }
3615
3616 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3617 u32 adj_index,
3618 struct mlxsw_sp_nexthop *nh, bool force,
3619 char *ratr_pl)
3620 {
3621 int i;
3622
3623 for (i = 0; i < nh->num_adj_entries; i++) {
3624 int err;
3625
3626 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3627 nh, force, ratr_pl);
3628 if (err)
3629 return err;
3630 }
3631
3632 return 0;
3633 }
3634
3635 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3636 struct mlxsw_sp_nexthop *nh, bool force,
3637 char *ratr_pl)
3638 {
3639 /* When action is discard or trap, the nexthop must be
3640 * programmed as an Ethernet nexthop.
3641 */
3642 if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3643 nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3644 nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3645 return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3646 force, ratr_pl);
3647 else
3648 return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3649 force, ratr_pl);
3650 }
3651
3652 static int
3653 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3654 struct mlxsw_sp_nexthop_group_info *nhgi,
3655 bool reallocate)
3656 {
3657 char ratr_pl[MLXSW_REG_RATR_LEN];
3658 u32 adj_index = nhgi->adj_index; /* base */
3659 struct mlxsw_sp_nexthop *nh;
3660 int i;
3661
3662 for (i = 0; i < nhgi->count; i++) {
3663 nh = &nhgi->nexthops[i];
3664
3665 if (!nh->should_offload) {
3666 nh->offloaded = 0;
3667 continue;
3668 }
3669
3670 if (nh->update || reallocate) {
3671 int err;
3672
3673 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3674 true, ratr_pl);
3675 if (err)
3676 return err;
3677 nh->update = 0;
3678 nh->offloaded = 1;
3679 }
3680 adj_index += nh->num_adj_entries;
3681 }
3682 return 0;
3683 }
3684
3685 static int
3686 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3687 struct mlxsw_sp_nexthop_group *nh_grp)
3688 {
3689 struct mlxsw_sp_fib_entry *fib_entry;
3690 int err;
3691
3692 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3693 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3694 if (err)
3695 return err;
3696 }
3697 return 0;
3698 }
3699
3700 struct mlxsw_sp_adj_grp_size_range {
3701 u16 start; /* Inclusive */
3702 u16 end; /* Inclusive */
3703 };
3704
3705 /* Ordered by range start value */
3706 static const struct mlxsw_sp_adj_grp_size_range
3707 mlxsw_sp1_adj_grp_size_ranges[] = {
3708 { .start = 1, .end = 64 },
3709 { .start = 512, .end = 512 },
3710 { .start = 1024, .end = 1024 },
3711 { .start = 2048, .end = 2048 },
3712 { .start = 4096, .end = 4096 },
3713 };
3714
3715 /* Ordered by range start value */
3716 static const struct mlxsw_sp_adj_grp_size_range
3717 mlxsw_sp2_adj_grp_size_ranges[] = {
3718 { .start = 1, .end = 128 },
3719 { .start = 256, .end = 256 },
3720 { .start = 512, .end = 512 },
3721 { .start = 1024, .end = 1024 },
3722 { .start = 2048, .end = 2048 },
3723 { .start = 4096, .end = 4096 },
3724 };
3725
3726 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3727 u16 *p_adj_grp_size)
3728 {
3729 int i;
3730
3731 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3732 const struct mlxsw_sp_adj_grp_size_range *size_range;
3733
3734 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3735
3736 if (*p_adj_grp_size >= size_range->start &&
3737 *p_adj_grp_size <= size_range->end)
3738 return;
3739
3740 if (*p_adj_grp_size <= size_range->end) {
3741 *p_adj_grp_size = size_range->end;
3742 return;
3743 }
3744 }
3745 }
3746
3747 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3748 u16 *p_adj_grp_size,
3749 unsigned int alloc_size)
3750 {
3751 int i;
3752
3753 for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3754 const struct mlxsw_sp_adj_grp_size_range *size_range;
3755
3756 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3757
3758 if (alloc_size >= size_range->end) {
3759 *p_adj_grp_size = size_range->end;
3760 return;
3761 }
3762 }
3763 }
3764
3765 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3766 u16 *p_adj_grp_size)
3767 {
3768 unsigned int alloc_size;
3769 int err;
3770
3771 /* Round up the requested group size to the next size supported
3772 * by the device and make sure the request can be satisfied.
3773 */
3774 mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3775 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3776 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3777 *p_adj_grp_size, &alloc_size);
3778 if (err)
3779 return err;
3780 /* It is possible the allocation results in more allocated
3781 * entries than requested. Try to use as many of them as
3782 * possible.
3783 */
3784 mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3785
3786 return 0;
3787 }
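
/* Worked example, assuming the Spectrum-1 ranges above: a request for 70
 * entries is first rounded up to 512, the next supported size. The KVDL
 * query then reports how many entries such an allocation would actually
 * span; if it reports, say, 1024, the size is rounded back down to the
 * largest supported size that fits, i.e. 1024. The final group size is
 * therefore always an endpoint of one of the advertised ranges (or the
 * original size, when it already falls inside a range).
 */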
3788
3789 static void
3790 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3791 {
3792 int i, g = 0, sum_norm_weight = 0;
3793 struct mlxsw_sp_nexthop *nh;
3794
3795 for (i = 0; i < nhgi->count; i++) {
3796 nh = &nhgi->nexthops[i];
3797
3798 if (!nh->should_offload)
3799 continue;
3800 if (g > 0)
3801 g = gcd(nh->nh_weight, g);
3802 else
3803 g = nh->nh_weight;
3804 }
3805
3806 for (i = 0; i < nhgi->count; i++) {
3807 nh = &nhgi->nexthops[i];
3808
3809 if (!nh->should_offload)
3810 continue;
3811 nh->norm_nh_weight = nh->nh_weight / g;
3812 sum_norm_weight += nh->norm_nh_weight;
3813 }
3814
3815 nhgi->sum_norm_weight = sum_norm_weight;
3816 }
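
/* Worked example for the normalization above: offloadable nexthop
 * weights { 3, 6, 9 } have gcd 3, so the normalized weights become
 * { 1, 2, 3 } and sum_norm_weight is 6. Nexthops that should not be
 * offloaded contribute to neither the gcd nor the sum.
 */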
3817
3818 static void
3819 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3820 {
3821 int i, weight = 0, lower_bound = 0;
3822 int total = nhgi->sum_norm_weight;
3823 u16 ecmp_size = nhgi->ecmp_size;
3824
3825 for (i = 0; i < nhgi->count; i++) {
3826 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3827 int upper_bound;
3828
3829 if (!nh->should_offload)
3830 continue;
3831 weight += nh->norm_nh_weight;
3832 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3833 nh->num_adj_entries = upper_bound - lower_bound;
3834 lower_bound = upper_bound;
3835 }
3836 }
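
/* Continuing the example: with normalized weights { 1, 2, 3 } (total 6)
 * and ecmp_size fixed up to 8, the running upper bounds are
 * DIV_ROUND_CLOSEST(8 * 1, 6) = 1, DIV_ROUND_CLOSEST(8 * 3, 6) = 4 and
 * DIV_ROUND_CLOSEST(8 * 6, 6) = 8, so the nexthops receive 1, 3 and 4
 * adjacency entries respectively: proportional to their weights, with
 * the rounding error spread across the whole group.
 */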
3837
3838 static struct mlxsw_sp_nexthop *
3839 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3840 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3841
3842 static void
3843 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3844 struct mlxsw_sp_nexthop_group *nh_grp)
3845 {
3846 int i;
3847
3848 for (i = 0; i < nh_grp->nhgi->count; i++) {
3849 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3850
3851 if (nh->offloaded)
3852 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3853 else
3854 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3855 }
3856 }
3857
3858 static void
3859 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3860 struct mlxsw_sp_fib6_entry *fib6_entry)
3861 {
3862 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3863
3864 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3865 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3866 struct mlxsw_sp_nexthop *nh;
3867
3868 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3869 if (nh && nh->offloaded)
3870 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3871 else
3872 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3873 }
3874 }
3875
3876 static void
3877 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3878 struct mlxsw_sp_nexthop_group *nh_grp)
3879 {
3880 struct mlxsw_sp_fib6_entry *fib6_entry;
3881
3882 /* Unfortunately, in IPv6 the route and the nexthop are described by
3883 * the same struct, so we need to iterate over all the routes using the
3884 * nexthop group and set / clear the offload indication for them.
3885 */
3886 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3887 common.nexthop_group_node)
3888 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3889 }
3890
3891 static void
3892 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3893 const struct mlxsw_sp_nexthop *nh,
3894 u16 bucket_index)
3895 {
3896 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3897 bool offload = false, trap = false;
3898
3899 if (nh->offloaded) {
3900 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3901 trap = true;
3902 else
3903 offload = true;
3904 }
3905 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3906 bucket_index, offload, trap);
3907 }
3908
3909 static void
3910 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3911 struct mlxsw_sp_nexthop_group *nh_grp)
3912 {
3913 int i;
3914
3915 /* Do not update the flags if the nexthop group is being destroyed
3916 * since:
3917 * 1. The nexthop object is being deleted, in which case the flags are
3918 * irrelevant.
3919 * 2. The nexthop group was replaced by a newer group, in which case
3920 * the flags of the nexthop object were already updated based on the
3921 * new group.
3922 */
3923 if (nh_grp->can_destroy)
3924 return;
3925
3926 nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3927 nh_grp->nhgi->adj_index_valid, false);
3928
3929 /* Update flags of individual nexthop buckets in case of a resilient
3930 * nexthop group.
3931 */
3932 if (!nh_grp->nhgi->is_resilient)
3933 return;
3934
3935 for (i = 0; i < nh_grp->nhgi->count; i++) {
3936 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3937
3938 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3939 }
3940 }
3941
3942 static void
3943 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3944 struct mlxsw_sp_nexthop_group *nh_grp)
3945 {
3946 switch (nh_grp->type) {
3947 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3948 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3949 break;
3950 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3951 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3952 break;
3953 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3954 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3955 break;
3956 }
3957 }
3958
3959 static int
3960 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3961 struct mlxsw_sp_nexthop_group *nh_grp)
3962 {
3963 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3964 u16 ecmp_size, old_ecmp_size;
3965 struct mlxsw_sp_nexthop *nh;
3966 bool offload_change = false;
3967 u32 adj_index;
3968 bool old_adj_index_valid;
3969 u32 old_adj_index;
3970 int i, err2, err;
3971
3972 if (!nhgi->gateway)
3973 return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3974
3975 for (i = 0; i < nhgi->count; i++) {
3976 nh = &nhgi->nexthops[i];
3977
3978 if (nh->should_offload != nh->offloaded) {
3979 offload_change = true;
3980 if (nh->should_offload)
3981 nh->update = 1;
3982 }
3983 }
3984 if (!offload_change) {
3985 /* Nothing was added or removed, so no need to reallocate. Just
3986 * update MAC on existing adjacency indexes.
3987 */
3988 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3989 if (err) {
3990 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3991 goto set_trap;
3992 }
3993 /* Flags of individual nexthop buckets might need to be
3994 * updated.
3995 */
3996 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3997 return 0;
3998 }
3999 mlxsw_sp_nexthop_group_normalize(nhgi);
4000 if (!nhgi->sum_norm_weight) {
4001 /* No neigh of this group is connected so we just set
4002 * the trap and let everything flow through the kernel.
4003 */
4004 err = 0;
4005 goto set_trap;
4006 }
4007
4008 ecmp_size = nhgi->sum_norm_weight;
4009 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
4010 if (err)
4011 /* No valid allocation size available. */
4012 goto set_trap;
4013
4014 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4015 ecmp_size, &adj_index);
4016 if (err) {
4017 /* We ran out of KVD linear space, just set the
4018 * trap and let everything flow through kernel.
4019 */
4020 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
4021 goto set_trap;
4022 }
4023 old_adj_index_valid = nhgi->adj_index_valid;
4024 old_adj_index = nhgi->adj_index;
4025 old_ecmp_size = nhgi->ecmp_size;
4026 nhgi->adj_index_valid = 1;
4027 nhgi->adj_index = adj_index;
4028 nhgi->ecmp_size = ecmp_size;
4029 mlxsw_sp_nexthop_group_rebalance(nhgi);
4030 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
4031 if (err) {
4032 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
4033 goto set_trap;
4034 }
4035
4036 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4037
4038 if (!old_adj_index_valid) {
4039 /* The trap was set for fib entries, so we have to call
4040 * fib entry update to unset it and use the adjacency index.
4041 */
4042 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4043 if (err) {
4044 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
4045 goto set_trap;
4046 }
4047 return 0;
4048 }
4049
4050 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
4051 old_adj_index, old_ecmp_size);
4052 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4053 old_ecmp_size, old_adj_index);
4054 if (err) {
4055 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
4056 goto set_trap;
4057 }
4058
4059 return 0;
4060
4061 set_trap:
4062 old_adj_index_valid = nhgi->adj_index_valid;
4063 nhgi->adj_index_valid = 0;
4064 for (i = 0; i < nhgi->count; i++) {
4065 nh = &nhgi->nexthops[i];
4066 nh->offloaded = 0;
4067 }
4068 err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
4069 if (err2)
4070 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
4071 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
4072 if (old_adj_index_valid)
4073 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4074 nhgi->ecmp_size, nhgi->adj_index);
4075 return err;
4076 }
4077
4078 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4079 bool removing)
4080 {
4081 if (!removing) {
4082 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4083 nh->should_offload = 1;
4084 } else if (nh->nhgi->is_resilient) {
4085 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4086 nh->should_offload = 1;
4087 } else {
4088 nh->should_offload = 0;
4089 }
4090 nh->update = 1;
4091 }
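
/* State summary for the helper above (descriptive only): a resolved
 * neighbour yields FORWARD with should_offload set; losing the
 * neighbour in a resilient group yields TRAP with should_offload still
 * set, since resilient buckets must stay populated; in a regular group
 * the nexthop simply stops being offloaded. In all cases the nexthop is
 * marked for update.
 */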
4092
4093 static int
4094 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4095 struct mlxsw_sp_neigh_entry *neigh_entry)
4096 {
4097 struct neighbour *n, *old_n = neigh_entry->key.n;
4098 struct mlxsw_sp_nexthop *nh;
4099 bool entry_connected;
4100 u8 nud_state, dead;
4101 int err;
4102
4103 nh = list_first_entry(&neigh_entry->nexthop_list,
4104 struct mlxsw_sp_nexthop, neigh_list_node);
4105
4106 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4107 if (!n) {
4108 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4109 if (IS_ERR(n))
4110 return PTR_ERR(n);
4111 neigh_event_send(n, NULL);
4112 }
4113
4114 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4115 neigh_entry->key.n = n;
4116 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4117 if (err)
4118 goto err_neigh_entry_insert;
4119
4120 read_lock_bh(&n->lock);
4121 nud_state = n->nud_state;
4122 dead = n->dead;
4123 read_unlock_bh(&n->lock);
4124 entry_connected = nud_state & NUD_VALID && !dead;
4125
4126 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4127 neigh_list_node) {
4128 neigh_release(old_n);
4129 neigh_clone(n);
4130 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4131 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4132 }
4133
4134 neigh_release(n);
4135
4136 return 0;
4137
4138 err_neigh_entry_insert:
4139 neigh_entry->key.n = old_n;
4140 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4141 neigh_release(n);
4142 return err;
4143 }
4144
4145 static void
4146 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4147 struct mlxsw_sp_neigh_entry *neigh_entry,
4148 bool removing, bool dead)
4149 {
4150 struct mlxsw_sp_nexthop *nh;
4151
4152 if (list_empty(&neigh_entry->nexthop_list))
4153 return;
4154
4155 if (dead) {
4156 int err;
4157
4158 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4159 neigh_entry);
4160 if (err)
4161 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4162 return;
4163 }
4164
4165 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4166 neigh_list_node) {
4167 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4168 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4169 }
4170 }
4171
4172 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4173 struct mlxsw_sp_rif *rif)
4174 {
4175 if (nh->rif)
4176 return;
4177
4178 nh->rif = rif;
4179 list_add(&nh->rif_list_node, &rif->nexthop_list);
4180 }
4181
4182 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4183 {
4184 if (!nh->rif)
4185 return;
4186
4187 list_del(&nh->rif_list_node);
4188 nh->rif = NULL;
4189 }
4190
4191 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4192 struct mlxsw_sp_nexthop *nh)
4193 {
4194 struct mlxsw_sp_neigh_entry *neigh_entry;
4195 struct neighbour *n;
4196 u8 nud_state, dead;
4197 int err;
4198
4199 if (!nh->nhgi->gateway || nh->neigh_entry)
4200 return 0;
4201
4202 /* Take a reference on the neigh here, ensuring that it is not
4203 * destroyed before the nexthop entry is finished.
4204 * The reference is taken either in neigh_lookup() or
4205 * in neigh_create() in case n is not found.
4206 */
4207 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4208 if (!n) {
4209 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4210 if (IS_ERR(n))
4211 return PTR_ERR(n);
4212 neigh_event_send(n, NULL);
4213 }
4214 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4215 if (!neigh_entry) {
4216 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4217 if (IS_ERR(neigh_entry)) {
4218 err = -EINVAL;
4219 goto err_neigh_entry_create;
4220 }
4221 }
4222
4223 /* If that is the first nexthop connected to that neigh, add to
4224 * nexthop_neighs_list
4225 */
4226 if (list_empty(&neigh_entry->nexthop_list))
4227 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4228 &mlxsw_sp->router->nexthop_neighs_list);
4229
4230 nh->neigh_entry = neigh_entry;
4231 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4232 read_lock_bh(&n->lock);
4233 nud_state = n->nud_state;
4234 dead = n->dead;
4235 read_unlock_bh(&n->lock);
4236 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4237
4238 return 0;
4239
4240 err_neigh_entry_create:
4241 neigh_release(n);
4242 return err;
4243 }
4244
4245 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4246 struct mlxsw_sp_nexthop *nh)
4247 {
4248 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4249 struct neighbour *n;
4250
4251 if (!neigh_entry)
4252 return;
4253 n = neigh_entry->key.n;
4254
4255 __mlxsw_sp_nexthop_neigh_update(nh, true);
4256 list_del(&nh->neigh_list_node);
4257 nh->neigh_entry = NULL;
4258
4259 /* If that is the last nexthop connected to that neigh, remove from
4260 * nexthop_neighs_list
4261 */
4262 if (list_empty(&neigh_entry->nexthop_list))
4263 list_del(&neigh_entry->nexthop_neighs_list_node);
4264
4265 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4266 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4267
4268 neigh_release(n);
4269 }
4270
4271 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4272 {
4273 struct net_device *ul_dev;
4274 bool is_up;
4275
4276 rcu_read_lock();
4277 ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4278 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4279 rcu_read_unlock();
4280
4281 return is_up;
4282 }
4283
4284 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4285 struct mlxsw_sp_nexthop *nh,
4286 struct mlxsw_sp_ipip_entry *ipip_entry)
4287 {
4288 bool removing;
4289
4290 if (!nh->nhgi->gateway || nh->ipip_entry)
4291 return;
4292
4293 nh->ipip_entry = ipip_entry;
4294 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4295 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4296 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4297 }
4298
4299 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4300 struct mlxsw_sp_nexthop *nh)
4301 {
4302 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4303
4304 if (!ipip_entry)
4305 return;
4306
4307 __mlxsw_sp_nexthop_neigh_update(nh, true);
4308 nh->ipip_entry = NULL;
4309 }
4310
4311 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4312 const struct fib_nh *fib_nh,
4313 enum mlxsw_sp_ipip_type *p_ipipt)
4314 {
4315 struct net_device *dev = fib_nh->fib_nh_dev;
4316
4317 return dev &&
4318 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4319 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4320 }
4321
4322 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4323 struct mlxsw_sp_nexthop *nh,
4324 const struct net_device *dev)
4325 {
4326 const struct mlxsw_sp_ipip_ops *ipip_ops;
4327 struct mlxsw_sp_ipip_entry *ipip_entry;
4328 struct mlxsw_sp_rif *rif;
4329 int err;
4330
4331 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4332 if (ipip_entry) {
4333 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4334 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4335 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4336 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4337 return 0;
4338 }
4339 }
4340
4341 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4342 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4343 if (!rif)
4344 return 0;
4345
4346 mlxsw_sp_nexthop_rif_init(nh, rif);
4347 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4348 if (err)
4349 goto err_neigh_init;
4350
4351 return 0;
4352
4353 err_neigh_init:
4354 mlxsw_sp_nexthop_rif_fini(nh);
4355 return err;
4356 }
4357
4358 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4359 struct mlxsw_sp_nexthop *nh)
4360 {
4361 switch (nh->type) {
4362 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4363 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4364 mlxsw_sp_nexthop_rif_fini(nh);
4365 break;
4366 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4367 mlxsw_sp_nexthop_rif_fini(nh);
4368 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4369 break;
4370 }
4371 }
4372
4373 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4374 struct mlxsw_sp_nexthop_group *nh_grp,
4375 struct mlxsw_sp_nexthop *nh,
4376 struct fib_nh *fib_nh)
4377 {
4378 struct net_device *dev = fib_nh->fib_nh_dev;
4379 struct in_device *in_dev;
4380 int err;
4381
4382 nh->nhgi = nh_grp->nhgi;
4383 nh->key.fib_nh = fib_nh;
4384 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4385 nh->nh_weight = fib_nh->fib_nh_weight;
4386 #else
4387 nh->nh_weight = 1;
4388 #endif
4389 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4390 nh->neigh_tbl = &arp_tbl;
4391 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4392 if (err)
4393 return err;
4394
4395 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4396 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4397
4398 if (!dev)
4399 return 0;
4400 nh->ifindex = dev->ifindex;
4401
4402 rcu_read_lock();
4403 in_dev = __in_dev_get_rcu(dev);
4404 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4405 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4406 rcu_read_unlock();
4407 return 0;
4408 }
4409 rcu_read_unlock();
4410
4411 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4412 if (err)
4413 goto err_nexthop_neigh_init;
4414
4415 return 0;
4416
4417 err_nexthop_neigh_init:
4418 list_del(&nh->router_list_node);
4419 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4420 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4421 return err;
4422 }
4423
4424 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4425 struct mlxsw_sp_nexthop *nh)
4426 {
4427 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4428 list_del(&nh->router_list_node);
4429 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4430 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4431 }
4432
4433 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4434 unsigned long event, struct fib_nh *fib_nh)
4435 {
4436 struct mlxsw_sp_nexthop_key key;
4437 struct mlxsw_sp_nexthop *nh;
4438
4439 key.fib_nh = fib_nh;
4440 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4441 if (!nh)
4442 return;
4443
4444 switch (event) {
4445 case FIB_EVENT_NH_ADD:
4446 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4447 break;
4448 case FIB_EVENT_NH_DEL:
4449 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4450 break;
4451 }
4452
4453 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4454 }
4455
4456 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4457 struct mlxsw_sp_rif *rif)
4458 {
4459 struct mlxsw_sp_nexthop *nh;
4460 bool removing;
4461
4462 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4463 switch (nh->type) {
4464 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4465 removing = false;
4466 break;
4467 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4468 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4469 break;
4470 default:
4471 WARN_ON(1);
4472 continue;
4473 }
4474
4475 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4476 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4477 }
4478 }
4479
4480 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4481 struct mlxsw_sp_rif *old_rif,
4482 struct mlxsw_sp_rif *new_rif)
4483 {
4484 struct mlxsw_sp_nexthop *nh;
4485
4486 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4487 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4488 nh->rif = new_rif;
4489 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4490 }
4491
4492 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4493 struct mlxsw_sp_rif *rif)
4494 {
4495 struct mlxsw_sp_nexthop *nh, *tmp;
4496
4497 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4498 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4499 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4500 }
4501 }
4502
4503 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4504 {
4505 enum mlxsw_reg_ratr_trap_action trap_action;
4506 char ratr_pl[MLXSW_REG_RATR_LEN];
4507 int err;
4508
4509 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4510 &mlxsw_sp->router->adj_trap_index);
4511 if (err)
4512 return err;
4513
4514 trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4515 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4516 MLXSW_REG_RATR_TYPE_ETHERNET,
4517 mlxsw_sp->router->adj_trap_index,
4518 mlxsw_sp->router->lb_rif_index);
4519 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4520 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4521 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4522 if (err)
4523 goto err_ratr_write;
4524
4525 return 0;
4526
4527 err_ratr_write:
4528 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4529 mlxsw_sp->router->adj_trap_index);
4530 return err;
4531 }
4532
4533 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4534 {
4535 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4536 mlxsw_sp->router->adj_trap_index);
4537 }
4538
4539 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4540 {
4541 int err;
4542
4543 if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4544 return 0;
4545
4546 err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4547 if (err)
4548 return err;
4549
4550 refcount_set(&mlxsw_sp->router->num_groups, 1);
4551
4552 return 0;
4553 }
4554
4555 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4556 {
4557 if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4558 return;
4559
4560 mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4561 }
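
/* The two helpers above implement a "first user allocates, last user
 * frees" scheme for the shared trap adjacency entry: the first nexthop
 * group to appear programs a single RATR entry that traps to the CPU,
 * later groups only bump num_groups, and the entry is released when the
 * last group goes away.
 */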
4562
4563 static void
4564 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4565 const struct mlxsw_sp_nexthop_group *nh_grp,
4566 unsigned long *activity)
4567 {
4568 char *ratrad_pl;
4569 int i, err;
4570
4571 ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4572 if (!ratrad_pl)
4573 return;
4574
4575 mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4576 nh_grp->nhgi->count);
4577 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4578 if (err)
4579 goto out;
4580
4581 for (i = 0; i < nh_grp->nhgi->count; i++) {
4582 if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4583 continue;
4584 bitmap_set(activity, i, 1);
4585 }
4586
4587 out:
4588 kfree(ratrad_pl);
4589 }
4590
4591 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4592
4593 static void
4594 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4595 const struct mlxsw_sp_nexthop_group *nh_grp)
4596 {
4597 unsigned long *activity;
4598
4599 activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4600 if (!activity)
4601 return;
4602
4603 mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4604 nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4605 nh_grp->nhgi->count, activity);
4606
4607 bitmap_free(activity);
4608 }
4609
4610 static void
4611 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4612 {
4613 unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4614
4615 mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4616 msecs_to_jiffies(interval));
4617 }
4618
4619 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4620 {
4621 struct mlxsw_sp_nexthop_group_info *nhgi;
4622 struct mlxsw_sp_router *router;
4623 bool reschedule = false;
4624
4625 router = container_of(work, struct mlxsw_sp_router,
4626 nh_grp_activity_dw.work);
4627
4628 mutex_lock(&router->lock);
4629
4630 list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4631 mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4632 reschedule = true;
4633 }
4634
4635 mutex_unlock(&router->lock);
4636
4637 if (!reschedule)
4638 return;
4639 mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4640 }
4641
4642 static int
4643 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4644 const struct nh_notifier_single_info *nh,
4645 struct netlink_ext_ack *extack)
4646 {
4647 int err = -EINVAL;
4648
4649 if (nh->is_fdb)
4650 NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4651 else if (nh->has_encap)
4652 NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4653 else
4654 err = 0;
4655
4656 return err;
4657 }
4658
4659 static int
4660 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4661 const struct nh_notifier_single_info *nh,
4662 struct netlink_ext_ack *extack)
4663 {
4664 int err;
4665
4666 err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4667 if (err)
4668 return err;
4669
4670 /* Device-only nexthops with an IPIP device are programmed as
4671 * encapsulating adjacency entries.
4672 */
4673 if (!nh->gw_family && !nh->is_reject &&
4674 !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4675 NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
4676 return -EINVAL;
4677 }
4678
4679 return 0;
4680 }
4681
4682 static int
4683 mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
4684 const struct nh_notifier_grp_info *nh_grp,
4685 struct netlink_ext_ack *extack)
4686 {
4687 int i;
4688
4689 if (nh_grp->is_fdb) {
4690 NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
4691 return -EINVAL;
4692 }
4693
4694 for (i = 0; i < nh_grp->num_nh; i++) {
4695 const struct nh_notifier_single_info *nh;
4696 int err;
4697
4698 nh = &nh_grp->nh_entries[i].nh;
4699 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4700 extack);
4701 if (err)
4702 return err;
4703 }
4704
4705 return 0;
4706 }
4707
4708 static int
4709 mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
4710 const struct nh_notifier_res_table_info *nh_res_table,
4711 struct netlink_ext_ack *extack)
4712 {
4713 unsigned int alloc_size;
4714 bool valid_size = false;
4715 int err, i;
4716
4717 if (nh_res_table->num_nh_buckets < 32) {
4718 NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
4719 return -EINVAL;
4720 }
4721
4722 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
4723 const struct mlxsw_sp_adj_grp_size_range *size_range;
4724
4725 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
4726
4727 if (nh_res_table->num_nh_buckets >= size_range->start &&
4728 nh_res_table->num_nh_buckets <= size_range->end) {
4729 valid_size = true;
4730 break;
4731 }
4732 }
4733
4734 if (!valid_size) {
4735 NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
4736 return -EINVAL;
4737 }
4738
4739 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
4740 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
4741 nh_res_table->num_nh_buckets,
4742 &alloc_size);
4743 if (err || nh_res_table->num_nh_buckets != alloc_size) {
4744 NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
4745 return -EINVAL;
4746 }
4747
4748 return 0;
4749 }
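
/* Example of the checks above, assuming the Spectrum-2 size ranges: a
 * resilient group with 100 buckets passes both the minimum (32) and the
 * range (1-128) checks, but is accepted only if a KVDL allocation of 100
 * adjacency entries would span exactly 100 entries. Unlike regular ECMP
 * groups, which are resized via mlxsw_sp_fix_adj_grp_size(), resilient
 * groups may not silently grow or shrink.
 */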
4750
4751 static int
4752 mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4753 const struct nh_notifier_res_table_info *nh_res_table,
4754 struct netlink_ext_ack *extack)
4755 {
4756 int err;
4757 u16 i;
4758
4759 err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4760 nh_res_table,
4761 extack);
4762 if (err)
4763 return err;
4764
4765 for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4766 const struct nh_notifier_single_info *nh;
4767 int err;
4768
4769 nh = &nh_res_table->nhs[i];
4770 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4771 extack);
4772 if (err)
4773 return err;
4774 }
4775
4776 return 0;
4777 }
4778
4779 static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
4780 unsigned long event,
4781 struct nh_notifier_info *info)
4782 {
4783 struct nh_notifier_single_info *nh;
4784
4785 if (event != NEXTHOP_EVENT_REPLACE &&
4786 event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
4787 event != NEXTHOP_EVENT_BUCKET_REPLACE)
4788 return 0;
4789
4790 switch (info->type) {
4791 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4792 return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
4793 info->extack);
4794 case NH_NOTIFIER_INFO_TYPE_GRP:
4795 return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
4796 info->nh_grp,
4797 info->extack);
4798 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4799 return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
4800 info->nh_res_table,
4801 info->extack);
4802 case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
4803 nh = &info->nh_res_bucket->new_nh;
4804 return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4805 info->extack);
4806 default:
4807 NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
4808 return -EOPNOTSUPP;
4809 }
4810 }
4811
4812 static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
4813 const struct nh_notifier_info *info)
4814 {
4815 const struct net_device *dev;
4816
4817 switch (info->type) {
4818 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4819 dev = info->nh->dev;
4820 return info->nh->gw_family || info->nh->is_reject ||
4821 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
4822 case NH_NOTIFIER_INFO_TYPE_GRP:
4823 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4824 /* Already validated earlier. */
4825 return true;
4826 default:
4827 return false;
4828 }
4829 }
4830
4831 static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
4832 struct mlxsw_sp_nexthop *nh)
4833 {
4834 u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
4835
4836 nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
4837 nh->should_offload = 1;
4838 /* While nexthops that discard packets do not forward packets
4839 * via an egress RIF, they still need to be programmed using a
4840 * valid RIF, so use the loopback RIF created during init.
4841 */
4842 nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
4843 }
4844
4845 static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
4846 struct mlxsw_sp_nexthop *nh)
4847 {
4848 nh->rif = NULL;
4849 nh->should_offload = 0;
4850 }
4851
4852 static int
4853 mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
4854 struct mlxsw_sp_nexthop_group *nh_grp,
4855 struct mlxsw_sp_nexthop *nh,
4856 struct nh_notifier_single_info *nh_obj, int weight)
4857 {
4858 struct net_device *dev = nh_obj->dev;
4859 int err;
4860
4861 nh->nhgi = nh_grp->nhgi;
4862 nh->nh_weight = weight;
4863
4864 switch (nh_obj->gw_family) {
4865 case AF_INET:
4866 memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
4867 nh->neigh_tbl = &arp_tbl;
4868 break;
4869 case AF_INET6:
4870 memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
4871 #if IS_ENABLED(CONFIG_IPV6)
4872 nh->neigh_tbl = &nd_tbl;
4873 #endif
4874 break;
4875 }
4876
4877 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4878 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4879 nh->ifindex = dev->ifindex;
4880
4881 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4882 if (err)
4883 goto err_type_init;
4884
4885 if (nh_obj->is_reject)
4886 mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
4887
4888 /* In a resilient nexthop group, all the nexthops must be written to
4889 * the adjacency table, even if they do not have a valid neighbour or
4890 * RIF.
4891 */
4892 if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
4893 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4894 nh->should_offload = 1;
4895 }
4896
4897 return 0;
4898
4899 err_type_init:
4900 list_del(&nh->router_list_node);
4901 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4902 return err;
4903 }
4904
4905 static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
4906 struct mlxsw_sp_nexthop *nh)
4907 {
4908 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
4909 mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
4910 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4911 list_del(&nh->router_list_node);
4912 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4913 nh->should_offload = 0;
4914 }
4915
4916 static int
4917 mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
4918 struct mlxsw_sp_nexthop_group *nh_grp,
4919 struct nh_notifier_info *info)
4920 {
4921 struct mlxsw_sp_nexthop_group_info *nhgi;
4922 struct mlxsw_sp_nexthop *nh;
4923 bool is_resilient = false;
4924 unsigned int nhs;
4925 int err, i;
4926
4927 switch (info->type) {
4928 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4929 nhs = 1;
4930 break;
4931 case NH_NOTIFIER_INFO_TYPE_GRP:
4932 nhs = info->nh_grp->num_nh;
4933 break;
4934 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4935 nhs = info->nh_res_table->num_nh_buckets;
4936 is_resilient = true;
4937 break;
4938 default:
4939 return -EINVAL;
4940 }
4941
4942 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4943 if (!nhgi)
4944 return -ENOMEM;
4945 nh_grp->nhgi = nhgi;
4946 nhgi->nh_grp = nh_grp;
4947 nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
4948 nhgi->is_resilient = is_resilient;
4949 nhgi->count = nhs;
4950 for (i = 0; i < nhgi->count; i++) {
4951 struct nh_notifier_single_info *nh_obj;
4952 int weight;
4953
4954 nh = &nhgi->nexthops[i];
4955 switch (info->type) {
4956 case NH_NOTIFIER_INFO_TYPE_SINGLE:
4957 nh_obj = info->nh;
4958 weight = 1;
4959 break;
4960 case NH_NOTIFIER_INFO_TYPE_GRP:
4961 nh_obj = &info->nh_grp->nh_entries[i].nh;
4962 weight = info->nh_grp->nh_entries[i].weight;
4963 break;
4964 case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
4965 nh_obj = &info->nh_res_table->nhs[i];
4966 weight = 1;
4967 break;
4968 default:
4969 err = -EINVAL;
4970 goto err_nexthop_obj_init;
4971 }
4972 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
4973 weight);
4974 if (err)
4975 goto err_nexthop_obj_init;
4976 }
4977 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
4978 if (err)
4979 goto err_group_inc;
4980 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4981 if (err) {
4982 NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
4983 goto err_group_refresh;
4984 }
4985
4986 /* Add resilient nexthop groups to a list so that the activity of their
4987 * nexthop buckets will be periodically queried and cleared.
4988 */
4989 if (nhgi->is_resilient) {
4990 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4991 mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
4992 list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
4993 }
4994
4995 return 0;
4996
4997 err_group_refresh:
4998 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4999 err_group_inc:
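/* All the nexthops were initialized; start the rollback below past
* the last one.
*/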
5000 i = nhgi->count;
5001 err_nexthop_obj_init:
5002 for (i--; i >= 0; i--) {
5003 nh = &nhgi->nexthops[i];
5004 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5005 }
5006 kfree(nhgi);
5007 return err;
5008 }
5009
5010 static void
5011 mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5012 struct mlxsw_sp_nexthop_group *nh_grp)
5013 {
5014 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5015 struct mlxsw_sp_router *router = mlxsw_sp->router;
5016 int i;
5017
5018 if (nhgi->is_resilient) {
5019 list_del(&nhgi->list);
5020 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
5021 cancel_delayed_work(&router->nh_grp_activity_dw);
5022 }
5023
5024 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5025 for (i = nhgi->count - 1; i >= 0; i--) {
5026 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5027
5028 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5029 }
5030 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5031 WARN_ON_ONCE(nhgi->adj_index_valid);
5032 kfree(nhgi);
5033 }
5034
5035 static struct mlxsw_sp_nexthop_group *
5036 mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
5037 struct nh_notifier_info *info)
5038 {
5039 struct mlxsw_sp_nexthop_group *nh_grp;
5040 int err;
5041
5042 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5043 if (!nh_grp)
5044 return ERR_PTR(-ENOMEM);
5045 INIT_LIST_HEAD(&nh_grp->vr_list);
5046 err = rhashtable_init(&nh_grp->vr_ht,
5047 &mlxsw_sp_nexthop_group_vr_ht_params);
5048 if (err)
5049 goto err_nexthop_group_vr_ht_init;
5050 INIT_LIST_HEAD(&nh_grp->fib_list);
5051 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5052 nh_grp->obj.id = info->id;
5053
5054 err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
5055 if (err)
5056 goto err_nexthop_group_info_init;
5057
5058 nh_grp->can_destroy = false;
5059
5060 return nh_grp;
5061
5062 err_nexthop_group_info_init:
5063 rhashtable_destroy(&nh_grp->vr_ht);
5064 err_nexthop_group_vr_ht_init:
5065 kfree(nh_grp);
5066 return ERR_PTR(err);
5067 }
5068
5069 static void
5070 mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
5071 struct mlxsw_sp_nexthop_group *nh_grp)
5072 {
5073 if (!nh_grp->can_destroy)
5074 return;
5075 mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
5076 WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
5077 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5078 rhashtable_destroy(&nh_grp->vr_ht);
5079 kfree(nh_grp);
5080 }
5081
5082 static struct mlxsw_sp_nexthop_group *
5083 mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5084 {
5085 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5086
5087 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5088 cmp_arg.id = id;
5089 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5090 &cmp_arg,
5091 mlxsw_sp_nexthop_group_ht_params);
5092 }
5093
5094 static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
5095 struct mlxsw_sp_nexthop_group *nh_grp)
5096 {
5097 return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5098 }
5099
5100 static int
5101 mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
5102 struct mlxsw_sp_nexthop_group *nh_grp,
5103 struct mlxsw_sp_nexthop_group *old_nh_grp,
5104 struct netlink_ext_ack *extack)
5105 {
5106 struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
5107 struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
5108 int err;
5109
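/* Swap the group info of both groups, so that the group referenced
* by routes ('old_nh_grp') starts using the new info, while the
* temporary group holds the old info until it is destroyed below.
*/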
5110 old_nh_grp->nhgi = new_nhgi;
5111 new_nhgi->nh_grp = old_nh_grp;
5112 nh_grp->nhgi = old_nhgi;
5113 old_nhgi->nh_grp = nh_grp;
5114
5115 if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5116 /* Both the old adjacency index and the new one are valid.
5117 * Routes are currently using the old one. Tell the device to
5118 * replace the old adjacency index with the new one.
5119 */
5120 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
5121 old_nhgi->adj_index,
5122 old_nhgi->ecmp_size);
5123 if (err) {
5124 NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
5125 goto err_out;
5126 }
5127 } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
5128 /* The old adjacency index is valid, while the new one is not.
5129 * Iterate over all the routes using the group and change them
5130 * to trap packets to the CPU.
5131 */
5132 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5133 if (err) {
5134 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
5135 goto err_out;
5136 }
5137 } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
5138 /* The old adjacency index is invalid, while the new one is.
5139 * Iterate over all the routes using the group and change them
5140 * to forward packets using the new valid index.
5141 */
5142 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
5143 if (err) {
5144 NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
5145 goto err_out;
5146 }
5147 }
5148
5149 /* Make sure the flags are set / cleared based on the new nexthop group
5150 * information.
5151 */
5152 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
5153
5154 /* At this point 'nh_grp' is just a shell that is not used by anyone
5155 * and its nexthop group info is the old info that was just replaced
5156 * with the new one. Remove it.
5157 */
5158 nh_grp->can_destroy = true;
5159 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5160
5161 return 0;
5162
5163 err_out:
5164 old_nhgi->nh_grp = old_nh_grp;
5165 nh_grp->nhgi = new_nhgi;
5166 new_nhgi->nh_grp = nh_grp;
5167 old_nh_grp->nhgi = old_nhgi;
5168 return err;
5169 }
5170
5171 static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
5172 struct nh_notifier_info *info)
5173 {
5174 struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
5175 struct netlink_ext_ack *extack = info->extack;
5176 int err;
5177
5178 nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
5179 if (IS_ERR(nh_grp))
5180 return PTR_ERR(nh_grp);
5181
5182 old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5183 if (!old_nh_grp)
5184 err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
5185 else
5186 err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
5187 old_nh_grp, extack);
5188
5189 if (err) {
5190 nh_grp->can_destroy = true;
5191 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5192 }
5193
5194 return err;
5195 }
5196
5197 static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
5198 struct nh_notifier_info *info)
5199 {
5200 struct mlxsw_sp_nexthop_group *nh_grp;
5201
5202 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5203 if (!nh_grp)
5204 return;
5205
5206 nh_grp->can_destroy = true;
5207 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5208
5209 /* If routes are still using the group, defer the delete operation
5210 * until the last such route is deleted.
5211 */
5212 if (!list_empty(&nh_grp->fib_list))
5213 return;
5214 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5215 }
5216
5217 static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
5218 u32 adj_index, char *ratr_pl)
5219 {
5220 MLXSW_REG_ZERO(ratr, ratr_pl);
5221 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
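/* The adjacency index is wider than the 16-bit low field, so its
* upper bits are programmed via the separate high field.
*/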
5222 mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
5223 mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);
5224
5225 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
5226 }
5227
5228 static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
5229 {
5230 /* Clear the opcode and activity on both the old and new payload as
5231 * they are irrelevant for the comparison.
5232 */
5233 mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
5234 mlxsw_reg_ratr_a_set(ratr_pl, 0);
5235 mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
5236 mlxsw_reg_ratr_a_set(ratr_pl_new, 0);
5237
5238 /* If the contents of the adjacency entry are consistent with the
5239 * replacement request, then replacement was successful.
5240 */
5241 if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
5242 return 0;
5243
5244 return -EINVAL;
5245 }
5246
5247 static int
5248 mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
5249 struct mlxsw_sp_nexthop *nh,
5250 struct nh_notifier_info *info)
5251 {
5252 u16 bucket_index = info->nh_res_bucket->bucket_index;
5253 struct netlink_ext_ack *extack = info->extack;
5254 bool force = info->nh_res_bucket->force;
5255 char ratr_pl_new[MLXSW_REG_RATR_LEN];
5256 char ratr_pl[MLXSW_REG_RATR_LEN];
5257 u32 adj_index;
5258 int err;
5259
5260 /* No point in trying an atomic replacement if the idle timer interval
5261 * is smaller than the interval in which we query and clear activity.
5262 */
5263 if (!force && info->nh_res_bucket->idle_timer_ms <
5264 MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
5265 force = true;
5266
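/* Buckets occupy consecutive adjacency entries starting at the
* group's base adjacency index.
*/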
5267 adj_index = nh->nhgi->adj_index + bucket_index;
5268 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
5269 if (err) {
5270 NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
5271 return err;
5272 }
5273
5274 if (!force) {
5275 err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
5276 ratr_pl_new);
5277 if (err) {
5278 NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
5279 return err;
5280 }
5281
5282 err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
5283 if (err) {
5284 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
5285 return err;
5286 }
5287 }
5288
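/* The bucket now holds the new nexthop; clear the pending update and
* refresh the bucket's offload indication.
*/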
5289 nh->update = 0;
5290 nh->offloaded = 1;
5291 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);
5292
5293 return 0;
5294 }
5295
5296 static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
5297 struct nh_notifier_info *info)
5298 {
5299 u16 bucket_index = info->nh_res_bucket->bucket_index;
5300 struct netlink_ext_ack *extack = info->extack;
5301 struct mlxsw_sp_nexthop_group_info *nhgi;
5302 struct nh_notifier_single_info *nh_obj;
5303 struct mlxsw_sp_nexthop_group *nh_grp;
5304 struct mlxsw_sp_nexthop *nh;
5305 int err;
5306
5307 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
5308 if (!nh_grp) {
5309 NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
5310 return -EINVAL;
5311 }
5312
5313 nhgi = nh_grp->nhgi;
5314
5315 if (bucket_index >= nhgi->count) {
5316 NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
5317 return -EINVAL;
5318 }
5319
5320 nh = &nhgi->nexthops[bucket_index];
5321 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5322
5323 nh_obj = &info->nh_res_bucket->new_nh;
5324 err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5325 if (err) {
5326 NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
5327 goto err_nexthop_obj_init;
5328 }
5329
5330 err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
5331 if (err)
5332 goto err_nexthop_obj_bucket_adj_update;
5333
5334 return 0;
5335
5336 err_nexthop_obj_bucket_adj_update:
5337 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
5338 err_nexthop_obj_init:
5339 nh_obj = &info->nh_res_bucket->old_nh;
5340 mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
5341 /* The old adjacency entry was not overwritten */
5342 nh->update = 0;
5343 nh->offloaded = 1;
5344 return err;
5345 }
5346
5347 static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
5348 unsigned long event, void *ptr)
5349 {
5350 struct nh_notifier_info *info = ptr;
5351 struct mlxsw_sp_router *router;
5352 int err = 0;
5353
5354 router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
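/* Validation only inspects the notification info, so it can run
* before the router lock is taken.
*/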
5355 err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
5356 if (err)
5357 goto out;
5358
5359 mutex_lock(&router->lock);
5360
5361 switch (event) {
5362 case NEXTHOP_EVENT_REPLACE:
5363 err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
5364 break;
5365 case NEXTHOP_EVENT_DEL:
5366 mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
5367 break;
5368 case NEXTHOP_EVENT_BUCKET_REPLACE:
5369 err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
5370 info);
5371 break;
5372 default:
5373 break;
5374 }
5375
5376 mutex_unlock(&router->lock);
5377
5378 out:
5379 return notifier_from_errno(err);
5380 }
5381
5382 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5383 struct fib_info *fi)
5384 {
5385 const struct fib_nh *nh = fib_info_nh(fi, 0);
5386
5387 return nh->fib_nh_gw_family ||
5388 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5389 }
5390
5391 static int
5392 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
5393 struct mlxsw_sp_nexthop_group *nh_grp)
5394 {
5395 unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
5396 struct mlxsw_sp_nexthop_group_info *nhgi;
5397 struct mlxsw_sp_nexthop *nh;
5398 int err, i;
5399
5400 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
5401 if (!nhgi)
5402 return -ENOMEM;
5403 nh_grp->nhgi = nhgi;
5404 nhgi->nh_grp = nh_grp;
5405 nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
5406 nhgi->count = nhs;
5407 for (i = 0; i < nhgi->count; i++) {
5408 struct fib_nh *fib_nh;
5409
5410 nh = &nhgi->nexthops[i];
5411 fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
5412 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
5413 if (err)
5414 goto err_nexthop4_init;
5415 }
5416 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
5417 if (err)
5418 goto err_group_inc;
5419 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5420 if (err)
5421 goto err_group_refresh;
5422
5423 return 0;
5424
5425 err_group_refresh:
5426 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5427 err_group_inc:
5428 i = nhgi->count;
5429 err_nexthop4_init:
5430 for (i--; i >= 0; i--) {
5431 nh = &nhgi->nexthops[i];
5432 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5433 }
5434 kfree(nhgi);
5435 return err;
5436 }
5437
5438 static void
5439 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5440 struct mlxsw_sp_nexthop_group *nh_grp)
5441 {
5442 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5443 int i;
5444
5445 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
5446 for (i = nhgi->count - 1; i >= 0; i--) {
5447 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5448
5449 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
5450 }
5451 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5452 WARN_ON_ONCE(nhgi->adj_index_valid);
5453 kfree(nhgi);
5454 }
5455
5456 static struct mlxsw_sp_nexthop_group *
5457 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
5458 {
5459 struct mlxsw_sp_nexthop_group *nh_grp;
5460 int err;
5461
5462 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5463 if (!nh_grp)
5464 return ERR_PTR(-ENOMEM);
5465 INIT_LIST_HEAD(&nh_grp->vr_list);
5466 err = rhashtable_init(&nh_grp->vr_ht,
5467 &mlxsw_sp_nexthop_group_vr_ht_params);
5468 if (err)
5469 goto err_nexthop_group_vr_ht_init;
5470 INIT_LIST_HEAD(&nh_grp->fib_list);
5471 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
5472 nh_grp->ipv4.fi = fi;
5473 fib_info_hold(fi);
5474
5475 err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
5476 if (err)
5477 goto err_nexthop_group_info_init;
5478
5479 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5480 if (err)
5481 goto err_nexthop_group_insert;
5482
5483 nh_grp->can_destroy = true;
5484
5485 return nh_grp;
5486
5487 err_nexthop_group_insert:
5488 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5489 err_nexthop_group_info_init:
5490 fib_info_put(fi);
5491 rhashtable_destroy(&nh_grp->vr_ht);
5492 err_nexthop_group_vr_ht_init:
5493 kfree(nh_grp);
5494 return ERR_PTR(err);
5495 }
5496
5497 static void
5498 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
5499 struct mlxsw_sp_nexthop_group *nh_grp)
5500 {
5501 if (!nh_grp->can_destroy)
5502 return;
5503 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5504 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
5505 fib_info_put(nh_grp->ipv4.fi);
5506 WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
5507 rhashtable_destroy(&nh_grp->vr_ht);
5508 kfree(nh_grp);
5509 }
5510
5511 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
5512 struct mlxsw_sp_fib_entry *fib_entry,
5513 struct fib_info *fi)
5514 {
5515 struct mlxsw_sp_nexthop_group *nh_grp;
5516
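/* The FIB info is backed by a nexthop object, so reuse the group
* that was created when the object itself was notified instead of
* creating an IPv4-specific group.
*/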
5517 if (fi->nh) {
5518 nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
5519 fi->nh->id);
5520 if (WARN_ON_ONCE(!nh_grp))
5521 return -EINVAL;
5522 goto out;
5523 }
5524
5525 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
5526 if (!nh_grp) {
5527 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
5528 if (IS_ERR(nh_grp))
5529 return PTR_ERR(nh_grp);
5530 }
5531 out:
5532 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
5533 fib_entry->nh_group = nh_grp;
5534 return 0;
5535 }
5536
5537 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
5538 struct mlxsw_sp_fib_entry *fib_entry)
5539 {
5540 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5541
5542 list_del(&fib_entry->nexthop_group_node);
5543 if (!list_empty(&nh_grp->fib_list))
5544 return;
5545
5546 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
5547 mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
5548 return;
5549 }
5550
5551 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
5552 }
5553
5554 static bool
5555 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5556 {
5557 struct mlxsw_sp_fib4_entry *fib4_entry;
5558
5559 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5560 common);
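/* Entries that match on a non-zero DSCP value are not offloaded;
* they are trapped to the CPU instead.
*/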
5561 return !fib4_entry->dscp;
5562 }
5563
5564 static bool
5565 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
5566 {
5567 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5568
5569 switch (fib_entry->fib_node->fib->proto) {
5570 case MLXSW_SP_L3_PROTO_IPV4:
5571 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
5572 return false;
5573 break;
5574 case MLXSW_SP_L3_PROTO_IPV6:
5575 break;
5576 }
5577
5578 switch (fib_entry->type) {
5579 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
5580 return !!nh_group->nhgi->adj_index_valid;
5581 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
5582 return !!nh_group->nhgi->nh_rif;
5583 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
5584 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
5585 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
5586 return true;
5587 default:
5588 return false;
5589 }
5590 }
5591
5592 static struct mlxsw_sp_nexthop *
5593 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
5594 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5595 {
5596 int i;
5597
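/* Find the nexthop in the group whose egress device and gateway
* address match those of the IPv6 route.
*/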
5598 for (i = 0; i < nh_grp->nhgi->count; i++) {
5599 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
5600 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5601
5602 if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
5603 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
5604 &rt->fib6_nh->fib_nh_gw6))
5605 return nh;
5606 }
5607
5608 return NULL;
5609 }
5610
5611 static void
5612 mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5613 struct fib_entry_notifier_info *fen_info)
5614 {
5615 u32 *p_dst = (u32 *) &fen_info->dst;
5616 struct fib_rt_info fri;
5617
5618 fri.fi = fen_info->fi;
5619 fri.tb_id = fen_info->tb_id;
5620 fri.dst = cpu_to_be32(*p_dst);
5621 fri.dst_len = fen_info->dst_len;
5622 fri.dscp = fen_info->dscp;
5623 fri.type = fen_info->type;
5624 fri.offload = false;
5625 fri.trap = false;
5626 fri.offload_failed = true;
5627 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5628 }
5629
5630 static void
5631 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5632 struct mlxsw_sp_fib_entry *fib_entry)
5633 {
5634 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5635 int dst_len = fib_entry->fib_node->key.prefix_len;
5636 struct mlxsw_sp_fib4_entry *fib4_entry;
5637 struct fib_rt_info fri;
5638 bool should_offload;
5639
5640 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5641 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5642 common);
5643 fri.fi = fib4_entry->fi;
5644 fri.tb_id = fib4_entry->tb_id;
5645 fri.dst = cpu_to_be32(*p_dst);
5646 fri.dst_len = dst_len;
5647 fri.dscp = fib4_entry->dscp;
5648 fri.type = fib4_entry->type;
5649 fri.offload = should_offload;
5650 fri.trap = !should_offload;
5651 fri.offload_failed = false;
5652 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5653 }
5654
5655 static void
5656 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5657 struct mlxsw_sp_fib_entry *fib_entry)
5658 {
5659 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
5660 int dst_len = fib_entry->fib_node->key.prefix_len;
5661 struct mlxsw_sp_fib4_entry *fib4_entry;
5662 struct fib_rt_info fri;
5663
5664 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
5665 common);
5666 fri.fi = fib4_entry->fi;
5667 fri.tb_id = fib4_entry->tb_id;
5668 fri.dst = cpu_to_be32(*p_dst);
5669 fri.dst_len = dst_len;
5670 fri.dscp = fib4_entry->dscp;
5671 fri.type = fib4_entry->type;
5672 fri.offload = false;
5673 fri.trap = false;
5674 fri.offload_failed = false;
5675 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5676 }
5677
5678 #if IS_ENABLED(CONFIG_IPV6)
5679 static void
5680 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5681 struct fib6_info **rt_arr,
5682 unsigned int nrt6)
5683 {
5684 int i;
5685
5686 /* In IPv6 a multipath route is represented using multiple routes, so
5687 * we need to set the flags on all of them.
5688 */
5689 for (i = 0; i < nrt6; i++)
5690 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
5691 false, false, true);
5692 }
5693 #else
5694 static void
5695 mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5696 struct fib6_info **rt_arr,
5697 unsigned int nrt6)
5698 {
5699 }
5700 #endif
5701
5702 #if IS_ENABLED(CONFIG_IPV6)
5703 static void
5704 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5705 struct mlxsw_sp_fib_entry *fib_entry)
5706 {
5707 struct mlxsw_sp_fib6_entry *fib6_entry;
5708 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5709 bool should_offload;
5710
5711 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
5712
5713 /* In IPv6 a multipath route is represented using multiple routes, so
5714 * we need to set the flags on all of them.
5715 */
5716 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5717 common);
5718 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5719 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5720 should_offload, !should_offload, false);
5721 }
5722 #else
5723 static void
5724 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5725 struct mlxsw_sp_fib_entry *fib_entry)
5726 {
5727 }
5728 #endif
5729
5730 #if IS_ENABLED(CONFIG_IPV6)
5731 static void
5732 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5733 struct mlxsw_sp_fib_entry *fib_entry)
5734 {
5735 struct mlxsw_sp_fib6_entry *fib6_entry;
5736 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5737
5738 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5739 common);
5740 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5741 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5742 false, false, false);
5743 }
5744 #else
5745 static void
5746 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5747 struct mlxsw_sp_fib_entry *fib_entry)
5748 {
5749 }
5750 #endif
5751
5752 static void
5753 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5754 struct mlxsw_sp_fib_entry *fib_entry)
5755 {
5756 switch (fib_entry->fib_node->fib->proto) {
5757 case MLXSW_SP_L3_PROTO_IPV4:
5758 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5759 break;
5760 case MLXSW_SP_L3_PROTO_IPV6:
5761 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5762 break;
5763 }
5764 }
5765
5766 static void
5767 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5768 struct mlxsw_sp_fib_entry *fib_entry)
5769 {
5770 switch (fib_entry->fib_node->fib->proto) {
5771 case MLXSW_SP_L3_PROTO_IPV4:
5772 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5773 break;
5774 case MLXSW_SP_L3_PROTO_IPV6:
5775 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5776 break;
5777 }
5778 }
5779
5780 static void
5781 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5782 struct mlxsw_sp_fib_entry *fib_entry,
5783 enum mlxsw_sp_fib_entry_op op)
5784 {
5785 switch (op) {
5786 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5787 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5788 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5789 break;
5790 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5791 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5792 break;
5793 default:
5794 break;
5795 }
5796 }
5797
5798 struct mlxsw_sp_fib_entry_op_ctx_basic {
5799 char ralue_pl[MLXSW_REG_RALUE_LEN];
5800 };
5801
5802 static void
5803 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5804 enum mlxsw_sp_l3proto proto,
5805 enum mlxsw_sp_fib_entry_op op,
5806 u16 virtual_router, u8 prefix_len,
5807 unsigned char *addr,
5808 struct mlxsw_sp_fib_entry_priv *priv)
5809 {
5810 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5811 enum mlxsw_reg_ralxx_protocol ralxx_proto;
5812 char *ralue_pl = op_ctx_basic->ralue_pl;
5813 enum mlxsw_reg_ralue_op ralue_op;
5814
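/* The cast below relies on mlxsw_sp_l3proto using the same encoding
* as the RALXX protocol field.
*/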
5815 ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
5816
5817 switch (op) {
5818 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5819 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5820 ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
5821 break;
5822 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5823 ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
5824 break;
5825 default:
5826 WARN_ON_ONCE(1);
5827 return;
5828 }
5829
5830 switch (proto) {
5831 case MLXSW_SP_L3_PROTO_IPV4:
5832 mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
5833 virtual_router, prefix_len, (u32 *) addr);
5834 break;
5835 case MLXSW_SP_L3_PROTO_IPV6:
5836 mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
5837 virtual_router, prefix_len, addr);
5838 break;
5839 }
5840 }
5841
5842 static void
5843 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5844 enum mlxsw_reg_ralue_trap_action trap_action,
5845 u16 trap_id, u32 adjacency_index, u16 ecmp_size)
5846 {
5847 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5848
5849 mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
5850 trap_id, adjacency_index, ecmp_size);
5851 }
5852
5853 static void
5854 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5855 enum mlxsw_reg_ralue_trap_action trap_action,
5856 u16 trap_id, u16 local_erif)
5857 {
5858 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5859
5860 mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
5861 trap_id, local_erif);
5862 }
5863
5864 static void
5865 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
5866 {
5867 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5868
5869 mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
5870 }
5871
5872 static void
5873 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5874 u32 tunnel_ptr)
5875 {
5876 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5877
5878 mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
5879 }
5880
5881 static int
5882 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5883 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5884 bool *postponed_for_bulk)
5885 {
5886 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5887
5888 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5889 op_ctx_basic->ralue_pl);
5890 }
5891
5892 static bool
5893 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
5894 {
5895 return true;
5896 }
5897
5898 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5899 struct mlxsw_sp_fib_entry *fib_entry,
5900 enum mlxsw_sp_fib_entry_op op)
5901 {
5902 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5903
5904 mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
5905 fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
5906 fib_entry->fib_node->key.prefix_len,
5907 fib_entry->fib_node->key.addr,
5908 fib_entry->priv);
5909 }
5910
5911 static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5912 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5913 const struct mlxsw_sp_router_ll_ops *ll_ops)
5914 {
5915 bool postponed_for_bulk = false;
5916 int err;
5917
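/* A low-level implementation that supports bulking may postpone the
* write, in which case the entry keeps its priv references until the
* bulk is flushed.
*/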
5918 err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
5919 if (!postponed_for_bulk)
5920 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
5921 return err;
5922 }
5923
5924 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
5925 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5926 struct mlxsw_sp_fib_entry *fib_entry,
5927 enum mlxsw_sp_fib_entry_op op)
5928 {
5929 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5930 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
5931 struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
5932 enum mlxsw_reg_ralue_trap_action trap_action;
5933 u16 trap_id = 0;
5934 u32 adjacency_index = 0;
5935 u16 ecmp_size = 0;
5936
5937 /* If the nexthop group's adjacency index is valid, use it with the
5938 * provided ECMP size. If the group has nexthops and a RIF but no
5939 * valid adjacency index, use the dedicated trap adjacency entry.
5940 * Otherwise, set up a trap and pass traffic to the kernel. */
5941 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5942 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5943 adjacency_index = nhgi->adj_index;
5944 ecmp_size = nhgi->ecmp_size;
5945 } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
5946 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5947 adjacency_index = mlxsw_sp->router->adj_trap_index;
5948 ecmp_size = 1;
5949 } else {
5950 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5951 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5952 }
5953
5954 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5955 ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
5956 adjacency_index, ecmp_size);
5957 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5958 }
5959
5960 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
5961 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5962 struct mlxsw_sp_fib_entry *fib_entry,
5963 enum mlxsw_sp_fib_entry_op op)
5964 {
5965 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5966 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
5967 enum mlxsw_reg_ralue_trap_action trap_action;
5968 u16 trap_id = 0;
5969 u16 rif_index = 0;
5970
5971 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
5972 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
5973 rif_index = rif->rif_index;
5974 } else {
5975 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5976 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
5977 }
5978
5979 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5980 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
5981 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5982 }
5983
5984 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5985 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5986 struct mlxsw_sp_fib_entry *fib_entry,
5987 enum mlxsw_sp_fib_entry_op op)
5988 {
5989 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5990
5991 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5992 ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5993 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5994 }
5995
5996 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5997 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5998 struct mlxsw_sp_fib_entry *fib_entry,
5999 enum mlxsw_sp_fib_entry_op op)
6000 {
6001 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6002 enum mlxsw_reg_ralue_trap_action trap_action;
6003
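/* Blackhole routes are programmed as a local action with a discard
* trap action, so matching packets are dropped in hardware without
* being sent to the CPU.
*/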
6004 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
6005 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6006 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
6007 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6008 }
6009
6010 static int
6011 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
6012 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6013 struct mlxsw_sp_fib_entry *fib_entry,
6014 enum mlxsw_sp_fib_entry_op op)
6015 {
6016 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6017 enum mlxsw_reg_ralue_trap_action trap_action;
6018 u16 trap_id;
6019
6020 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
6021 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
6022
6023 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6024 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
6025 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6026 }
6027
6028 static int
6029 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
6030 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6031 struct mlxsw_sp_fib_entry *fib_entry,
6032 enum mlxsw_sp_fib_entry_op op)
6033 {
6034 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6035 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
6036 const struct mlxsw_sp_ipip_ops *ipip_ops;
6037 int err;
6038
6039 if (WARN_ON(!ipip_entry))
6040 return -EINVAL;
6041
6042 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
6043 err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
6044 fib_entry->decap.tunnel_index);
6045 if (err)
6046 return err;
6047
6048 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6049 ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
6050 fib_entry->decap.tunnel_index);
6051 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6052 }
6053
6054 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
6055 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6056 struct mlxsw_sp_fib_entry *fib_entry,
6057 enum mlxsw_sp_fib_entry_op op)
6058 {
6059 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6060
6061 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
6062 ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
6063 fib_entry->decap.tunnel_index);
6064 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6065 }
6066
6067 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6068 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6069 struct mlxsw_sp_fib_entry *fib_entry,
6070 enum mlxsw_sp_fib_entry_op op)
6071 {
6072 switch (fib_entry->type) {
6073 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
6074 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
6075 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
6076 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
6077 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
6078 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
6079 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
6080 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
6081 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
6082 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
6083 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6084 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
6085 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
6086 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
6087 }
6088 return -EINVAL;
6089 }
6090
6091 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6092 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6093 struct mlxsw_sp_fib_entry *fib_entry,
6094 enum mlxsw_sp_fib_entry_op op)
6095 {
6096 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
6097
6098 if (err)
6099 return err;
6100
6101 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6102
6103 return err;
6104 }
6105
6106 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6107 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6108 struct mlxsw_sp_fib_entry *fib_entry,
6109 bool is_new)
6110 {
6111 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6112 is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
6113 MLXSW_SP_FIB_ENTRY_OP_UPDATE);
6114 }
6115
6116 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6117 struct mlxsw_sp_fib_entry *fib_entry)
6118 {
6119 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6120
6121 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6122 return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
6123 }
6124
6125 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6126 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6127 struct mlxsw_sp_fib_entry *fib_entry)
6128 {
6129 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6130
6131 if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
6132 return 0;
6133 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6134 MLXSW_SP_FIB_ENTRY_OP_DELETE);
6135 }
6136
6137 static int
6138 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6139 const struct fib_entry_notifier_info *fen_info,
6140 struct mlxsw_sp_fib_entry *fib_entry)
6141 {
6142 struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6143 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
6144 struct mlxsw_sp_router *router = mlxsw_sp->router;
6145 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
6146 int ifindex = nhgi->nexthops[0].ifindex;
6147 struct mlxsw_sp_ipip_entry *ipip_entry;
6148
6149 switch (fen_info->type) {
6150 case RTN_LOCAL:
6151 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6152 MLXSW_SP_L3_PROTO_IPV4, dip);
6153 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6154 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6155 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
6156 fib_entry,
6157 ipip_entry);
6158 }
6159 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6160 MLXSW_SP_L3_PROTO_IPV4,
6161 &dip)) {
6162 u32 tunnel_index;
6163
6164 tunnel_index = router->nve_decap_config.tunnel_index;
6165 fib_entry->decap.tunnel_index = tunnel_index;
6166 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6167 return 0;
6168 }
6169 fallthrough;
6170 case RTN_BROADCAST:
6171 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6172 return 0;
6173 case RTN_BLACKHOLE:
6174 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6175 return 0;
6176 case RTN_UNREACHABLE:
6177 case RTN_PROHIBIT:
6178 /* Packets hitting these routes need to be trapped, but
6179 * can do so with a lower priority than packets directed
6180 * at the host, so use action type local instead of trap.
6181 */
6182 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6183 return 0;
6184 case RTN_UNICAST:
6185 if (nhgi->gateway)
6186 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6187 else
6188 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6189 return 0;
6190 default:
6191 return -EINVAL;
6192 }
6193 }
6194
6195 static void
6196 mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6197 struct mlxsw_sp_fib_entry *fib_entry)
6198 {
6199 switch (fib_entry->type) {
6200 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6201 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6202 break;
6203 default:
6204 break;
6205 }
6206 }
6207
6208 static void
6209 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6210 struct mlxsw_sp_fib4_entry *fib4_entry)
6211 {
6212 mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
6213 }
6214
6215 static struct mlxsw_sp_fib4_entry *
6216 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
6217 struct mlxsw_sp_fib_node *fib_node,
6218 const struct fib_entry_notifier_info *fen_info)
6219 {
6220 struct mlxsw_sp_fib4_entry *fib4_entry;
6221 struct mlxsw_sp_fib_entry *fib_entry;
6222 int err;
6223
6224 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
6225 if (!fib4_entry)
6226 return ERR_PTR(-ENOMEM);
6227 fib_entry = &fib4_entry->common;
6228
6229 fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
6230 if (IS_ERR(fib_entry->priv)) {
6231 err = PTR_ERR(fib_entry->priv);
6232 goto err_fib_entry_priv_create;
6233 }
6234
6235 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
6236 if (err)
6237 goto err_nexthop4_group_get;
6238
6239 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6240 fib_node->fib);
6241 if (err)
6242 goto err_nexthop_group_vr_link;
6243
6244 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
6245 if (err)
6246 goto err_fib4_entry_type_set;
6247
6248 fib4_entry->fi = fen_info->fi;
6249 fib_info_hold(fib4_entry->fi);
6250 fib4_entry->tb_id = fen_info->tb_id;
6251 fib4_entry->type = fen_info->type;
6252 fib4_entry->dscp = fen_info->dscp;
6253
6254 fib_entry->fib_node = fib_node;
6255
6256 return fib4_entry;
6257
6258 err_fib4_entry_type_set:
6259 mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6260 err_nexthop_group_vr_link:
6261 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6262 err_nexthop4_group_get:
6263 mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
6264 err_fib_entry_priv_create:
6265 kfree(fib4_entry);
6266 return ERR_PTR(err);
6267 }
6268
6269 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6270 struct mlxsw_sp_fib4_entry *fib4_entry)
6271 {
6272 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6273
6274 fib_info_put(fib4_entry->fi);
6275 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
6276 mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
6277 fib_node->fib);
6278 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
6279 mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
6280 kfree(fib4_entry);
6281 }
6282
6283 static struct mlxsw_sp_fib4_entry *
6284 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6285 const struct fib_entry_notifier_info *fen_info)
6286 {
6287 struct mlxsw_sp_fib4_entry *fib4_entry;
6288 struct mlxsw_sp_fib_node *fib_node;
6289 struct mlxsw_sp_fib *fib;
6290 struct mlxsw_sp_vr *vr;
6291
6292 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
6293 if (!vr)
6294 return NULL;
6295 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
6296
6297 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
6298 sizeof(fen_info->dst),
6299 fen_info->dst_len);
6300 if (!fib_node)
6301 return NULL;
6302
6303 fib4_entry = container_of(fib_node->fib_entry,
6304 struct mlxsw_sp_fib4_entry, common);
6305 if (fib4_entry->tb_id == fen_info->tb_id &&
6306 fib4_entry->dscp == fen_info->dscp &&
6307 fib4_entry->type == fen_info->type &&
6308 fib4_entry->fi == fen_info->fi)
6309 return fib4_entry;
6310
6311 return NULL;
6312 }
6313
6314 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
6315 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
6316 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
6317 .key_len = sizeof(struct mlxsw_sp_fib_key),
6318 .automatic_shrinking = true,
6319 };
6320
6321 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
6322 struct mlxsw_sp_fib_node *fib_node)
6323 {
6324 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
6325 mlxsw_sp_fib_ht_params);
6326 }
6327
6328 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
6329 struct mlxsw_sp_fib_node *fib_node)
6330 {
6331 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
6332 mlxsw_sp_fib_ht_params);
6333 }
6334
6335 static struct mlxsw_sp_fib_node *
6336 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6337 size_t addr_len, unsigned char prefix_len)
6338 {
6339 struct mlxsw_sp_fib_key key;
6340
6341 memset(&key, 0, sizeof(key));
6342 memcpy(key.addr, addr, addr_len);
6343 key.prefix_len = prefix_len;
6344 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6345 }
6346
6347 static struct mlxsw_sp_fib_node *
6348 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6349 size_t addr_len, unsigned char prefix_len)
6350 {
6351 struct mlxsw_sp_fib_node *fib_node;
6352
6353 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6354 if (!fib_node)
6355 return NULL;
6356
6357 list_add(&fib_node->list, &fib->node_list);
6358 memcpy(fib_node->key.addr, addr, addr_len);
6359 fib_node->key.prefix_len = prefix_len;
6360
6361 return fib_node;
6362 }
6363
6364 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
6365 {
6366 list_del(&fib_node->list);
6367 kfree(fib_node);
6368 }
6369
6370 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
6371 struct mlxsw_sp_fib_node *fib_node)
6372 {
6373 struct mlxsw_sp_prefix_usage req_prefix_usage;
6374 struct mlxsw_sp_fib *fib = fib_node->fib;
6375 struct mlxsw_sp_lpm_tree *lpm_tree;
6376 int err;
6377
6378 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
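/* If the prefix length is already used by the bound tree, only its
* reference count needs to be updated.
*/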
6379 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6380 goto out;
6381
6382 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6383 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
6384 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6385 fib->proto);
6386 if (IS_ERR(lpm_tree))
6387 return PTR_ERR(lpm_tree);
6388
6389 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6390 if (err)
6391 goto err_lpm_tree_replace;
6392
6393 out:
6394 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
6395 return 0;
6396
6397 err_lpm_tree_replace:
6398 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6399 return err;
6400 }
6401
6402 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
6403 struct mlxsw_sp_fib_node *fib_node)
6404 {
6405 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
6406 struct mlxsw_sp_prefix_usage req_prefix_usage;
6407 struct mlxsw_sp_fib *fib = fib_node->fib;
6408 int err;
6409
6410 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
6411 return;
6412 /* Try to construct a new LPM tree from the current prefix usage
6413 * minus the prefix length that is no longer used. If this fails,
6414 * continue using the old tree. */
6415 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
6416 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
6417 fib_node->key.prefix_len);
6418 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
6419 fib->proto);
6420 if (IS_ERR(lpm_tree))
6421 return;
6422
6423 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
6424 if (err)
6425 goto err_lpm_tree_replace;
6426
6427 return;
6428
6429 err_lpm_tree_replace:
6430 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
6431 }
6432
mlxsw_sp_fib_node_init(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fib_node * fib_node,struct mlxsw_sp_fib * fib)6433 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
6434 struct mlxsw_sp_fib_node *fib_node,
6435 struct mlxsw_sp_fib *fib)
6436 {
6437 int err;
6438
6439 err = mlxsw_sp_fib_node_insert(fib, fib_node);
6440 if (err)
6441 return err;
6442 fib_node->fib = fib;
6443
6444 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
6445 if (err)
6446 goto err_fib_lpm_tree_link;
6447
6448 return 0;
6449
6450 err_fib_lpm_tree_link:
6451 fib_node->fib = NULL;
6452 mlxsw_sp_fib_node_remove(fib, fib_node);
6453 return err;
6454 }
6455
mlxsw_sp_fib_node_fini(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_fib_node * fib_node)6456 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
6457 struct mlxsw_sp_fib_node *fib_node)
6458 {
6459 struct mlxsw_sp_fib *fib = fib_node->fib;
6460
6461 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
6462 fib_node->fib = NULL;
6463 mlxsw_sp_fib_node_remove(fib, fib_node);
6464 }
6465
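/* Look up the FIB node for {tb_id, addr/prefix_len}, creating it, along
 * with its virtual router, if it does not exist yet.
 */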
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (fib_node->fib_entry)
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	bool is_new = !fib_node->fib_entry;
	int err;

	fib_node->fib_entry = fib_entry;

	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
	if (err)
		goto err_fib_entry_update;

	return 0;

err_fib_entry_update:
	fib_node->fib_entry = NULL;
	return err;
}

static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					    struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
	fib_node->fib_entry = NULL;
	return err;
}

static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;

	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
	__mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}

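/* An entry from the main table must not replace one from the local table,
 * since the local table is consulted before the main table during lookup.
 */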
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib4_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
		return false;

	return true;
}

static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
			     const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (fen_info->fi->nh &&
	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib_node_entry_link;
	}

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
				     common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (!fib4_entry)
		return 0;
	fib_node = fib4_entry->common.fib_node;

	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
	/* Multicast routes aren't supported, so ignore them. Neighbour
	 * Discovery packets are specifically trapped.
	 */
	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
		return true;

	/* Cloned routes are irrelevant in the forwarding path. */
	if (rt->fib6_flags & RTF_CACHE)
		return true;

	return false;
}

static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* In case of route replace, replaced route is deleted with
	 * no notification. Take reference to prevent accessing freed
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
	fib6_info_hold(rt);

	return mlxsw_sp_rt6;
}

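/* fib6_info_release() is only usable when IPv6 support is enabled, so
 * provide an empty stub for CONFIG_IPV6=n builds.
 */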
#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	fib6_info_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif

static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;

	if (!mlxsw_sp_rt6->rt->nh)
		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}

static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->fib6_nh->fib_nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}

static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
#if IS_ENABLED(CONFIG_IPV6)
	nh->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_nexthop_type_init;

	return 0;

err_nexthop_type_init:
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}

static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct fib6_info *rt)
{
	return rt->fib6_nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}

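/* Allocate the nexthop group info and populate it with one nexthop per
 * route in the entry's rt6 list. The gateway indication is derived from
 * the first route.
 */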
static int
mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
		       GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nhgi->count = fib6_entry->nrt6;
	for (i = 0; i < nhgi->count; i++) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nhgi->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}
	nh_grp->nhgi = nhgi;
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;

	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (rt->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   rt->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	/* The route and the nexthop are described by the same struct, so we
	 * need to update the nexthop offload indication for the new route.
	 */
	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);

out:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}

static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}

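/* Rebind the entry to a nexthop group matching its current set of rt6
 * entries and update the device, rolling back to the old group on failure.
 */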
static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	int err;

	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
					  &fib6_entry->common, false);
	if (err)
		goto err_fib_entry_update;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_entry_update:
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
	return err;
}

static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_unwind;
		}

		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
	if (err)
		goto err_rt6_unwind;

	return 0;

err_rt6_unwind:
	for (; i > 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	return err;
}

static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
							   rt_arr[i]);
		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
			continue;

		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}

	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}

static int
mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   const struct fib6_info *rt)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
	u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
						       MLXSW_SP_L3_PROTO_IPV6,
						       dip);

	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
						     ipip_entry);
	}
	if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
					 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
		u32 tunnel_index;

		tunnel_index = router->nve_decap_config.tunnel_index;
		fib_entry->decap.tunnel_index = tunnel_index;
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	}

	return 0;
}

static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					const struct fib6_info *rt)
{
	if (rt->fib6_flags & RTF_LOCAL)
		return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
							  rt);
	if (rt->fib6_flags & RTF_ANYCAST)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->fib6_type == RTN_BLACKHOLE)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
	else if (rt->fib6_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
	else if (fib_entry->nh_group->nhgi->gateway)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;

	return 0;
}

static void
mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;

	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
				 list) {
		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
	if (IS_ERR(fib_entry->priv)) {
		err = PTR_ERR(fib_entry->priv);
		goto err_fib_entry_priv_create;
	}

	INIT_LIST_HEAD(&fib6_entry->rt6_list);

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_unwind;
		}
		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_rt6_unwind;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
	if (err)
		goto err_fib6_entry_type_set;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_fib6_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_rt6_unwind:
	for (; i > 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
err_fib_entry_priv_create:
	kfree(fib6_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
}

static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
	kfree(fib6_entry);
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct fib6_info *cmp_rt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
					    sizeof(rt->fib6_dst.addr),
					    rt->fib6_dst.plen);
	if (!fib_node)
		return NULL;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
	    rt->fib6_metric == cmp_rt->fib6_metric &&
	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
		return fib6_entry;

	return NULL;
}

static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *fib6_replaced;
	struct fib6_info *rt, *rt_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib6_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib6_entry,
				     common);
	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
		return false;

	return true;
}

static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
						nrt6);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_link;

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
				     common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return -EINVAL;
	}

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
	if (err)
		goto err_fib6_entry_nexthop_add;

	return 0;

err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn if
	 * route was not found.
	 */
	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (!fib6_entry)
		return 0;

	/* If not all the nexthops are deleted, then only reduce the nexthop
	 * group.
	 */
	if (nrt6 != fib6_entry->nrt6) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
		return 0;
	}

	fib_node = fib6_entry->common.fib_node;

	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static struct mlxsw_sp_mr_table *
mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
{
	if (family == RTNL_FAMILY_IPMR)
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
	else
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}

static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}

static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}

static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

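/* Flush all multicast and unicast routes from every virtual router in use.
 * Flushing the IPv4 routes may release the virtual router, so it is
 * re-checked before the IPv6 routes are flushed.
 */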
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i, j;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}

struct mlxsw_sp_fib6_event {
	struct fib6_info **rt_arr;
	unsigned int nrt6;
};

struct mlxsw_sp_fib_event {
	struct list_head list; /* node in fib queue */
	union {
		struct mlxsw_sp_fib6_event fib6_event;
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
		struct mfc_entry_notifier_info men_info;
		struct vif_entry_notifier_info ven_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
	int family;
};

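/* Take a reference on the route and each of its siblings, since the event
 * is processed asynchronously in a work queue. Called in atomic context.
 */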
static int
mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
				struct fib6_entry_notifier_info *fen6_info)
{
	struct fib6_info *rt = fen6_info->rt;
	struct fib6_info **rt_arr;
	struct fib6_info *iter;
	unsigned int nrt6;
	int i = 0;

	nrt6 = fen6_info->nsiblings + 1;

	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
	if (!rt_arr)
		return -ENOMEM;

	fib6_event->rt_arr = rt_arr;
	fib6_event->nrt6 = nrt6;

	rt_arr[0] = rt;
	fib6_info_hold(rt);

	if (!fen6_info->nsiblings)
		return 0;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (i == fen6_info->nsiblings)
			break;

		rt_arr[i + 1] = iter;
		fib6_info_hold(iter);
		i++;
	}
	WARN_ON_ONCE(i != fen6_info->nsiblings);

	return 0;
}

static void
mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
{
	int i;

	for (i = 0; i < fib6_event->nrt6; i++)
		mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
	kfree(fib6_event->rt_arr);
}

static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       struct mlxsw_sp_fib_event *fib_event)
{
	int err;

	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
							      &fib_event->fen_info);
		}
		fib_info_put(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
		if (err)
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
		fib_info_put(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
		break;
	}
}

static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       struct mlxsw_sp_fib_event *fib_event)
{
	struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
	int err;

	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
						   fib_event->fib6_event.nrt6);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_event->rt_arr,
							      fib6_event->nrt6);
		}
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	case FIB_EVENT_ENTRY_APPEND:
		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
						  fib_event->fib6_event.nrt6);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_event->rt_arr,
							      fib6_event->nrt6);
		}
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	case FIB_EVENT_ENTRY_DEL:
		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
					       fib_event->fib6_event.nrt6);
		if (err)
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	}
}

static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_fib_event *fib_event)
{
	bool replace;
	int err;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);
	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;

		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
		mr_cache_put(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
		mr_cache_put(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
						    &fib_event->ven_info);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
		dev_put(fib_event->ven_info.dev);
		break;
	case FIB_EVENT_VIF_DEL:
		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
		dev_put(fib_event->ven_info.dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
}

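/* Process the queued FIB events under the router lock. Consecutive events
 * of the same family and type may be bulked into a single register write
 * using the shared operation context.
 */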
mlxsw_sp_router_fib_event_work(struct work_struct * work)7755 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
7756 {
7757 struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
7758 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
7759 struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
7760 struct mlxsw_sp_fib_event *next_fib_event;
7761 struct mlxsw_sp_fib_event *fib_event;
7762 int last_family = AF_UNSPEC;
7763 LIST_HEAD(fib_event_queue);
7764
7765 spin_lock_bh(&router->fib_event_queue_lock);
7766 list_splice_init(&router->fib_event_queue, &fib_event_queue);
7767 spin_unlock_bh(&router->fib_event_queue_lock);
7768
7769 /* Router lock is held here to make sure per-instance
7770 * operation context is not used in between FIB4/6 events
7771 * processing.
7772 */
7773 mutex_lock(&router->lock);
7774 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
7775 list_for_each_entry_safe(fib_event, next_fib_event,
7776 &fib_event_queue, list) {
7777 /* Check if the next entry in the queue exists and it is
7778 * of the same type (family and event) as the currect one.
7779 * In that case it is permitted to do the bulking
7780 * of multiple FIB entries to a single register write.
7781 */
7782 op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
7783 fib_event->family == next_fib_event->family &&
7784 fib_event->event == next_fib_event->event;
7785 op_ctx->event = fib_event->event;
7786
7787 /* In case family of this and the previous entry are different, context
7788 * reinitialization is going to be needed now, indicate that.
7789 * Note that since last_family is initialized to AF_UNSPEC, this is always
7790 * going to happen for the first entry processed in the work.
7791 */
7792 if (fib_event->family != last_family)
7793 op_ctx->initialized = false;
7794
7795 switch (fib_event->family) {
7796 case AF_INET:
7797 mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
7798 fib_event);
7799 break;
7800 case AF_INET6:
7801 mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
7802 fib_event);
7803 break;
7804 case RTNL_FAMILY_IP6MR:
7805 case RTNL_FAMILY_IPMR:
7806 /* Unlock here as inside FIBMR the lock is taken again
7807 * under RTNL. The per-instance operation context
7808 * is not used by FIBMR.
7809 */
7810 mutex_unlock(&router->lock);
7811 mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
7812 fib_event);
7813 mutex_lock(&router->lock);
7814 break;
7815 default:
7816 WARN_ON_ONCE(1);
7817 }
7818 last_family = fib_event->family;
7819 kfree(fib_event);
7820 cond_resched();
7821 }
7822 WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
7823 mutex_unlock(&router->lock);
7824 }
7825
mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event * fib_event,struct fib_notifier_info * info)7826 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
7827 struct fib_notifier_info *info)
7828 {
7829 struct fib_entry_notifier_info *fen_info;
7830 struct fib_nh_notifier_info *fnh_info;
7831
7832 switch (fib_event->event) {
7833 case FIB_EVENT_ENTRY_REPLACE:
7834 case FIB_EVENT_ENTRY_DEL:
7835 fen_info = container_of(info, struct fib_entry_notifier_info,
7836 info);
7837 fib_event->fen_info = *fen_info;
7838 /* Take reference on fib_info to prevent it from being
7839 * freed while event is queued. Release it afterwards.
7840 */
7841 fib_info_hold(fib_event->fen_info.fi);
7842 break;
7843 case FIB_EVENT_NH_ADD:
7844 case FIB_EVENT_NH_DEL:
7845 fnh_info = container_of(info, struct fib_nh_notifier_info,
7846 info);
7847 fib_event->fnh_info = *fnh_info;
7848 fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
7849 break;
7850 }
7851 }
7852
mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event * fib_event,struct fib_notifier_info * info)7853 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
7854 struct fib_notifier_info *info)
7855 {
7856 struct fib6_entry_notifier_info *fen6_info;
7857 int err;
7858
7859 switch (fib_event->event) {
7860 case FIB_EVENT_ENTRY_REPLACE:
7861 case FIB_EVENT_ENTRY_APPEND:
7862 case FIB_EVENT_ENTRY_DEL:
7863 fen6_info = container_of(info, struct fib6_entry_notifier_info,
7864 info);
7865 err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
7866 fen6_info);
7867 if (err)
7868 return err;
7869 break;
7870 }
7871
7872 return 0;
7873 }
7874
7875 static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event * fib_event,struct fib_notifier_info * info)7876 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
7877 struct fib_notifier_info *info)
7878 {
7879 switch (fib_event->event) {
7880 case FIB_EVENT_ENTRY_REPLACE:
7881 case FIB_EVENT_ENTRY_ADD:
7882 case FIB_EVENT_ENTRY_DEL:
7883 memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
7884 mr_cache_hold(fib_event->men_info.mfc);
7885 break;
7886 case FIB_EVENT_VIF_ADD:
7887 case FIB_EVENT_VIF_DEL:
7888 memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
7889 dev_hold(fib_event->ven_info.dev);
7890 break;
7891 }
7892 }
7893
mlxsw_sp_router_fib_rule_event(unsigned long event,struct fib_notifier_info * info,struct mlxsw_sp * mlxsw_sp)7894 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7895 struct fib_notifier_info *info,
7896 struct mlxsw_sp *mlxsw_sp)
7897 {
7898 struct netlink_ext_ack *extack = info->extack;
7899 struct fib_rule_notifier_info *fr_info;
7900 struct fib_rule *rule;
7901 int err = 0;
7902
7903 /* nothing to do at the moment */
7904 if (event == FIB_EVENT_RULE_DEL)
7905 return 0;
7906
7907 fr_info = container_of(info, struct fib_rule_notifier_info, info);
7908 rule = fr_info->rule;
7909
7910 /* Rule only affects locally generated traffic */
7911 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7912 return 0;
7913
7914 switch (info->family) {
7915 case AF_INET:
7916 if (!fib4_rule_default(rule) && !rule->l3mdev)
7917 err = -EOPNOTSUPP;
7918 break;
7919 case AF_INET6:
7920 if (!fib6_rule_default(rule) && !rule->l3mdev)
7921 err = -EOPNOTSUPP;
7922 break;
7923 case RTNL_FAMILY_IPMR:
7924 if (!ipmr_rule_default(rule) && !rule->l3mdev)
7925 err = -EOPNOTSUPP;
7926 break;
7927 case RTNL_FAMILY_IP6MR:
7928 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7929 err = -EOPNOTSUPP;
7930 break;
7931 }
7932
7933 if (err < 0)
7934 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7935
7936 return err;
7937 }
7938
7939 /* Called with rcu_read_lock() */
mlxsw_sp_router_fib_event(struct notifier_block * nb,unsigned long event,void * ptr)7940 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7941 unsigned long event, void *ptr)
7942 {
7943 struct mlxsw_sp_fib_event *fib_event;
7944 struct fib_notifier_info *info = ptr;
7945 struct mlxsw_sp_router *router;
7946 int err;
7947
7948 if ((info->family != AF_INET && info->family != AF_INET6 &&
7949 info->family != RTNL_FAMILY_IPMR &&
7950 info->family != RTNL_FAMILY_IP6MR))
7951 return NOTIFY_DONE;
7952
7953 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7954
7955 switch (event) {
7956 case FIB_EVENT_RULE_ADD:
7957 case FIB_EVENT_RULE_DEL:
7958 err = mlxsw_sp_router_fib_rule_event(event, info,
7959 router->mlxsw_sp);
7960 return notifier_from_errno(err);
7961 case FIB_EVENT_ENTRY_ADD:
7962 case FIB_EVENT_ENTRY_REPLACE:
7963 case FIB_EVENT_ENTRY_APPEND:
7964 if (info->family == AF_INET) {
7965 struct fib_entry_notifier_info *fen_info = ptr;
7966
7967 if (fen_info->fi->fib_nh_is_v6) {
7968 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7969 return notifier_from_errno(-EINVAL);
7970 }
7971 }
7972 break;
7973 }
7974
7975 fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
7976 if (!fib_event)
7977 return NOTIFY_BAD;
7978
7979 fib_event->mlxsw_sp = router->mlxsw_sp;
7980 fib_event->event = event;
7981 fib_event->family = info->family;
7982
7983 switch (info->family) {
7984 case AF_INET:
7985 mlxsw_sp_router_fib4_event(fib_event, info);
7986 break;
7987 case AF_INET6:
7988 err = mlxsw_sp_router_fib6_event(fib_event, info);
7989 if (err)
7990 goto err_fib_event;
7991 break;
7992 case RTNL_FAMILY_IP6MR:
7993 case RTNL_FAMILY_IPMR:
7994 mlxsw_sp_router_fibmr_event(fib_event, info);
7995 break;
7996 }
7997
7998 /* Enqueue the event and trigger the work */
7999 spin_lock_bh(&router->fib_event_queue_lock);
8000 list_add_tail(&fib_event->list, &router->fib_event_queue);
8001 spin_unlock_bh(&router->fib_event_queue_lock);
8002 mlxsw_core_schedule_work(&router->fib_event_work);
8003
8004 return NOTIFY_DONE;
8005
8006 err_fib_event:
8007 kfree(fib_event);
8008 return NOTIFY_BAD;
8009 }
8010
8011 static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)8012 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
8013 const struct net_device *dev)
8014 {
8015 int i;
8016
8017 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
8018 if (mlxsw_sp->router->rifs[i] &&
8019 mlxsw_sp->router->rifs[i]->dev == dev)
8020 return mlxsw_sp->router->rifs[i];
8021
8022 return NULL;
8023 }
8024
mlxsw_sp_rif_exists(struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)8025 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
8026 const struct net_device *dev)
8027 {
8028 struct mlxsw_sp_rif *rif;
8029
8030 mutex_lock(&mlxsw_sp->router->lock);
8031 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8032 mutex_unlock(&mlxsw_sp->router->lock);
8033
8034 return rif;
8035 }
8036
mlxsw_sp_rif_vid(struct mlxsw_sp * mlxsw_sp,const struct net_device * dev)8037 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
8038 {
8039 struct mlxsw_sp_rif *rif;
8040 u16 vid = 0;
8041
8042 mutex_lock(&mlxsw_sp->router->lock);
8043 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8044 if (!rif)
8045 goto out;
8046
8047 /* We only return the VID for VLAN RIFs. Otherwise we return an
8048 * invalid value (0).
8049 */
8050 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
8051 goto out;
8052
8053 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
8054
8055 out:
8056 mutex_unlock(&mlxsw_sp->router->lock);
8057 return vid;
8058 }
8059
mlxsw_sp_router_rif_disable(struct mlxsw_sp * mlxsw_sp,u16 rif)8060 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
8061 {
8062 char ritr_pl[MLXSW_REG_RITR_LEN];
8063 int err;
8064
8065 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
8066 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8067 if (err)
8068 return err;
8069
8070 mlxsw_reg_ritr_enable_set(ritr_pl, false);
8071 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8072 }
8073
8074 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
8075 struct mlxsw_sp_rif *rif)
8076 {
8077 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
8078 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
8079 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
8080 }
8081
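/* Decide whether an address event should change the RIF configuration.
 * NETDEV_UP creates a RIF only if the netdev does not have one yet;
 * NETDEV_DOWN destroys the RIF only once the netdev has neither IPv4
 * nor IPv6 addresses left, and is not an L3 slave (whose RIF follows
 * the VRF master instead). For example (hypothetical port name):
 *
 *   # ip addr add 192.0.2.1/24 dev swp1    -> NETDEV_UP, RIF created
 *   # ip addr add 2001:db8::1/64 dev swp1  -> RIF already exists
 *   # ip addr del 192.0.2.1/24 dev swp1    -> v6 address left, RIF kept
 *   # ip addr del 2001:db8::1/64 dev swp1  -> last address, RIF destroyed
 */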
8082 static bool
8083 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
8084 unsigned long event)
8085 {
8086 struct inet6_dev *inet6_dev;
8087 bool addr_list_empty = true;
8088 struct in_device *idev;
8089
8090 switch (event) {
8091 case NETDEV_UP:
8092 return rif == NULL;
8093 case NETDEV_DOWN:
8094 rcu_read_lock();
8095 idev = __in_dev_get_rcu(dev);
8096 if (idev && idev->ifa_list)
8097 addr_list_empty = false;
8098
8099 inet6_dev = __in6_dev_get(dev);
8100 if (addr_list_empty && inet6_dev &&
8101 !list_empty(&inet6_dev->addr_list))
8102 addr_list_empty = false;
8103 rcu_read_unlock();
8104
8105 /* macvlans do not have a RIF, but rather piggy back on the
8106 * RIF of their lower device.
8107 */
8108 if (netif_is_macvlan(dev) && addr_list_empty)
8109 return true;
8110
8111 if (rif && addr_list_empty &&
8112 !netif_is_l3_slave(rif->dev))
8113 return true;
8114 /* It is possible we already removed the RIF ourselves
8115 * if it was assigned to a netdev that is now a bridge
8116 * or LAG slave.
8117 */
8118 return false;
8119 }
8120
8121 return false;
8122 }
8123
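/* Map a netdev to the RIF type that should be used for it. IP-in-IP
 * overlay devices get a loopback RIF; otherwise the type follows the
 * underlying FID: VLAN uppers of bridges and VLAN-aware bridges use
 * 802.1Q FIDs, VLAN-unaware bridges use 802.1D FIDs, and plain
 * ports/LAGs use router FIDs (rFIDs).
 */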
8124 static enum mlxsw_sp_rif_type
8125 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8126 const struct net_device *dev)
8127 {
8128 enum mlxsw_sp_fid_type type;
8129
8130 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8131 return MLXSW_SP_RIF_TYPE_IPIP_LB;
8132
8133 /* Otherwise RIF type is derived from the type of the underlying FID. */
8134 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8135 type = MLXSW_SP_FID_TYPE_8021Q;
8136 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8137 type = MLXSW_SP_FID_TYPE_8021Q;
8138 else if (netif_is_bridge_master(dev))
8139 type = MLXSW_SP_FID_TYPE_8021D;
8140 else
8141 type = MLXSW_SP_FID_TYPE_RFID;
8142
8143 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8144 }
8145
8146 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
8147 {
8148 int i;
8149
8150 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8151 if (!mlxsw_sp->router->rifs[i]) {
8152 *p_rif_index = i;
8153 return 0;
8154 }
8155 }
8156
8157 return -ENOBUFS;
8158 }
8159
8160 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8161 u16 vr_id,
8162 struct net_device *l3_dev)
8163 {
8164 struct mlxsw_sp_rif *rif;
8165
8166 rif = kzalloc(rif_size, GFP_KERNEL);
8167 if (!rif)
8168 return NULL;
8169
8170 INIT_LIST_HEAD(&rif->nexthop_list);
8171 INIT_LIST_HEAD(&rif->neigh_list);
8172 if (l3_dev) {
8173 ether_addr_copy(rif->addr, l3_dev->dev_addr);
8174 rif->mtu = l3_dev->mtu;
8175 rif->dev = l3_dev;
8176 }
8177 rif->vr_id = vr_id;
8178 rif->rif_index = rif_index;
8179
8180 return rif;
8181 }
8182
8183 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
8184 u16 rif_index)
8185 {
8186 return mlxsw_sp->router->rifs[rif_index];
8187 }
8188
8189 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
8190 {
8191 return rif->rif_index;
8192 }
8193
8194 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8195 {
8196 return lb_rif->common.rif_index;
8197 }
8198
8199 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8200 {
8201 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
8202 struct mlxsw_sp_vr *ul_vr;
8203
8204 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
8205 if (WARN_ON(IS_ERR(ul_vr)))
8206 return 0;
8207
8208 return ul_vr->id;
8209 }
8210
8211 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
8212 {
8213 return lb_rif->ul_rif_id;
8214 }
8215
8216 static bool
8217 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
8218 {
8219 return mlxsw_sp_rif_counter_valid_get(rif,
8220 MLXSW_SP_RIF_COUNTER_EGRESS) &&
8221 mlxsw_sp_rif_counter_valid_get(rif,
8222 MLXSW_SP_RIF_COUNTER_INGRESS);
8223 }
8224
8225 static int
8226 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
8227 {
8228 int err;
8229
8230 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8231 if (err)
8232 return err;
8233
8234 /* Clear stale data. */
8235 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8236 MLXSW_SP_RIF_COUNTER_INGRESS,
8237 NULL);
8238 if (err)
8239 goto err_clear_ingress;
8240
8241 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8242 if (err)
8243 goto err_alloc_egress;
8244
8245 /* Clear stale data. */
8246 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8247 MLXSW_SP_RIF_COUNTER_EGRESS,
8248 NULL);
8249 if (err)
8250 goto err_clear_egress;
8251
8252 return 0;
8253
8254 err_clear_egress:
8255 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8256 err_alloc_egress:
8257 err_clear_ingress:
8258 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8259 return err;
8260 }
8261
8262 static void
8263 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
8264 {
8265 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
8266 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
8267 }
8268
8269 static void
8270 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
8271 struct netdev_notifier_offload_xstats_info *info)
8272 {
8273 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8274 return;
8275 netdev_offload_xstats_report_used(info->report_used);
8276 }
8277
8278 static int
8279 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
8280 struct rtnl_hw_stats64 *p_stats)
8281 {
8282 struct mlxsw_sp_rif_counter_set_basic ingress;
8283 struct mlxsw_sp_rif_counter_set_basic egress;
8284 int err;
8285
8286 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8287 MLXSW_SP_RIF_COUNTER_INGRESS,
8288 &ingress);
8289 if (err)
8290 return err;
8291
8292 err = mlxsw_sp_rif_counter_fetch_clear(rif,
8293 MLXSW_SP_RIF_COUNTER_EGRESS,
8294 &egress);
8295 if (err)
8296 return err;
8297
8298 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
8299 ((SET.good_unicast_ ## SFX) + \
8300 (SET.good_multicast_ ## SFX) + \
8301 (SET.good_broadcast_ ## SFX))
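	/* For instance, MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets) expands
	 * to ingress.good_unicast_packets + ingress.good_multicast_packets +
	 * ingress.good_broadcast_packets.
	 */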
8302
8303 p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
8304 p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8305 p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8306 p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8307 p_stats->rx_errors = ingress.error_packets;
8308 p_stats->tx_errors = egress.error_packets;
8309 p_stats->rx_dropped = ingress.discard_packets;
8310 p_stats->tx_dropped = egress.discard_packets;
8311 p_stats->multicast = ingress.good_multicast_packets +
8312 ingress.good_broadcast_packets;
8313
8314 #undef MLXSW_SP_ROUTER_ALL_GOOD
8315
8316 return 0;
8317 }
8318
8319 static int
8320 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8321 struct netdev_notifier_offload_xstats_info *info)
8322 {
8323 struct rtnl_hw_stats64 stats = {};
8324 int err;
8325
8326 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8327 return 0;
8328
8329 err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8330 if (err)
8331 return err;
8332
8333 netdev_offload_xstats_report_delta(info->report_delta, &stats);
8334 return 0;
8335 }
8336
8337 struct mlxsw_sp_router_hwstats_notify_work {
8338 struct work_struct work;
8339 struct net_device *dev;
8340 };
8341
8342 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8343 {
8344 struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8345 container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8346 work);
8347
8348 rtnl_lock();
8349 rtnl_offload_xstats_notify(hws_work->dev);
8350 rtnl_unlock();
8351 dev_put(hws_work->dev);
8352 kfree(hws_work);
8353 }
8354
8355 static void
8356 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8357 {
8358 struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8359
8360 /* To collect notification payload, the core ends up sending another
8361 * notifier block message, which would deadlock on the attempt to
8362 * acquire the router lock again. Just postpone the notification until
8363 * later.
8364 */
8365
8366 hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8367 if (!hws_work)
8368 return;
8369
8370 INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8371 dev_hold(dev);
8372 hws_work->dev = dev;
8373 mlxsw_core_schedule_work(&hws_work->work);
8374 }
8375
8376 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8377 {
8378 return rif->dev->ifindex;
8379 }
8380
8381 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
8382 {
8383 return rif->dev;
8384 }
8385
8386 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8387 {
8388 struct rtnl_hw_stats64 stats = {};
8389
8390 if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8391 netdev_offload_xstats_push_delta(rif->dev,
8392 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8393 &stats);
8394 }
8395
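/* Create a RIF for a netdev: bind the matching virtual router, allocate
 * a free RIF index, let the type-specific ops attach a FID and program
 * the hardware, register the RIF with each multicast routing table and
 * finally set up counters. The error path unwinds in reverse order.
 */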
8396 static struct mlxsw_sp_rif *
8397 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8398 const struct mlxsw_sp_rif_params *params,
8399 struct netlink_ext_ack *extack)
8400 {
8401 u32 tb_id = l3mdev_fib_table(params->dev);
8402 const struct mlxsw_sp_rif_ops *ops;
8403 struct mlxsw_sp_fid *fid = NULL;
8404 enum mlxsw_sp_rif_type type;
8405 struct mlxsw_sp_rif *rif;
8406 struct mlxsw_sp_vr *vr;
8407 u16 rif_index;
8408 int i, err;
8409
8410 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8411 ops = mlxsw_sp->router->rif_ops_arr[type];
8412
8413 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8414 if (IS_ERR(vr))
8415 return ERR_CAST(vr);
8416 vr->rif_count++;
8417
8418 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8419 if (err) {
8420 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8421 goto err_rif_index_alloc;
8422 }
8423
8424 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
8425 if (!rif) {
8426 err = -ENOMEM;
8427 goto err_rif_alloc;
8428 }
8429 dev_hold(rif->dev);
8430 mlxsw_sp->router->rifs[rif_index] = rif;
8431 rif->mlxsw_sp = mlxsw_sp;
8432 rif->ops = ops;
8433
8434 if (ops->fid_get) {
8435 fid = ops->fid_get(rif, extack);
8436 if (IS_ERR(fid)) {
8437 err = PTR_ERR(fid);
8438 goto err_fid_get;
8439 }
8440 rif->fid = fid;
8441 }
8442
8443 if (ops->setup)
8444 ops->setup(rif, params);
8445
8446 err = ops->configure(rif, extack);
8447 if (err)
8448 goto err_configure;
8449
8450 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8451 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8452 if (err)
8453 goto err_mr_rif_add;
8454 }
8455
8456 if (netdev_offload_xstats_enabled(rif->dev,
8457 NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8458 err = mlxsw_sp_router_port_l3_stats_enable(rif);
8459 if (err)
8460 goto err_stats_enable;
8461 mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
8462 } else {
8463 mlxsw_sp_rif_counters_alloc(rif);
8464 }
8465
8466 return rif;
8467
8468 err_stats_enable:
8469 err_mr_rif_add:
8470 for (i--; i >= 0; i--)
8471 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8472 ops->deconfigure(rif);
8473 err_configure:
8474 if (fid)
8475 mlxsw_sp_fid_put(fid);
8476 err_fid_get:
8477 mlxsw_sp->router->rifs[rif_index] = NULL;
8478 dev_put(rif->dev);
8479 kfree(rif);
8480 err_rif_alloc:
8481 err_rif_index_alloc:
8482 vr->rif_count--;
8483 mlxsw_sp_vr_put(mlxsw_sp, vr);
8484 return ERR_PTR(err);
8485 }
8486
8487 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8488 {
8489 const struct mlxsw_sp_rif_ops *ops = rif->ops;
8490 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8491 struct mlxsw_sp_fid *fid = rif->fid;
8492 struct mlxsw_sp_vr *vr;
8493 int i;
8494
8495 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8496 vr = &mlxsw_sp->router->vrs[rif->vr_id];
8497
8498 if (netdev_offload_xstats_enabled(rif->dev,
8499 NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8500 mlxsw_sp_rif_push_l3_stats(rif);
8501 mlxsw_sp_router_port_l3_stats_disable(rif);
8502 mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
8503 } else {
8504 mlxsw_sp_rif_counters_free(rif);
8505 }
8506
8507 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8508 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8509 ops->deconfigure(rif);
8510 if (fid)
8511 /* Loopback RIFs are not associated with a FID. */
8512 mlxsw_sp_fid_put(fid);
8513 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8514 dev_put(rif->dev);
8515 kfree(rif);
8516 vr->rif_count--;
8517 mlxsw_sp_vr_put(mlxsw_sp, vr);
8518 }
8519
8520 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8521 struct net_device *dev)
8522 {
8523 struct mlxsw_sp_rif *rif;
8524
8525 mutex_lock(&mlxsw_sp->router->lock);
8526 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8527 if (!rif)
8528 goto out;
8529 mlxsw_sp_rif_destroy(rif);
8530 out:
8531 mutex_unlock(&mlxsw_sp->router->lock);
8532 }
8533
8534 static void
8535 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8536 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8537 {
8538 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8539
8540 params->vid = mlxsw_sp_port_vlan->vid;
8541 params->lag = mlxsw_sp_port->lagged;
8542 if (params->lag)
8543 params->lag_id = mlxsw_sp_port->lag_id;
8544 else
8545 params->system_port = mlxsw_sp_port->local_port;
8546 }
8547
8548 static struct mlxsw_sp_rif_subport *
8549 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8550 {
8551 return container_of(rif, struct mlxsw_sp_rif_subport, common);
8552 }
8553
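/* Sub-port RIFs are reference counted: every {port, VID} that joins the
 * same L3 netdev shares a single RIF, which is destroyed only when the
 * last user calls mlxsw_sp_rif_subport_put().
 */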
8554 static struct mlxsw_sp_rif *
8555 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8556 const struct mlxsw_sp_rif_params *params,
8557 struct netlink_ext_ack *extack)
8558 {
8559 struct mlxsw_sp_rif_subport *rif_subport;
8560 struct mlxsw_sp_rif *rif;
8561
8562 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8563 if (!rif)
8564 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8565
8566 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8567 refcount_inc(&rif_subport->ref_count);
8568 return rif;
8569 }
8570
8571 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8572 {
8573 struct mlxsw_sp_rif_subport *rif_subport;
8574
8575 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8576 if (!refcount_dec_and_test(&rif_subport->ref_count))
8577 return;
8578
8579 mlxsw_sp_rif_destroy(rif);
8580 }
8581
8582 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8583 struct mlxsw_sp_rif_mac_profile *profile,
8584 struct netlink_ext_ack *extack)
8585 {
8586 u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8587 struct mlxsw_sp_router *router = mlxsw_sp->router;
8588 int id;
8589
8590 id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8591 max_rif_mac_profiles, GFP_KERNEL);
8592
8593 if (id >= 0) {
8594 profile->id = id;
8595 return 0;
8596 }
8597
8598 if (id == -ENOSPC)
8599 NL_SET_ERR_MSG_MOD(extack,
8600 "Exceeded number of supported router interface MAC profiles");
8601
8602 return id;
8603 }
8604
8605 static struct mlxsw_sp_rif_mac_profile *
8606 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8607 {
8608 struct mlxsw_sp_rif_mac_profile *profile;
8609
8610 profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8611 mac_profile);
8612 WARN_ON(!profile);
8613 return profile;
8614 }
8615
8616 static struct mlxsw_sp_rif_mac_profile *
8617 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8618 {
8619 struct mlxsw_sp_rif_mac_profile *profile;
8620
8621 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8622 if (!profile)
8623 return NULL;
8624
8625 ether_addr_copy(profile->mac_prefix, mac);
8626 refcount_set(&profile->ref_count, 1);
8627 return profile;
8628 }
8629
8630 static struct mlxsw_sp_rif_mac_profile *
8631 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8632 {
8633 struct mlxsw_sp_router *router = mlxsw_sp->router;
8634 struct mlxsw_sp_rif_mac_profile *profile;
8635 int id;
8636
8637 idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8638 if (ether_addr_equal_masked(profile->mac_prefix, mac,
8639 mlxsw_sp->mac_mask))
8640 return profile;
8641 }
8642
8643 return NULL;
8644 }
8645
8646 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8647 {
8648 const struct mlxsw_sp *mlxsw_sp = priv;
8649
8650 return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8651 }
8652
8653 static struct mlxsw_sp_rif_mac_profile *
8654 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8655 struct netlink_ext_ack *extack)
8656 {
8657 struct mlxsw_sp_rif_mac_profile *profile;
8658 int err;
8659
8660 profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8661 if (!profile)
8662 return ERR_PTR(-ENOMEM);
8663
8664 err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8665 if (err)
8666 goto profile_index_alloc_err;
8667
8668 atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8669 return profile;
8670
8671 profile_index_alloc_err:
8672 kfree(profile);
8673 return ERR_PTR(err);
8674 }
8675
8676 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8677 u8 mac_profile)
8678 {
8679 struct mlxsw_sp_rif_mac_profile *profile;
8680
8681 atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8682 profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8683 kfree(profile);
8684 }
8685
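/* Get-or-create: reuse an existing MAC profile whose prefix matches the
 * requested MAC under the device's MAC mask, otherwise create a new
 * profile. The returned ID must be released with
 * mlxsw_sp_rif_mac_profile_put().
 */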
8686 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8687 const char *mac, u8 *p_mac_profile,
8688 struct netlink_ext_ack *extack)
8689 {
8690 struct mlxsw_sp_rif_mac_profile *profile;
8691
8692 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8693 if (profile) {
8694 refcount_inc(&profile->ref_count);
8695 goto out;
8696 }
8697
8698 profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8699 if (IS_ERR(profile))
8700 return PTR_ERR(profile);
8701
8702 out:
8703 *p_mac_profile = profile->id;
8704 return 0;
8705 }
8706
8707 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8708 u8 mac_profile)
8709 {
8710 struct mlxsw_sp_rif_mac_profile *profile;
8711
8712 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8713 mac_profile);
8714 if (WARN_ON(!profile))
8715 return;
8716
8717 if (!refcount_dec_and_test(&profile->ref_count))
8718 return;
8719
8720 mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8721 }
8722
8723 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8724 {
8725 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8726 struct mlxsw_sp_rif_mac_profile *profile;
8727
8728 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8729 rif->mac_profile_id);
8730 if (WARN_ON(!profile))
8731 return false;
8732
8733 return refcount_read(&profile->ref_count) > 1;
8734 }
8735
8736 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8737 const char *new_mac)
8738 {
8739 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8740 struct mlxsw_sp_rif_mac_profile *profile;
8741
8742 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8743 rif->mac_profile_id);
8744 if (WARN_ON(!profile))
8745 return -EINVAL;
8746
8747 ether_addr_copy(profile->mac_prefix, new_mac);
8748 return 0;
8749 }
8750
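/* If the RIF is the sole user of its MAC profile and no other profile
 * already matches the new MAC, the profile is edited in place.
 * Otherwise a reference is taken on a matching (possibly new) profile
 * and the old one is released.
 */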
8751 static int
8752 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8753 struct mlxsw_sp_rif *rif,
8754 const char *new_mac,
8755 struct netlink_ext_ack *extack)
8756 {
8757 u8 mac_profile;
8758 int err;
8759
8760 if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8761 !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8762 return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8763
8764 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8765 &mac_profile, extack);
8766 if (err)
8767 return err;
8768
8769 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8770 rif->mac_profile_id = mac_profile;
8771 return 0;
8772 }
8773
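/* Join a {port, VID} to a router netdev: take a reference on the
 * sub-port RIF and its rFID, map the {port, VID} to that FID, and put
 * the VID into forwarding state with learning disabled, as the traffic
 * is now routed rather than bridged.
 */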
8774 static int
8775 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8776 struct net_device *l3_dev,
8777 struct netlink_ext_ack *extack)
8778 {
8779 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8780 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8781 struct mlxsw_sp_rif_params params = {
8782 .dev = l3_dev,
8783 };
8784 u16 vid = mlxsw_sp_port_vlan->vid;
8785 struct mlxsw_sp_rif *rif;
8786 struct mlxsw_sp_fid *fid;
8787 int err;
8788
8789 	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8790 	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8791 if (IS_ERR(rif))
8792 return PTR_ERR(rif);
8793
8794 /* FID was already created, just take a reference */
8795 fid = rif->ops->fid_get(rif, extack);
8796 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8797 if (err)
8798 goto err_fid_port_vid_map;
8799
8800 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8801 if (err)
8802 goto err_port_vid_learning_set;
8803
8804 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8805 BR_STATE_FORWARDING);
8806 if (err)
8807 goto err_port_vid_stp_set;
8808
8809 mlxsw_sp_port_vlan->fid = fid;
8810
8811 return 0;
8812
8813 err_port_vid_stp_set:
8814 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8815 err_port_vid_learning_set:
8816 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8817 err_fid_port_vid_map:
8818 mlxsw_sp_fid_put(fid);
8819 mlxsw_sp_rif_subport_put(rif);
8820 return err;
8821 }
8822
8823 static void
8824 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8825 {
8826 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8827 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8828 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8829 u16 vid = mlxsw_sp_port_vlan->vid;
8830
8831 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8832 return;
8833
8834 mlxsw_sp_port_vlan->fid = NULL;
8835 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8836 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8837 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8838 mlxsw_sp_fid_put(fid);
8839 mlxsw_sp_rif_subport_put(rif);
8840 }
8841
8842 int
8843 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8844 struct net_device *l3_dev,
8845 struct netlink_ext_ack *extack)
8846 {
8847 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8848 struct mlxsw_sp_rif *rif;
8849 int err = 0;
8850
8851 mutex_lock(&mlxsw_sp->router->lock);
8852 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8853 if (!rif)
8854 goto out;
8855
8856 err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8857 extack);
8858 out:
8859 mutex_unlock(&mlxsw_sp->router->lock);
8860 return err;
8861 }
8862
8863 void
8864 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8865 {
8866 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8867
8868 mutex_lock(&mlxsw_sp->router->lock);
8869 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8870 mutex_unlock(&mlxsw_sp->router->lock);
8871 }
8872
8873 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8874 struct net_device *port_dev,
8875 unsigned long event, u16 vid,
8876 struct netlink_ext_ack *extack)
8877 {
8878 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8879 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8880
8881 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8882 if (WARN_ON(!mlxsw_sp_port_vlan))
8883 return -EINVAL;
8884
8885 switch (event) {
8886 case NETDEV_UP:
8887 return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8888 l3_dev, extack);
8889 case NETDEV_DOWN:
8890 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8891 break;
8892 }
8893
8894 return 0;
8895 }
8896
8897 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8898 unsigned long event,
8899 struct netlink_ext_ack *extack)
8900 {
8901 if (netif_is_bridge_port(port_dev) ||
8902 netif_is_lag_port(port_dev) ||
8903 netif_is_ovs_port(port_dev))
8904 return 0;
8905
8906 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8907 MLXSW_SP_DEFAULT_VID, extack);
8908 }
8909
8910 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8911 struct net_device *lag_dev,
8912 unsigned long event, u16 vid,
8913 struct netlink_ext_ack *extack)
8914 {
8915 struct net_device *port_dev;
8916 struct list_head *iter;
8917 int err;
8918
8919 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8920 if (mlxsw_sp_port_dev_check(port_dev)) {
8921 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8922 port_dev,
8923 event, vid,
8924 extack);
8925 if (err)
8926 return err;
8927 }
8928 }
8929
8930 return 0;
8931 }
8932
8933 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8934 unsigned long event,
8935 struct netlink_ext_ack *extack)
8936 {
8937 if (netif_is_bridge_port(lag_dev))
8938 return 0;
8939
8940 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8941 MLXSW_SP_DEFAULT_VID, extack);
8942 }
8943
8944 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8945 struct net_device *l3_dev,
8946 unsigned long event,
8947 struct netlink_ext_ack *extack)
8948 {
8949 struct mlxsw_sp_rif_params params = {
8950 .dev = l3_dev,
8951 };
8952 struct mlxsw_sp_rif *rif;
8953
8954 switch (event) {
8955 case NETDEV_UP:
8956 if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8957 u16 proto;
8958
8959 br_vlan_get_proto(l3_dev, &proto);
8960 if (proto == ETH_P_8021AD) {
8961 NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8962 return -EOPNOTSUPP;
8963 }
8964 }
8965 		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8966 if (IS_ERR(rif))
8967 return PTR_ERR(rif);
8968 break;
8969 case NETDEV_DOWN:
8970 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8971 mlxsw_sp_rif_destroy(rif);
8972 break;
8973 }
8974
8975 return 0;
8976 }
8977
8978 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8979 struct net_device *vlan_dev,
8980 unsigned long event,
8981 struct netlink_ext_ack *extack)
8982 {
8983 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8984 u16 vid = vlan_dev_vlan_id(vlan_dev);
8985
8986 if (netif_is_bridge_port(vlan_dev))
8987 return 0;
8988
8989 if (mlxsw_sp_port_dev_check(real_dev))
8990 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8991 event, vid, extack);
8992 else if (netif_is_lag_master(real_dev))
8993 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8994 vid, extack);
8995 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8996 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8997 extack);
8998
8999 return 0;
9000 }
9001
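/* Per RFC 5798, VRRP virtual router MACs use the fixed prefixes
 * 00:00:5e:00:01:{VRID} (IPv4) and 00:00:5e:00:02:{VRID} (IPv6); only
 * the last byte carries the VRID. E.g. VRID 7 over IPv4 maps to the
 * MAC 00:00:5e:00:01:07.
 */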
9002 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
9003 {
9004 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
9005 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9006
9007 return ether_addr_equal_masked(mac, vrrp4, mask);
9008 }
9009
9010 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
9011 {
9012 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
9013 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
9014
9015 return ether_addr_equal_masked(mac, vrrp6, mask);
9016 }
9017
9018 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9019 const u8 *mac, bool adding)
9020 {
9021 char ritr_pl[MLXSW_REG_RITR_LEN];
9022 u8 vrrp_id = adding ? mac[5] : 0;
9023 int err;
9024
9025 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
9026 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
9027 return 0;
9028
9029 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9030 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9031 if (err)
9032 return err;
9033
9034 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
9035 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
9036 else
9037 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
9038
9039 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9040 }
9041
9042 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
9043 const struct net_device *macvlan_dev,
9044 struct netlink_ext_ack *extack)
9045 {
9046 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9047 struct mlxsw_sp_rif *rif;
9048 int err;
9049
9050 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9051 if (!rif) {
9052 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
9053 return -EOPNOTSUPP;
9054 }
9055
9056 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9057 mlxsw_sp_fid_index(rif->fid), true);
9058 if (err)
9059 return err;
9060
9061 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
9062 macvlan_dev->dev_addr, true);
9063 if (err)
9064 goto err_rif_vrrp_add;
9065
9066 /* Make sure the bridge driver does not have this MAC pointing at
9067 * some other port.
9068 */
9069 if (rif->ops->fdb_del)
9070 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
9071
9072 return 0;
9073
9074 err_rif_vrrp_add:
9075 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9076 mlxsw_sp_fid_index(rif->fid), false);
9077 return err;
9078 }
9079
9080 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9081 const struct net_device *macvlan_dev)
9082 {
9083 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
9084 struct mlxsw_sp_rif *rif;
9085
9086 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
9087 /* If we do not have a RIF, then we already took care of
9088 * removing the macvlan's MAC during RIF deletion.
9089 */
9090 if (!rif)
9091 return;
9092 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
9093 false);
9094 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
9095 mlxsw_sp_fid_index(rif->fid), false);
9096 }
9097
9098 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
9099 const struct net_device *macvlan_dev)
9100 {
9101 mutex_lock(&mlxsw_sp->router->lock);
9102 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9103 mutex_unlock(&mlxsw_sp->router->lock);
9104 }
9105
9106 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
9107 struct net_device *macvlan_dev,
9108 unsigned long event,
9109 struct netlink_ext_ack *extack)
9110 {
9111 switch (event) {
9112 case NETDEV_UP:
9113 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
9114 case NETDEV_DOWN:
9115 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
9116 break;
9117 }
9118
9119 return 0;
9120 }
9121
9122 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
9123 struct net_device *dev,
9124 unsigned long event,
9125 struct netlink_ext_ack *extack)
9126 {
9127 if (mlxsw_sp_port_dev_check(dev))
9128 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
9129 else if (netif_is_lag_master(dev))
9130 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
9131 else if (netif_is_bridge_master(dev))
9132 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
9133 extack);
9134 else if (is_vlan_dev(dev))
9135 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
9136 extack);
9137 else if (netif_is_macvlan(dev))
9138 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
9139 extack);
9140 else
9141 return 0;
9142 }
9143
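/* Handles IPv4 address deletion. NETDEV_UP is instead processed by the
 * validator notifier (mlxsw_sp_inetaddr_valid_event below), where a RIF
 * creation failure can still veto the address addition with an extack
 * message.
 */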
9144 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
9145 unsigned long event, void *ptr)
9146 {
9147 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
9148 struct net_device *dev = ifa->ifa_dev->dev;
9149 struct mlxsw_sp_router *router;
9150 struct mlxsw_sp_rif *rif;
9151 int err = 0;
9152
9153 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
9154 if (event == NETDEV_UP)
9155 return NOTIFY_DONE;
9156
9157 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
9158 mutex_lock(&router->lock);
9159 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
9160 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9161 goto out;
9162
9163 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
9164 out:
9165 mutex_unlock(&router->lock);
9166 return notifier_from_errno(err);
9167 }
9168
9169 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
9170 unsigned long event, void *ptr)
9171 {
9172 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
9173 struct net_device *dev = ivi->ivi_dev->dev;
9174 struct mlxsw_sp *mlxsw_sp;
9175 struct mlxsw_sp_rif *rif;
9176 int err = 0;
9177
9178 mlxsw_sp = mlxsw_sp_lower_get(dev);
9179 if (!mlxsw_sp)
9180 return NOTIFY_DONE;
9181
9182 mutex_lock(&mlxsw_sp->router->lock);
9183 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9184 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9185 goto out;
9186
9187 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
9188 out:
9189 mutex_unlock(&mlxsw_sp->router->lock);
9190 return notifier_from_errno(err);
9191 }
9192
9193 struct mlxsw_sp_inet6addr_event_work {
9194 struct work_struct work;
9195 struct mlxsw_sp *mlxsw_sp;
9196 struct net_device *dev;
9197 unsigned long event;
9198 };
9199
9200 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
9201 {
9202 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
9203 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
9204 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
9205 struct net_device *dev = inet6addr_work->dev;
9206 unsigned long event = inet6addr_work->event;
9207 struct mlxsw_sp_rif *rif;
9208
9209 rtnl_lock();
9210 mutex_lock(&mlxsw_sp->router->lock);
9211
9212 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9213 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9214 goto out;
9215
9216 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
9217 out:
9218 mutex_unlock(&mlxsw_sp->router->lock);
9219 rtnl_unlock();
9220 dev_put(dev);
9221 kfree(inet6addr_work);
9222 }
9223
9224 /* Called with rcu_read_lock() */
9225 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
9226 unsigned long event, void *ptr)
9227 {
9228 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
9229 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
9230 struct net_device *dev = if6->idev->dev;
9231 struct mlxsw_sp_router *router;
9232
9233 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
9234 if (event == NETDEV_UP)
9235 return NOTIFY_DONE;
9236
9237 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
9238 if (!inet6addr_work)
9239 return NOTIFY_BAD;
9240
9241 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
9242 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
9243 inet6addr_work->mlxsw_sp = router->mlxsw_sp;
9244 inet6addr_work->dev = dev;
9245 inet6addr_work->event = event;
9246 dev_hold(dev);
9247 mlxsw_core_schedule_work(&inet6addr_work->work);
9248
9249 return NOTIFY_DONE;
9250 }
9251
9252 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
9253 unsigned long event, void *ptr)
9254 {
9255 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
9256 struct net_device *dev = i6vi->i6vi_dev->dev;
9257 struct mlxsw_sp *mlxsw_sp;
9258 struct mlxsw_sp_rif *rif;
9259 int err = 0;
9260
9261 mlxsw_sp = mlxsw_sp_lower_get(dev);
9262 if (!mlxsw_sp)
9263 return NOTIFY_DONE;
9264
9265 mutex_lock(&mlxsw_sp->router->lock);
9266 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9267 if (!mlxsw_sp_rif_should_config(rif, dev, event))
9268 goto out;
9269
9270 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
9271 out:
9272 mutex_unlock(&mlxsw_sp->router->lock);
9273 return notifier_from_errno(err);
9274 }
9275
9276 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
9277 const char *mac, int mtu, u8 mac_profile)
9278 {
9279 char ritr_pl[MLXSW_REG_RITR_LEN];
9280 int err;
9281
9282 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
9283 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9284 if (err)
9285 return err;
9286
9287 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
9288 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
9289 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
9290 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
9291 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9292 }
9293
9294 static int
9295 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9296 struct mlxsw_sp_rif *rif,
9297 struct netlink_ext_ack *extack)
9298 {
9299 struct net_device *dev = rif->dev;
9300 u8 old_mac_profile;
9301 u16 fid_index;
9302 int err;
9303
9304 fid_index = mlxsw_sp_fid_index(rif->fid);
9305
9306 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9307 if (err)
9308 return err;
9309
9310 old_mac_profile = rif->mac_profile_id;
9311 err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9312 extack);
9313 if (err)
9314 goto err_rif_mac_profile_replace;
9315
9316 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9317 dev->mtu, rif->mac_profile_id);
9318 if (err)
9319 goto err_rif_edit;
9320
9321 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9322 if (err)
9323 goto err_rif_fdb_op;
9324
9325 if (rif->mtu != dev->mtu) {
9326 struct mlxsw_sp_vr *vr;
9327 int i;
9328
9329 /* The RIF is relevant only to its mr_table instance, as unlike
9330 * unicast routing, in multicast routing a RIF cannot be shared
9331 * between several multicast routing tables.
9332 */
9333 vr = &mlxsw_sp->router->vrs[rif->vr_id];
9334 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9335 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9336 rif, dev->mtu);
9337 }
9338
9339 ether_addr_copy(rif->addr, dev->dev_addr);
9340 rif->mtu = dev->mtu;
9341
9342 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9343
9344 return 0;
9345
9346 err_rif_fdb_op:
9347 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9348 old_mac_profile);
9349 err_rif_edit:
9350 mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9351 err_rif_mac_profile_replace:
9352 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9353 return err;
9354 }
9355
9356 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9357 struct netdev_notifier_pre_changeaddr_info *info)
9358 {
9359 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9360 struct mlxsw_sp_rif_mac_profile *profile;
9361 struct netlink_ext_ack *extack;
9362 u8 max_rif_mac_profiles;
9363 u64 occ;
9364
9365 extack = netdev_notifier_info_to_extack(&info->info);
9366
9367 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9368 if (profile)
9369 return 0;
9370
9371 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9372 occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9373 if (occ < max_rif_mac_profiles)
9374 return 0;
9375
9376 if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9377 return 0;
9378
9379 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9380 return -ENOBUFS;
9381 }
9382
9383 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9384 {
9385 switch (event) {
9386 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9387 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9388 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9389 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9390 return true;
9391 }
9392
9393 return false;
9394 }
9395
9396 static int
9397 mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
9398 unsigned long event,
9399 struct netdev_notifier_offload_xstats_info *info)
9400 {
9401 switch (info->type) {
9402 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
9403 break;
9404 default:
9405 return 0;
9406 }
9407
9408 switch (event) {
9409 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9410 return mlxsw_sp_router_port_l3_stats_enable(rif);
9411 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9412 mlxsw_sp_router_port_l3_stats_disable(rif);
9413 return 0;
9414 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9415 mlxsw_sp_router_port_l3_stats_report_used(rif, info);
9416 return 0;
9417 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
9418 return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
9419 }
9420
9421 WARN_ON_ONCE(1);
9422 return 0;
9423 }
9424
9425 static int
9426 mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
9427 struct net_device *dev,
9428 unsigned long event,
9429 struct netdev_notifier_offload_xstats_info *info)
9430 {
9431 struct mlxsw_sp_rif *rif;
9432
9433 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9434 if (!rif)
9435 return 0;
9436
9437 return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
9438 }
9439
9440 static bool mlxsw_sp_is_router_event(unsigned long event)
9441 {
9442 switch (event) {
9443 case NETDEV_PRE_CHANGEADDR:
9444 case NETDEV_CHANGEADDR:
9445 case NETDEV_CHANGEMTU:
9446 return true;
9447 default:
9448 return false;
9449 }
9450 }
9451
9452 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
9453 unsigned long event, void *ptr)
9454 {
9455 struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
9456 struct mlxsw_sp *mlxsw_sp;
9457 struct mlxsw_sp_rif *rif;
9458
9459 mlxsw_sp = mlxsw_sp_lower_get(dev);
9460 if (!mlxsw_sp)
9461 return 0;
9462
9463 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
9464 if (!rif)
9465 return 0;
9466
9467 switch (event) {
9468 case NETDEV_CHANGEMTU:
9469 case NETDEV_CHANGEADDR:
9470 return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
9471 case NETDEV_PRE_CHANGEADDR:
9472 return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
9473 default:
9474 WARN_ON_ONCE(1);
9475 break;
9476 }
9477
9478 return 0;
9479 }
9480
9481 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
9482 struct net_device *l3_dev,
9483 struct netlink_ext_ack *extack)
9484 {
9485 struct mlxsw_sp_rif *rif;
9486
9487 /* If netdev is already associated with a RIF, then we need to
9488 * destroy it and create a new one with the new virtual router ID.
9489 */
9490 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9491 if (rif)
9492 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
9493 extack);
9494
9495 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
9496 }
9497
9498 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9499 struct net_device *l3_dev)
9500 {
9501 struct mlxsw_sp_rif *rif;
9502
9503 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9504 if (!rif)
9505 return;
9506 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
9507 }
9508
9509 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
9510 {
9511 struct netdev_notifier_changeupper_info *info = ptr;
9512
9513 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
9514 return false;
9515 return netif_is_l3_master(info->upper_dev);
9516 }
9517
9518 static int
9519 mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
9520 struct netdev_notifier_changeupper_info *info)
9521 {
9522 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
9523 int err = 0;
9524
9525 /* We do not create a RIF for a macvlan, but only use it to
9526 * direct more MAC addresses to the router.
9527 */
9528 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
9529 return 0;
9530
9531 switch (event) {
9532 case NETDEV_PRECHANGEUPPER:
9533 break;
9534 case NETDEV_CHANGEUPPER:
9535 if (info->linking) {
9536 struct netlink_ext_ack *extack;
9537
9538 extack = netdev_notifier_info_to_extack(&info->info);
9539 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
9540 } else {
9541 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
9542 }
9543 break;
9544 }
9545
9546 return err;
9547 }
9548
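/* Single entry point for netdev events the router cares about: under
 * the router lock, dispatch to the hardware-stats, IP-in-IP
 * overlay/underlay, router port (MTU/MAC change) or VRF handlers.
 */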
9549 static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
9550 unsigned long event, void *ptr)
9551 {
9552 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
9553 struct mlxsw_sp_router *router;
9554 struct mlxsw_sp *mlxsw_sp;
9555 int err = 0;
9556
9557 router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
9558 mlxsw_sp = router->mlxsw_sp;
9559
9560 mutex_lock(&mlxsw_sp->router->lock);
9561
9562 if (mlxsw_sp_is_offload_xstats_event(event))
9563 err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
9564 event, ptr);
9565 else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
9566 err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
9567 event, ptr);
9568 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
9569 err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
9570 event, ptr);
9571 else if (mlxsw_sp_is_router_event(event))
9572 err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
9573 else if (mlxsw_sp_is_vrf_event(event, ptr))
9574 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
9575
9576 mutex_unlock(&mlxsw_sp->router->lock);
9577
9578 return notifier_from_errno(err);
9579 }
9580
9581 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
9582 struct netdev_nested_priv *priv)
9583 {
9584 struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
9585
9586 if (!netif_is_macvlan(dev))
9587 return 0;
9588
9589 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9590 mlxsw_sp_fid_index(rif->fid), false);
9591 }
9592
9593 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
9594 {
9595 struct netdev_nested_priv priv = {
9596 .data = (void *)rif,
9597 };
9598
9599 if (!netif_is_macvlan_port(rif->dev))
9600 return 0;
9601
9602 netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
9603 return netdev_walk_all_upper_dev_rcu(rif->dev,
9604 __mlxsw_sp_rif_macvlan_flush, &priv);
9605 }
9606
9607 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
9608 const struct mlxsw_sp_rif_params *params)
9609 {
9610 struct mlxsw_sp_rif_subport *rif_subport;
9611
9612 rif_subport = mlxsw_sp_rif_subport_rif(rif);
9613 refcount_set(&rif_subport->ref_count, 1);
9614 rif_subport->vid = params->vid;
9615 rif_subport->lag = params->lag;
9616 if (params->lag)
9617 rif_subport->lag_id = params->lag_id;
9618 else
9619 rif_subport->system_port = params->system_port;
9620 }
9621
9622 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
9623 {
9624 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9625 struct mlxsw_sp_rif_subport *rif_subport;
9626 char ritr_pl[MLXSW_REG_RITR_LEN];
9627
9628 rif_subport = mlxsw_sp_rif_subport_rif(rif);
9629 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
9630 rif->rif_index, rif->vr_id, rif->dev->mtu);
9631 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9632 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9633 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
9634 rif_subport->lag ? rif_subport->lag_id :
9635 rif_subport->system_port,
9636 rif_subport->vid);
9637
9638 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9639 }
9640
9641 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
9642 struct netlink_ext_ack *extack)
9643 {
9644 u8 mac_profile;
9645 int err;
9646
9647 err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
9648 &mac_profile, extack);
9649 if (err)
9650 return err;
9651 rif->mac_profile_id = mac_profile;
9652
9653 err = mlxsw_sp_rif_subport_op(rif, true);
9654 if (err)
9655 goto err_rif_subport_op;
9656
9657 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9658 mlxsw_sp_fid_index(rif->fid), true);
9659 if (err)
9660 goto err_rif_fdb_op;
9661
9662 mlxsw_sp_fid_rif_set(rif->fid, rif);
9663 return 0;
9664
9665 err_rif_fdb_op:
9666 mlxsw_sp_rif_subport_op(rif, false);
9667 err_rif_subport_op:
9668 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
9669 return err;
9670 }
9671
9672 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
9673 {
9674 struct mlxsw_sp_fid *fid = rif->fid;
9675
9676 mlxsw_sp_fid_rif_set(fid, NULL);
9677 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9678 mlxsw_sp_fid_index(fid), false);
9679 mlxsw_sp_rif_macvlan_flush(rif);
9680 mlxsw_sp_rif_subport_op(rif, false);
9681 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9682 }
9683
9684 static struct mlxsw_sp_fid *
9685 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
9686 struct netlink_ext_ack *extack)
9687 {
9688 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
9689 }
9690
9691 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
9692 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
9693 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
9694 .setup = mlxsw_sp_rif_subport_setup,
9695 .configure = mlxsw_sp_rif_subport_configure,
9696 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
9697 .fid_get = mlxsw_sp_rif_subport_fid_get,
9698 };
9699
9700 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
9701 enum mlxsw_reg_ritr_if_type type,
9702 u16 vid_fid, bool enable)
9703 {
9704 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9705 char ritr_pl[MLXSW_REG_RITR_LEN];
9706
9707 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
9708 rif->dev->mtu);
9709 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
9710 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
9711 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
9712
9713 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9714 }
9715
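/* The "router port" is a virtual port one past the last front-panel port.
 * It is used as the flood table member via which multicast and broadcast
 * traffic on a FID reaches the router.
 */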
9716 u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
9717 {
9718 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
9719 }
9720
9721 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
9722 struct netlink_ext_ack *extack)
9723 {
9724 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9725 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9726 u8 mac_profile;
9727 int err;
9728
9729 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
9730 &mac_profile, extack);
9731 if (err)
9732 return err;
9733 rif->mac_profile_id = mac_profile;
9734
9735 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
9736 true);
9737 if (err)
9738 goto err_rif_vlan_fid_op;
9739
9740 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9741 mlxsw_sp_router_port(mlxsw_sp), true);
9742 if (err)
9743 goto err_fid_mc_flood_set;
9744
9745 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9746 mlxsw_sp_router_port(mlxsw_sp), true);
9747 if (err)
9748 goto err_fid_bc_flood_set;
9749
9750 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9751 mlxsw_sp_fid_index(rif->fid), true);
9752 if (err)
9753 goto err_rif_fdb_op;
9754
9755 mlxsw_sp_fid_rif_set(rif->fid, rif);
9756 return 0;
9757
9758 err_rif_fdb_op:
9759 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9760 mlxsw_sp_router_port(mlxsw_sp), false);
9761 err_fid_bc_flood_set:
9762 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9763 mlxsw_sp_router_port(mlxsw_sp), false);
9764 err_fid_mc_flood_set:
9765 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9766 err_rif_vlan_fid_op:
9767 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
9768 return err;
9769 }
9770
9771 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
9772 {
9773 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
9774 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9775 struct mlxsw_sp_fid *fid = rif->fid;
9776
9777 mlxsw_sp_fid_rif_set(fid, NULL);
9778 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
9779 mlxsw_sp_fid_index(fid), false);
9780 mlxsw_sp_rif_macvlan_flush(rif);
9781 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
9782 mlxsw_sp_router_port(mlxsw_sp), false);
9783 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
9784 mlxsw_sp_router_port(mlxsw_sp), false);
9785 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
9786 mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
9787 }
9788
9789 static struct mlxsw_sp_fid *
9790 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
9791 struct netlink_ext_ack *extack)
9792 {
9793 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
9794 }
9795
9796 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9797 {
9798 struct switchdev_notifier_fdb_info info = {};
9799 struct net_device *dev;
9800
9801 dev = br_fdb_find_port(rif->dev, mac, 0);
9802 if (!dev)
9803 return;
9804
9805 info.addr = mac;
9806 info.vid = 0;
9807 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9808 NULL);
9809 }
9810
9811 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
9812 .type = MLXSW_SP_RIF_TYPE_FID,
9813 .rif_size = sizeof(struct mlxsw_sp_rif),
9814 .configure = mlxsw_sp_rif_fid_configure,
9815 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
9816 .fid_get = mlxsw_sp_rif_fid_fid_get,
9817 .fdb_del = mlxsw_sp_rif_fid_fdb_del,
9818 };
9819
9820 static struct mlxsw_sp_fid *
9821 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
9822 struct netlink_ext_ack *extack)
9823 {
9824 struct net_device *br_dev;
9825 u16 vid;
9826 int err;
9827
9828 if (is_vlan_dev(rif->dev)) {
9829 vid = vlan_dev_vlan_id(rif->dev);
9830 br_dev = vlan_dev_real_dev(rif->dev);
9831 if (WARN_ON(!netif_is_bridge_master(br_dev)))
9832 return ERR_PTR(-EINVAL);
9833 } else {
9834 err = br_vlan_get_pvid(rif->dev, &vid);
9835 if (err < 0 || !vid) {
9836 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
9837 return ERR_PTR(-EINVAL);
9838 }
9839 }
9840
9841 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
9842 }
9843
9844 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9845 {
9846 struct switchdev_notifier_fdb_info info = {};
9847 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9848 struct net_device *br_dev;
9849 struct net_device *dev;
9850
9851 br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
9852 dev = br_fdb_find_port(br_dev, mac, vid);
9853 if (!dev)
9854 return;
9855
9856 info.addr = mac;
9857 info.vid = vid;
9858 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9859 NULL);
9860 }
9861
9862 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
9863 .type = MLXSW_SP_RIF_TYPE_VLAN,
9864 .rif_size = sizeof(struct mlxsw_sp_rif),
9865 .configure = mlxsw_sp_rif_fid_configure,
9866 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
9867 .fid_get = mlxsw_sp_rif_vlan_fid_get,
9868 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
9869 };
9870
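/* IP-in-IP loopback RIFs represent a tunnel's underlay. On Spectrum-1 the
 * loopback is bound directly to an underlay virtual router, while on
 * Spectrum-2 it points at a shared underlay RIF (see further below).
 */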
9871 static struct mlxsw_sp_rif_ipip_lb *
9872 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
9873 {
9874 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
9875 }
9876
9877 static void
9878 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
9879 const struct mlxsw_sp_rif_params *params)
9880 {
9881 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
9882 struct mlxsw_sp_rif_ipip_lb *rif_lb;
9883
9884 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
9885 common);
9886 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
9887 rif_lb->lb_config = params_lb->lb_config;
9888 }
9889
9890 static int
9891 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
9892 struct netlink_ext_ack *extack)
9893 {
9894 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9895 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
9896 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9897 struct mlxsw_sp_vr *ul_vr;
9898 int err;
9899
9900 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
9901 if (IS_ERR(ul_vr))
9902 return PTR_ERR(ul_vr);
9903
9904 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
9905 if (err)
9906 goto err_loopback_op;
9907
9908 lb_rif->ul_vr_id = ul_vr->id;
9909 lb_rif->ul_rif_id = 0;
9910 ++ul_vr->rif_count;
9911 return 0;
9912
9913 err_loopback_op:
9914 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9915 return err;
9916 }
9917
9918 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
9919 {
9920 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
9921 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9922 struct mlxsw_sp_vr *ul_vr;
9923
9924 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
9925 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
9926
9927 --ul_vr->rif_count;
9928 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
9929 }
9930
9931 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
9932 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
9933 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
9934 .setup = mlxsw_sp_rif_ipip_lb_setup,
9935 .configure = mlxsw_sp1_rif_ipip_lb_configure,
9936 .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
9937 };
9938
9939 static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
9940 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
9941 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
9942 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
9943 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
9944 };
9945
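/* On Spectrum-2, the tunnel underlay is modeled as a generic loopback RIF
 * with maximal MTU, shared by all tunnels whose underlay resolves to the
 * same virtual router.
 */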
9946 static int
9947 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
9948 {
9949 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9950 char ritr_pl[MLXSW_REG_RITR_LEN];
9951
9952 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
9953 ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
9954 mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
9955 MLXSW_REG_RITR_LOOPBACK_GENERIC);
9956
9957 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9958 }
9959
9960 static struct mlxsw_sp_rif *
9961 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
9962 struct netlink_ext_ack *extack)
9963 {
9964 struct mlxsw_sp_rif *ul_rif;
9965 u16 rif_index;
9966 int err;
9967
9968 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
9969 if (err) {
9970 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
9971 return ERR_PTR(err);
9972 }
9973
9974 ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
9975 if (!ul_rif)
9976 return ERR_PTR(-ENOMEM);
9977
9978 mlxsw_sp->router->rifs[rif_index] = ul_rif;
9979 ul_rif->mlxsw_sp = mlxsw_sp;
9980 err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
9981 if (err)
9982 goto ul_rif_op_err;
9983
9984 return ul_rif;
9985
9986 ul_rif_op_err:
9987 mlxsw_sp->router->rifs[rif_index] = NULL;
9988 kfree(ul_rif);
9989 return ERR_PTR(err);
9990 }
9991
9992 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
9993 {
9994 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9995
9996 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
9997 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
9998 kfree(ul_rif);
9999 }
10000
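/* Get a reference on the underlay RIF of the given table. The RIF is
 * created on first use and destroyed again when the last reference is
 * put back.
 */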
10001 static struct mlxsw_sp_rif *
10002 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
10003 struct netlink_ext_ack *extack)
10004 {
10005 struct mlxsw_sp_vr *vr;
10006 int err;
10007
10008 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
10009 if (IS_ERR(vr))
10010 return ERR_CAST(vr);
10011
10012 if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
10013 return vr->ul_rif;
10014
10015 vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
10016 if (IS_ERR(vr->ul_rif)) {
10017 err = PTR_ERR(vr->ul_rif);
10018 goto err_ul_rif_create;
10019 }
10020
10021 vr->rif_count++;
10022 refcount_set(&vr->ul_rif_refcnt, 1);
10023
10024 return vr->ul_rif;
10025
10026 err_ul_rif_create:
10027 mlxsw_sp_vr_put(mlxsw_sp, vr);
10028 return ERR_PTR(err);
10029 }
10030
10031 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
10032 {
10033 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
10034 struct mlxsw_sp_vr *vr;
10035
10036 vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
10037
10038 if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
10039 return;
10040
10041 vr->rif_count--;
10042 mlxsw_sp_ul_rif_destroy(ul_rif);
10043 mlxsw_sp_vr_put(mlxsw_sp, vr);
10044 }
10045
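/* Exported get/put wrappers for use outside the router code; they only add
 * locking around the internal helpers above.
 */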
10046 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
10047 u16 *ul_rif_index)
10048 {
10049 struct mlxsw_sp_rif *ul_rif;
10050 int err = 0;
10051
10052 mutex_lock(&mlxsw_sp->router->lock);
10053 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
10054 if (IS_ERR(ul_rif)) {
10055 err = PTR_ERR(ul_rif);
10056 goto out;
10057 }
10058 *ul_rif_index = ul_rif->rif_index;
10059 out:
10060 mutex_unlock(&mlxsw_sp->router->lock);
10061 return err;
10062 }
10063
10064 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
10065 {
10066 struct mlxsw_sp_rif *ul_rif;
10067
10068 mutex_lock(&mlxsw_sp->router->lock);
10069 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
10070 if (WARN_ON(!ul_rif))
10071 goto out;
10072
10073 mlxsw_sp_ul_rif_put(ul_rif);
10074 out:
10075 mutex_unlock(&mlxsw_sp->router->lock);
10076 }
10077
10078 static int
10079 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
10080 struct netlink_ext_ack *extack)
10081 {
10082 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10083 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
10084 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10085 struct mlxsw_sp_rif *ul_rif;
10086 int err;
10087
10088 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
10089 if (IS_ERR(ul_rif))
10090 return PTR_ERR(ul_rif);
10091
10092 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
10093 if (err)
10094 goto err_loopback_op;
10095
10096 lb_rif->ul_vr_id = 0;
10097 lb_rif->ul_rif_id = ul_rif->rif_index;
10098
10099 return 0;
10100
10101 err_loopback_op:
10102 mlxsw_sp_ul_rif_put(ul_rif);
10103 return err;
10104 }
10105
10106 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
10107 {
10108 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
10109 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
10110 struct mlxsw_sp_rif *ul_rif;
10111
10112 ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
10113 mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
10114 mlxsw_sp_ul_rif_put(ul_rif);
10115 }
10116
10117 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
10118 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
10119 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
10120 .setup = mlxsw_sp_rif_ipip_lb_setup,
10121 .configure = mlxsw_sp2_rif_ipip_lb_configure,
10122 .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
10123 };
10124
10125 static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
10126 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
10127 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
10128 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
10129 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
10130 };
10131
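/* Size the RIF table according to the MAX_RIFS resource and register
 * occupancy accounting for RIF MAC profiles with devlink.
 */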
10132 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
10133 {
10134 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10135 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10136 struct mlxsw_core *core = mlxsw_sp->core;
10137
10138 if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
10139 return -EIO;
10140 mlxsw_sp->router->max_rif_mac_profile =
10141 MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
10142
10143 mlxsw_sp->router->rifs = kcalloc(max_rifs,
10144 sizeof(struct mlxsw_sp_rif *),
10145 GFP_KERNEL);
10146 if (!mlxsw_sp->router->rifs)
10147 return -ENOMEM;
10148
10149 idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
10150 atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
10151 devlink_resource_occ_get_register(devlink,
10152 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
10153 mlxsw_sp_rif_mac_profiles_occ_get,
10154 mlxsw_sp);
10155
10156 return 0;
10157 }
10158
10159 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
10160 {
10161 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
10162 int i;
10163
10164 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
10165 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
10166
10167 devlink_resource_occ_get_unregister(devlink,
10168 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
10169 WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
10170 idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
10171 kfree(mlxsw_sp->router->rifs);
10172 }
10173
10174 static int
10175 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
10176 {
10177 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
10178
10179 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
10180 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
10181 }
10182
10183 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
10184 {
10185 int err;
10186
10187 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
10188
10189 err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
10190 if (err)
10191 return err;
10192 err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
10193 if (err)
10194 return err;
10195
10196 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
10197 }
10198
10199 static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
10200 {
10201 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
10202 return mlxsw_sp_ipips_init(mlxsw_sp);
10203 }
10204
10205 static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
10206 {
10207 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
10208 return mlxsw_sp_ipips_init(mlxsw_sp);
10209 }
10210
10211 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
10212 {
10213 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
10214 }
10215
10216 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
10217 {
10218 struct mlxsw_sp_router *router;
10219
10220 /* Flush pending FIB notifications and then flush the device's
10221 * table before requesting another dump. The FIB notification
10222 * block is unregistered, so no need to take RTNL.
10223 */
10224 mlxsw_core_flush_owq();
10225 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
10226 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
10227 }
10228
10229 #ifdef CONFIG_IP_ROUTE_MULTIPATH
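/* Accumulated ECMP hash configuration: which packet headers the device
 * should consider and which of their fields feed the hash, for both outer
 * and inner (tunneled) headers. Committed to the RECR2 register by
 * mlxsw_sp_mp_hash_init() below.
 */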
10230 struct mlxsw_sp_mp_hash_config {
10231 DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
10232 DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
10233 DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
10234 DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
10235 bool inc_parsing_depth;
10236 };
10237
10238 #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
10239 bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)
10240
10241 #define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
10242 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)
10243
10244 #define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
10245 bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
10246
10247 static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
10248 {
10249 unsigned long *inner_headers = config->inner_headers;
10250 unsigned long *inner_fields = config->inner_fields;
10251
10252 /* IPv4 inner */
10253 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10254 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10255 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10256 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10257 /* IPv6 inner */
10258 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10259 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10260 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10261 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10262 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10263 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10264 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10265 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10266 }
10267
10268 static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10269 {
10270 unsigned long *headers = config->headers;
10271 unsigned long *fields = config->fields;
10272
10273 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10274 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10275 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10276 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10277 }
10278
10279 static void
10280 mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
10281 u32 hash_fields)
10282 {
10283 unsigned long *inner_headers = config->inner_headers;
10284 unsigned long *inner_fields = config->inner_fields;
10285
10286 /* IPv4 Inner */
10287 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
10288 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
10289 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
10290 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
10291 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
10292 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
10293 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10294 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
10295 /* IPv6 inner */
10296 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
10297 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
10298 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
10299 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
10300 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
10301 }
10302 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
10303 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
10304 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
10305 }
10306 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
10307 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
10308 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
10309 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
10310 /* L4 inner */
10311 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
10312 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
10313 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
10314 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
10315 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
10316 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
10317 }
10318
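/* Mirror the kernel's IPv4 multipath hash policy: 0 - L3, 1 - L4,
 * 2 - L3 or inner L3 if present, 3 - custom field set taken from the
 * fib_multipath_hash_fields sysctl.
 */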
10319 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
10320 struct mlxsw_sp_mp_hash_config *config)
10321 {
10322 struct net *net = mlxsw_sp_net(mlxsw_sp);
10323 unsigned long *headers = config->headers;
10324 unsigned long *fields = config->fields;
10325 u32 hash_fields;
10326
10327 switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
10328 case 0:
10329 mlxsw_sp_mp4_hash_outer_addr(config);
10330 break;
10331 case 1:
10332 mlxsw_sp_mp4_hash_outer_addr(config);
10333 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10334 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10335 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10336 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10337 break;
10338 case 2:
10339 /* Outer */
10340 mlxsw_sp_mp4_hash_outer_addr(config);
10341 /* Inner */
10342 mlxsw_sp_mp_hash_inner_l3(config);
10343 break;
10344 case 3:
10345 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
10346 /* Outer */
10347 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
10348 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
10349 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
10350 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
10351 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
10352 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
10353 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
10354 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10355 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
10356 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10357 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10358 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10359 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10360 /* Inner */
10361 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10362 break;
10363 }
10364 }
10365
10366 static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
10367 {
10368 unsigned long *headers = config->headers;
10369 unsigned long *fields = config->fields;
10370
10371 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10372 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10373 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10374 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10375 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10376 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10377 }
10378
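/* IPv6 counterpart of the above, keyed on the IPv6 multipath hash policy.
 * Policies hashing on inner headers make the device parse deeper into the
 * packet, which is what inc_parsing_depth requests.
 */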
10379 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
10380 struct mlxsw_sp_mp_hash_config *config)
10381 {
10382 u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
10383 unsigned long *headers = config->headers;
10384 unsigned long *fields = config->fields;
10385
10386 switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
10387 case 0:
10388 mlxsw_sp_mp6_hash_outer_addr(config);
10389 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10390 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10391 break;
10392 case 1:
10393 mlxsw_sp_mp6_hash_outer_addr(config);
10394 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10395 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10396 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10397 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10398 break;
10399 case 2:
10400 /* Outer */
10401 mlxsw_sp_mp6_hash_outer_addr(config);
10402 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10403 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10404 /* Inner */
10405 mlxsw_sp_mp_hash_inner_l3(config);
10406 config->inc_parsing_depth = true;
10407 break;
10408 case 3:
10409 /* Outer */
10410 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10411 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10412 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
10413 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
10414 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10415 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10416 }
10417 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
10418 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10419 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10420 }
10421 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
10422 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
10423 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
10424 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
10425 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
10426 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
10427 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
10428 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
10429 /* Inner */
10430 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
10431 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
10432 config->inc_parsing_depth = true;
10433 break;
10434 }
10435 }
10436
10437 static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
10438 bool old_inc_parsing_depth,
10439 bool new_inc_parsing_depth)
10440 {
10441 int err;
10442
10443 if (!old_inc_parsing_depth && new_inc_parsing_depth) {
10444 err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
10445 if (err)
10446 return err;
10447 mlxsw_sp->router->inc_parsing_depth = true;
10448 } else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
10449 mlxsw_sp_parsing_depth_dec(mlxsw_sp);
10450 mlxsw_sp->router->inc_parsing_depth = false;
10451 }
10452
10453 return 0;
10454 }
10455
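/* Commit the multipath hash configuration to the RECR2 register. The seed
 * is derived from the base MAC so that it is stable per device yet differs
 * between devices, presumably to avoid hash polarization between switches.
 */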
10456 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10457 {
10458 bool old_inc_parsing_depth, new_inc_parsing_depth;
10459 struct mlxsw_sp_mp_hash_config config = {};
10460 char recr2_pl[MLXSW_REG_RECR2_LEN];
10461 unsigned long bit;
10462 u32 seed;
10463 int err;
10464
10465 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
10466 mlxsw_reg_recr2_pack(recr2_pl, seed);
10467 mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
10468 mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
10469
10470 old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
10471 new_inc_parsing_depth = config.inc_parsing_depth;
10472 err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
10473 old_inc_parsing_depth,
10474 new_inc_parsing_depth);
10475 if (err)
10476 return err;
10477
10478 for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
10479 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
10480 for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
10481 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
10482 for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
10483 mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
10484 for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
10485 mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);
10486
10487 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
10488 if (err)
10489 goto err_reg_write;
10490
10491 return 0;
10492
10493 err_reg_write:
10494 mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
10495 old_inc_parsing_depth);
10496 return err;
10497 }
10498 #else
10499 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
10500 {
10501 return 0;
10502 }
10503 #endif
10504
10505 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
10506 {
10507 char rdpm_pl[MLXSW_REG_RDPM_LEN];
10508 unsigned int i;
10509
10510 MLXSW_REG_ZERO(rdpm, rdpm_pl);
10511
10512 /* HW determines switch priority based on the DSCP bits, but the
10513 * kernel still does so based on the full ToS byte. Since the bit
10514 * ranges mismatch, program the value a ToS-based lookup would
10515 * observe, skipping the 2 least-significant ECN bits.
10516 */
10517 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
10518 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
10519
10520 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
10521 }
10522
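/* Enable the router in hardware via the RGCR register, sizing it for the
 * maximal number of RIFs and propagating the ip_fwd_update_priority
 * sysctl.
 */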
10523 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
10524 {
10525 struct net *net = mlxsw_sp_net(mlxsw_sp);
10526 char rgcr_pl[MLXSW_REG_RGCR_LEN];
10527 u64 max_rifs;
10528 bool usp;
10529
10530 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
10531 return -EIO;
10532 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10533 usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
10534
10535 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
10536 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
10537 mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
10538 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10539 }
10540
10541 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10542 {
10543 char rgcr_pl[MLXSW_REG_RGCR_LEN];
10544
10545 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
10546 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10547 }
10548
10549 static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
10550 .init = mlxsw_sp_router_ll_basic_init,
10551 .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
10552 .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
10553 .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
10554 .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
10555 .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
10556 .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
10557 .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
10558 .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
10559 .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
10560 .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
10561 .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
10562 };
10563
10564 static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
10565 {
10566 size_t max_size = 0;
10567 int i;
10568
10569 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
10570 size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
10571
10572 if (size > max_size)
10573 max_size = size;
10574 }
10575 router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
10576 GFP_KERNEL);
10577 if (!router->ll_op_ctx)
10578 return -ENOMEM;
10579 INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
10580 return 0;
10581 }
10582
10583 static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
10584 {
10585 WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
10586 kfree(router->ll_op_ctx);
10587 }
10588
10589 static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
10590 {
10591 u16 lb_rif_index;
10592 int err;
10593
10594 /* Create a generic loopback RIF associated with the main table
10595 * (default VRF). Any table can be used, but the main table exists
10596 * anyway, so we do not waste resources.
10597 */
10598 err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
10599 &lb_rif_index);
10600 if (err)
10601 return err;
10602
10603 mlxsw_sp->router->lb_rif_index = lb_rif_index;
10604
10605 return 0;
10606 }
10607
10608 static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
10609 {
10610 mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
10611 }
10612
10613 static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
10614 {
10615 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
10616
10617 mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
10618 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
10619 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10620
10621 return 0;
10622 }
10623
10624 const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
10625 .init = mlxsw_sp1_router_init,
10626 .ipips_init = mlxsw_sp1_ipips_init,
10627 };
10628
10629 static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
10630 {
10631 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
10632
10633 mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
10634 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
10635 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10636
10637 return 0;
10638 }
10639
10640 const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
10641 .init = mlxsw_sp2_router_init,
10642 .ipips_init = mlxsw_sp2_ipips_init,
10643 };
10644
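/* Top-level router initialization. Ordering matters: notifiers are
 * registered last, once every data structure they may touch exists, and
 * the error path unwinds in exact reverse order.
 */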
10645 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
10646 struct netlink_ext_ack *extack)
10647 {
10648 struct mlxsw_sp_router *router;
10649 int err;
10650
10651 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
10652 if (!router)
10653 return -ENOMEM;
10654 mutex_init(&router->lock);
10655 mlxsw_sp->router = router;
10656 router->mlxsw_sp = mlxsw_sp;
10657
10658 err = mlxsw_sp->router_ops->init(mlxsw_sp);
10659 if (err)
10660 goto err_router_ops_init;
10661
10662 err = mlxsw_sp_router_xm_init(mlxsw_sp);
10663 if (err)
10664 goto err_xm_init;
10665
10666 router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
10667 &mlxsw_sp_router_ll_xm_ops :
10668 &mlxsw_sp_router_ll_basic_ops;
10669 router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
10670
10671 err = mlxsw_sp_router_ll_op_ctx_init(router);
10672 if (err)
10673 goto err_ll_op_ctx_init;
10674
10675 INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
10676 INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
10677 mlxsw_sp_nh_grp_activity_work);
10678
10679 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
10680 err = __mlxsw_sp_router_init(mlxsw_sp);
10681 if (err)
10682 goto err_router_init;
10683
10684 err = mlxsw_sp_rifs_init(mlxsw_sp);
10685 if (err)
10686 goto err_rifs_init;
10687
10688 err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
10689 if (err)
10690 goto err_ipips_init;
10691
10692 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
10693 &mlxsw_sp_nexthop_ht_params);
10694 if (err)
10695 goto err_nexthop_ht_init;
10696
10697 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
10698 &mlxsw_sp_nexthop_group_ht_params);
10699 if (err)
10700 goto err_nexthop_group_ht_init;
10701
10702 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
10703 err = mlxsw_sp_lpm_init(mlxsw_sp);
10704 if (err)
10705 goto err_lpm_init;
10706
10707 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
10708 if (err)
10709 goto err_mr_init;
10710
10711 err = mlxsw_sp_vrs_init(mlxsw_sp);
10712 if (err)
10713 goto err_vrs_init;
10714
10715 err = mlxsw_sp_lb_rif_init(mlxsw_sp);
10716 if (err)
10717 goto err_lb_rif_init;
10718
10719 err = mlxsw_sp_neigh_init(mlxsw_sp);
10720 if (err)
10721 goto err_neigh_init;
10722
10723 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
10724 if (err)
10725 goto err_mp_hash_init;
10726
10727 err = mlxsw_sp_dscp_init(mlxsw_sp);
10728 if (err)
10729 goto err_dscp_init;
10730
10731 INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
10732 INIT_LIST_HEAD(&router->fib_event_queue);
10733 spin_lock_init(&router->fib_event_queue_lock);
10734
10735 router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
10736 err = register_inetaddr_notifier(&router->inetaddr_nb);
10737 if (err)
10738 goto err_register_inetaddr_notifier;
10739
10740 router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
10741 err = register_inet6addr_notifier(&router->inet6addr_nb);
10742 if (err)
10743 goto err_register_inet6addr_notifier;
10744
10745 mlxsw_sp->router->netevent_nb.notifier_call =
10746 mlxsw_sp_router_netevent_event;
10747 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10748 if (err)
10749 goto err_register_netevent_notifier;
10750
10751 mlxsw_sp->router->nexthop_nb.notifier_call =
10752 mlxsw_sp_nexthop_obj_event;
10753 err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10754 &mlxsw_sp->router->nexthop_nb,
10755 extack);
10756 if (err)
10757 goto err_register_nexthop_notifier;
10758
10759 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
10760 err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10761 &mlxsw_sp->router->fib_nb,
10762 mlxsw_sp_router_fib_dump_flush, extack);
10763 if (err)
10764 goto err_register_fib_notifier;
10765
10766 mlxsw_sp->router->netdevice_nb.notifier_call =
10767 mlxsw_sp_router_netdevice_event;
10768 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
10769 &mlxsw_sp->router->netdevice_nb);
10770 if (err)
10771 goto err_register_netdev_notifier;
10772
10773 return 0;
10774
10775 err_register_netdev_notifier:
10776 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10777 &mlxsw_sp->router->fib_nb);
10778 err_register_fib_notifier:
10779 unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10780 &mlxsw_sp->router->nexthop_nb);
10781 err_register_nexthop_notifier:
10782 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10783 err_register_netevent_notifier:
10784 unregister_inet6addr_notifier(&router->inet6addr_nb);
10785 err_register_inet6addr_notifier:
10786 unregister_inetaddr_notifier(&router->inetaddr_nb);
10787 err_register_inetaddr_notifier:
10788 mlxsw_core_flush_owq();
10789 WARN_ON(!list_empty(&router->fib_event_queue));
10790 err_dscp_init:
10791 err_mp_hash_init:
10792 mlxsw_sp_neigh_fini(mlxsw_sp);
10793 err_neigh_init:
10794 mlxsw_sp_lb_rif_fini(mlxsw_sp);
10795 err_lb_rif_init:
10796 mlxsw_sp_vrs_fini(mlxsw_sp);
10797 err_vrs_init:
10798 mlxsw_sp_mr_fini(mlxsw_sp);
10799 err_mr_init:
10800 mlxsw_sp_lpm_fini(mlxsw_sp);
10801 err_lpm_init:
10802 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10803 err_nexthop_group_ht_init:
10804 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10805 err_nexthop_ht_init:
10806 mlxsw_sp_ipips_fini(mlxsw_sp);
10807 err_ipips_init:
10808 mlxsw_sp_rifs_fini(mlxsw_sp);
10809 err_rifs_init:
10810 __mlxsw_sp_router_fini(mlxsw_sp);
10811 err_router_init:
10812 cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10813 mlxsw_sp_router_ll_op_ctx_fini(router);
10814 err_ll_op_ctx_init:
10815 mlxsw_sp_router_xm_fini(mlxsw_sp);
10816 err_xm_init:
10817 err_router_ops_init:
10818 mutex_destroy(&mlxsw_sp->router->lock);
10819 kfree(mlxsw_sp->router);
10820 return err;
10821 }
10822
10823 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
10824 {
10825 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
10826 &mlxsw_sp->router->netdevice_nb);
10827 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
10828 &mlxsw_sp->router->fib_nb);
10829 unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
10830 &mlxsw_sp->router->nexthop_nb);
10831 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
10832 unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
10833 unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
10834 mlxsw_core_flush_owq();
10835 WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
10836 mlxsw_sp_neigh_fini(mlxsw_sp);
10837 mlxsw_sp_lb_rif_fini(mlxsw_sp);
10838 mlxsw_sp_vrs_fini(mlxsw_sp);
10839 mlxsw_sp_mr_fini(mlxsw_sp);
10840 mlxsw_sp_lpm_fini(mlxsw_sp);
10841 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
10842 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
10843 mlxsw_sp_ipips_fini(mlxsw_sp);
10844 mlxsw_sp_rifs_fini(mlxsw_sp);
10845 __mlxsw_sp_router_fini(mlxsw_sp);
10846 cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
10847 mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
10848 mlxsw_sp_router_xm_fini(mlxsw_sp);
10849 mutex_destroy(&mlxsw_sp->router->lock);
10850 kfree(mlxsw_sp->router);
10851 }
10852